author     Vladimir Makarov <vmakarov@redhat.com>  2008-07-03 19:49:42 +0000
committer  Vladimir Makarov <vmakarov@redhat.com>  2008-07-03 19:49:42 +0000
commit     11c201fccf6c1bcd7d9eee0e7616c977e772485b (patch)
tree       42ebb8665401f424bbe1504152b3d64cf5fe1ad0 /gcc
parent     f0d10524efc77244f74c715b969dd4225806de57 (diff)
2008-07-03  Vladimir Makarov  <vmakarov@redhat.com>

	* ira.h, ira-int.h, ira-conflicts.c, ira-color.c, ira-lives.c,
	ira-emit.c, ira-build.c, ira.c, ira-costs.c: Add prefix ira_ to
	external variables, functions, common types, and macros.

	* toplev.c (init_ira_once, init_ira, finish_ira_once): Rename to
	ira_init_once, ira_init, ira_finish_once.

	* caller-save.c: Add prefix ira_ to
	collect_pseudo_call_clobbered_regs.

	* reload1.c: Add prefix ira_ to sort_insn_chain,
	mark_memory_move_deletion, better_spill_reload_regno_p,
	reuse_stack_slot, mark_new_stack_slot, mark_allocation_change,
	reassign_pseudos, mark_allocation_change.

git-svn-id: https://gcc.gnu.org/svn/gcc/branches/ira@137433 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog        17
-rw-r--r--  gcc/caller-save.c     2
-rw-r--r--  gcc/ira-build.c     686
-rw-r--r--  gcc/ira-color.c     709
-rw-r--r--  gcc/ira-conflicts.c 158
-rw-r--r--  gcc/ira-costs.c     217
-rw-r--r--  gcc/ira-emit.c      142
-rw-r--r--  gcc/ira-int.h       447
-rw-r--r--  gcc/ira-lives.c     224
-rw-r--r--  gcc/ira.c           457
-rw-r--r--  gcc/ira.h            26
-rw-r--r--  gcc/reload1.c        35
-rw-r--r--  gcc/toplev.c          6
13 files changed, 1604 insertions, 1522 deletions
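
The patch is a mechanical namespace cleanup: every IRA symbol visible outside the
allocator gains an ira_ prefix, so callers in reload1.c, toplev.c, and
caller-save.c reference a clearly namespaced interface. A minimal sketch of the
pattern, condensed from declarations that appear in the hunks below (illustrative
only, not part of the patch; the typedef stands in for the real definition in
ira-int.h):

    /* Illustrative sketch of the renaming pattern -- not part of the patch.  */
    typedef struct ira_allocno *ira_allocno_t;      /* was: allocno_t */

    /* was: extern allocno_t *regno_allocno_map;  */
    extern ira_allocno_t *ira_regno_allocno_map;

    /* was: extern int allocnos_num;  */
    extern int ira_allocnos_num;

Static (file-local) helpers such as find_allocno_copy keep their short names;
only the external interface is renamed.
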
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c14234a32c3..7b95290d025 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,22 @@
2008-07-03 Vladimir Makarov <vmakarov@redhat.com>
+ * ira.h, ira-int.h, ira-conflicts.c, ira-color.c, ira-lives.c,
+ ira-emit.c, ira-build.c, ira.c, ira-costs.c: Add prefix ira_ to
+ external variables, functions, common types, and macros.
+
+ * toplev.c (init_ira_once, init_ira, finish_ira_once): Rename to
+ ira_init_once, ira_init, ira_finish_once.
+
+ * caller-save.c: Add prefix ira_ to
+ collect_pseudo_call_clobbered_regs.
+
+ * reload1.c: Add prefix ira_ to sort_insn_chain,
+ mark_memory_move_deletion, better_spill_reload_regno_p,
+ reuse_stack_slot, mark_new_stack_slot, mark_allocation_change,
+ reassign_pseudos, mark_allocation_change.
+
+2008-07-03 Vladimir Makarov <vmakarov@redhat.com>
+
* ira-color.c (reassign_conflict_allocnos): Check nrefs first.
* ira.c (ira.c): Exclude unprofitable allocno from reassigning.
diff --git a/gcc/caller-save.c b/gcc/caller-save.c
index 798927523e6..84e349b7e6b 100644
--- a/gcc/caller-save.c
+++ b/gcc/caller-save.c
@@ -473,7 +473,7 @@ setup_save_areas (void)
{
HARD_REG_SET clobbered_regs;
- collect_pseudo_call_clobbered_regs (i, &clobbered_regs);
+ ira_collect_pseudo_call_clobbered_regs (i, &clobbered_regs);
for (r = regno; r < endregno; r++)
if (TEST_HARD_REG_BIT (clobbered_regs, r))
SET_HARD_REG_BIT (hard_regs_used, r);
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 2d206ada42f..95eafab146c 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -40,10 +40,11 @@ along with GCC; see the file COPYING3. If not see
#include "sparseset.h"
#include "ira-int.h"
-static copy_t find_allocno_copy (allocno_t, allocno_t, rtx, loop_tree_node_t);
+static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
+ ira_loop_tree_node_t);
/* The root of the loop tree corresponding to the all function. */
-loop_tree_node_t ira_loop_tree_root;
+ira_loop_tree_node_t ira_loop_tree_root;
/* Height of the loop tree. */
int ira_loop_tree_height;
@@ -51,35 +52,35 @@ int ira_loop_tree_height;
/* All nodes representing basic blocks are referred through the
following array. We can not use basic block member `aux' for this
because it is used for insertion of insns on edges. */
-loop_tree_node_t ira_bb_nodes;
+ira_loop_tree_node_t ira_bb_nodes;
/* All nodes representing loops are referred through the following
array. */
-loop_tree_node_t ira_loop_nodes;
+ira_loop_tree_node_t ira_loop_nodes;
/* Map regno -> allocnos with given regno (see comments for
allocno member `next_regno_allocno'). */
-allocno_t *regno_allocno_map;
+ira_allocno_t *ira_regno_allocno_map;
/* Array of references to all allocnos. The order number of the
allocno corresponds to the index in the array. Removed allocnos
have NULL element value. */
-allocno_t *allocnos;
+ira_allocno_t *ira_allocnos;
/* Sizes of the previous array. */
-int allocnos_num;
+int ira_allocnos_num;
/* Map conflict id -> allocno with given conflict id (see comments for
allocno member `conflict_id'). */
-allocno_t *conflict_id_allocno_map;
+ira_allocno_t *ira_conflict_id_allocno_map;
/* Array of references to all copies. The order number of the copy
corresponds to the index in the array. Removed copies have NULL
element value. */
-copy_t *copies;
+ira_copy_t *ira_copies;
/* Size of the previous array. */
-int copies_num;
+int ira_copies_num;
/* Bitmap of allocnos used for different purposes. */
static bitmap allocnos_bitmap;
@@ -107,7 +108,7 @@ create_loop_tree_nodes (bool loops_p)
loop_p loop;
ira_bb_nodes
- = ira_allocate (sizeof (struct loop_tree_node) * last_basic_block);
+ = ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block);
last_basic_block_before_change = last_basic_block;
for (i = 0; i < (unsigned int) last_basic_block; i++)
{
@@ -119,7 +120,7 @@ create_loop_tree_nodes (bool loops_p)
ira_bb_nodes[i].border_allocnos = NULL;
ira_bb_nodes[i].local_copies = NULL;
}
- ira_loop_nodes = ira_allocate (sizeof (struct loop_tree_node)
+ ira_loop_nodes = ira_allocate (sizeof (struct ira_loop_tree_node)
* VEC_length (loop_p, ira_loops.larray));
max_regno = max_reg_num ();
for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
@@ -151,9 +152,9 @@ create_loop_tree_nodes (bool loops_p)
continue;
}
ira_loop_nodes[i].regno_allocno_map
- = ira_allocate (sizeof (allocno_t) * max_regno);
+ = ira_allocate (sizeof (ira_allocno_t) * max_regno);
memset (ira_loop_nodes[i].regno_allocno_map, 0,
- sizeof (allocno_t) * max_regno);
+ sizeof (ira_allocno_t) * max_regno);
memset (ira_loop_nodes[i].reg_pressure, 0,
sizeof (ira_loop_nodes[i].reg_pressure));
ira_loop_nodes[i].mentioned_allocnos = ira_allocate_bitmap ();
@@ -165,7 +166,7 @@ create_loop_tree_nodes (bool loops_p)
/* Free the loop tree node of a loop. */
static void
-finish_loop_tree_node (loop_tree_node_t loop)
+finish_loop_tree_node (ira_loop_tree_node_t loop)
{
if (loop->regno_allocno_map != NULL)
{
@@ -213,7 +214,7 @@ static void
add_loop_to_tree (struct loop *loop)
{
struct loop *parent;
- loop_tree_node_t loop_node, parent_node;
+ ira_loop_tree_node_t loop_node, parent_node;
/* We can not use loop node access macros here because of potential
checking and because the nodes are not initialized enough
@@ -254,10 +255,10 @@ add_loop_to_tree (struct loop *loop)
tree given its root LOOP_NODE. The enumeration starts with LEVEL.
The function returns maximal value of level in the tree + 1. */
static int
-setup_loop_tree_level (loop_tree_node_t loop_node, int level)
+setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
int height, max_height;
- loop_tree_node_t subloop_node;
+ ira_loop_tree_node_t subloop_node;
ira_assert (loop_node->bb == NULL);
loop_node->level = level;
@@ -283,7 +284,7 @@ form_loop_tree (void)
unsigned int i;
basic_block bb;
struct loop *parent;
- loop_tree_node_t bb_node, loop_node;
+ ira_loop_tree_node_t bb_node, loop_node;
loop_p loop;
/* We can not use loop/bb node access macros because of potential
@@ -322,16 +323,17 @@ form_loop_tree (void)
-/* Rebuild REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs. */
+/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
+ tree nodes. */
static void
rebuild_regno_allocno_maps (void)
{
unsigned int l;
int max_regno, regno;
- allocno_t a;
- loop_tree_node_t loop_tree_node;
+ ira_allocno_t a;
+ ira_loop_tree_node_t loop_tree_node;
loop_p loop;
- allocno_iterator ai;
+ ira_allocno_iterator ai;
max_regno = max_reg_num ();
for (l = 0; VEC_iterate (loop_p, ira_loops.larray, l, loop); l++)
@@ -339,13 +341,13 @@ rebuild_regno_allocno_maps (void)
{
ira_free (ira_loop_nodes[l].regno_allocno_map);
ira_loop_nodes[l].regno_allocno_map
- = ira_allocate (sizeof (allocno_t) * max_regno);
+ = ira_allocate (sizeof (ira_allocno_t) * max_regno);
memset (ira_loop_nodes[l].regno_allocno_map, 0,
- sizeof (allocno_t) * max_regno);
+ sizeof (ira_allocno_t) * max_regno);
}
- ira_free (regno_allocno_map);
- regno_allocno_map = ira_allocate (max_regno * sizeof (allocno_t));
- memset (regno_allocno_map, 0, max_regno * sizeof (allocno_t));
+ ira_free (ira_regno_allocno_map);
+ ira_regno_allocno_map = ira_allocate (max_regno * sizeof (ira_allocno_t));
+ memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
FOR_EACH_ALLOCNO (a, ai)
{
if (ALLOCNO_CAP_MEMBER (a) != NULL)
@@ -353,8 +355,8 @@ rebuild_regno_allocno_maps (void)
continue;
regno = ALLOCNO_REGNO (a);
loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
- ALLOCNO_NEXT_REGNO_ALLOCNO (a) = regno_allocno_map[regno];
- regno_allocno_map[regno] = a;
+ ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
+ ira_regno_allocno_map[regno] = a;
if (loop_tree_node->regno_allocno_map[regno] == NULL)
/* Remember that we can create temporary allocnos to break
cycles in register shuffle. */
@@ -366,7 +368,7 @@ rebuild_regno_allocno_maps (void)
/* Array of vectors containing calls given pseudo-register lives
through. */
-VEC(rtx, heap) **regno_calls;
+VEC(rtx, heap) **ira_regno_calls;
/* The length of the previous array. */
static int regno_calls_num;
@@ -377,8 +379,8 @@ static void
initiate_calls (void)
{
regno_calls_num = max_reg_num () * 3 / 2;
- regno_calls = ira_allocate (regno_calls_num * sizeof (VEC(rtx, heap) *));
- memset (regno_calls, 0, regno_calls_num * sizeof (VEC(rtx, heap) *));
+ ira_regno_calls = ira_allocate (regno_calls_num * sizeof (VEC(rtx, heap) *));
+ memset (ira_regno_calls, 0, regno_calls_num * sizeof (VEC(rtx, heap) *));
}
/* Expand the array of vectors containing calls for
@@ -390,9 +392,9 @@ expand_calls (void)
if (max_regno > regno_calls_num)
{
- regno_calls = ira_reallocate (regno_calls,
+ ira_regno_calls = ira_reallocate (ira_regno_calls,
max_regno * sizeof (VEC(rtx, heap) *));
- memset (regno_calls + regno_calls_num, 0,
+ memset (ira_regno_calls + regno_calls_num, 0,
(max_regno - regno_calls_num) * sizeof (VEC(rtx, heap) *));
regno_calls_num = max_regno;
}
@@ -408,27 +410,27 @@ compress_calls (void)
for (regno = 0; regno < regno_calls_num; regno++)
{
- allocno_calls = VEC_address (rtx, regno_calls[regno]);
- length = VEC_length (rtx, regno_calls[regno]);
+ allocno_calls = VEC_address (rtx, ira_regno_calls[regno]);
+ length = VEC_length (rtx, ira_regno_calls[regno]);
for (last = curr = 0; curr < length; curr++)
if (allocno_calls[curr] != NULL_RTX)
allocno_calls[last++] = allocno_calls[curr];
- VEC_truncate (rtx, regno_calls[regno], last);
+ VEC_truncate (rtx, ira_regno_calls[regno], last);
}
}
/* Add CALLs to REGNO's vector of intersected calls and returns the
element index in the vector. */
int
-add_regno_call (int regno, rtx call)
+ira_add_regno_call (int regno, rtx call)
{
int result;
gcc_assert (regno < regno_calls_num);
- if (regno_calls[regno] == NULL)
- regno_calls[regno] = VEC_alloc (rtx, heap, 10);
- result = VEC_length (rtx, regno_calls[regno]);
- VEC_safe_push (rtx, heap, regno_calls[regno], call);
+ if (ira_regno_calls[regno] == NULL)
+ ira_regno_calls[regno] = VEC_alloc (rtx, heap, 10);
+ result = VEC_length (rtx, ira_regno_calls[regno]);
+ VEC_safe_push (rtx, heap, ira_regno_calls[regno], call);
return result;
}
@@ -440,8 +442,8 @@ finish_calls (void)
int i;
for (i = 0; i < regno_calls_num; i++)
- VEC_free (rtx, heap, regno_calls[i]);
- ira_free (regno_calls);
+ VEC_free (rtx, heap, ira_regno_calls[i]);
+ ira_free (ira_regno_calls);
}
@@ -451,11 +453,11 @@ static alloc_pool allocno_pool, allocno_live_range_pool;
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
-static VEC(allocno_t,heap) *allocno_vec;
+static VEC(ira_allocno_t,heap) *allocno_vec;
/* Vec containing references to all created allocnos. It is a
- container of conflict_id_allocno_map. */
-static VEC(allocno_t,heap) *conflict_id_allocno_map_vec;
+ container of ira_conflict_id_allocno_map. */
+static VEC(ira_allocno_t,heap) *ira_conflict_id_allocno_map_vec;
/* Initialize data concerning allocnos. */
static void
@@ -463,33 +465,35 @@ initiate_allocnos (void)
{
allocno_live_range_pool
= create_alloc_pool ("allocno live ranges",
- sizeof (struct allocno_live_range), 100);
- allocno_pool = create_alloc_pool ("allocnos", sizeof (struct allocno), 100);
- allocno_vec = VEC_alloc (allocno_t, heap, max_reg_num () * 2);
- allocnos = NULL;
- allocnos_num = 0;
- conflict_id_allocno_map_vec
- = VEC_alloc (allocno_t, heap, max_reg_num () * 2);
- conflict_id_allocno_map = NULL;
- regno_allocno_map = ira_allocate (max_reg_num () * sizeof (allocno_t));
- memset (regno_allocno_map, 0, max_reg_num () * sizeof (allocno_t));
+ sizeof (struct ira_allocno_live_range), 100);
+ allocno_pool
+ = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
+ allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
+ ira_allocnos = NULL;
+ ira_allocnos_num = 0;
+ ira_conflict_id_allocno_map_vec
+ = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
+ ira_conflict_id_allocno_map = NULL;
+ ira_regno_allocno_map
+ = ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
+ memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
/* Create and return the allocno corresponding to REGNO in
LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
same regno if CAP_P is FALSE. */
-allocno_t
-create_allocno (int regno, bool cap_p, loop_tree_node_t loop_tree_node)
+ira_allocno_t
+ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
{
- allocno_t a;
+ ira_allocno_t a;
a = pool_alloc (allocno_pool);
ALLOCNO_REGNO (a) = regno;
ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
if (! cap_p)
{
- ALLOCNO_NEXT_REGNO_ALLOCNO (a) = regno_allocno_map[regno];
- regno_allocno_map[regno] = a;
+ ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
+ ira_regno_allocno_map[regno] = a;
if (loop_tree_node->regno_allocno_map[regno] == NULL)
/* Remember that we can create temporary allocnos to break
cycles in register shuffle on region borders (see
@@ -498,11 +502,12 @@ create_allocno (int regno, bool cap_p, loop_tree_node_t loop_tree_node)
}
ALLOCNO_CAP (a) = NULL;
ALLOCNO_CAP_MEMBER (a) = NULL;
- ALLOCNO_NUM (a) = allocnos_num;
+ ALLOCNO_NUM (a) = ira_allocnos_num;
ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = NULL;
ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = 0;
- COPY_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a), no_alloc_regs);
- COPY_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), no_alloc_regs);
+ COPY_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a), ira_no_alloc_regs);
+ COPY_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
+ ira_no_alloc_regs);
ALLOCNO_NREFS (a) = 0;
ALLOCNO_FREQ (a) = 1;
ALLOCNO_HARD_REGNO (a) = -1;
@@ -511,7 +516,7 @@ create_allocno (int regno, bool cap_p, loop_tree_node_t loop_tree_node)
ALLOCNO_CALLS_CROSSED_START (a) = -1;
#ifdef STACK_REGS
ALLOCNO_NO_STACK_REG_P (a) = false;
- ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
+ IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
ALLOCNO_MEM_OPTIMIZED_DEST (a) = NULL;
ALLOCNO_MEM_OPTIMIZED_DEST_P (a) = false;
@@ -542,31 +547,31 @@ create_allocno (int regno, bool cap_p, loop_tree_node_t loop_tree_node)
ALLOCNO_LIVE_RANGES (a) = NULL;
ALLOCNO_MIN (a) = INT_MAX;
ALLOCNO_MAX (a) = -1;
- ALLOCNO_CONFLICT_ID (a) = allocnos_num;
- VEC_safe_push (allocno_t, heap, allocno_vec, a);
- allocnos = VEC_address (allocno_t, allocno_vec);
- allocnos_num = VEC_length (allocno_t, allocno_vec);
- VEC_safe_push (allocno_t, heap, conflict_id_allocno_map_vec, a);
- conflict_id_allocno_map
- = VEC_address (allocno_t, conflict_id_allocno_map_vec);
+ ALLOCNO_CONFLICT_ID (a) = ira_allocnos_num;
+ VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
+ ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
+ ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
+ VEC_safe_push (ira_allocno_t, heap, ira_conflict_id_allocno_map_vec, a);
+ ira_conflict_id_allocno_map
+ = VEC_address (ira_allocno_t, ira_conflict_id_allocno_map_vec);
return a;
}
/* Set up cover class for A and update its conflict hard registers. */
void
-set_allocno_cover_class (allocno_t a, enum reg_class cover_class)
+ira_set_allocno_cover_class (ira_allocno_t a, enum reg_class cover_class)
{
ALLOCNO_COVER_CLASS (a) = cover_class;
IOR_COMPL_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
reg_class_contents[cover_class]);
- IOR_COMPL_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
+ IOR_COMPL_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
reg_class_contents[cover_class]);
}
/* Return TRUE if the conflict vector with NUM elements is more
profitable than conflict bit vector for A. */
bool
-conflict_vector_profitable_p (allocno_t a, int num)
+ira_conflict_vector_profitable_p (ira_allocno_t a, int num)
{
int nw;
@@ -575,21 +580,22 @@ conflict_vector_profitable_p (allocno_t a, int num)
allocation. */
return false;
- nw = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + INT_BITS) / INT_BITS;
- return 2 * sizeof (allocno_t) * (num + 1) < 3 * nw * sizeof (INT_TYPE);
+ nw = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + IRA_INT_BITS) / IRA_INT_BITS;
+ return (2 * sizeof (ira_allocno_t) * (num + 1)
+ < 3 * nw * sizeof (IRA_INT_TYPE));
}
/* Allocates and initialize the conflict vector of A for NUM
conflicting allocnos. */
void
-allocate_allocno_conflict_vec (allocno_t a, int num)
+ira_allocate_allocno_conflict_vec (ira_allocno_t a, int num)
{
int size;
- allocno_t *vec;
+ ira_allocno_t *vec;
ira_assert (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) == NULL);
num++; /* for NULL end marker */
- size = sizeof (allocno_t) * num;
+ size = sizeof (ira_allocno_t) * num;
vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = ira_allocate (size);
vec[0] = NULL;
ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = 0;
@@ -599,13 +605,13 @@ allocate_allocno_conflict_vec (allocno_t a, int num)
/* Allocate and initialize the conflict bit vector of A. */
static void
-allocate_allocno_conflict_bit_vec (allocno_t a)
+allocate_allocno_conflict_bit_vec (ira_allocno_t a)
{
unsigned int size;
ira_assert (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) == NULL);
- size = ((ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + INT_BITS)
- / INT_BITS * sizeof (INT_TYPE));
+ size = ((ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + IRA_INT_BITS)
+ / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = ira_allocate (size);
memset (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a), 0, size);
ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a) = size;
@@ -615,35 +621,35 @@ allocate_allocno_conflict_bit_vec (allocno_t a)
/* Allocate and initialize the conflict vector or conflict bit vector
of A for NUM conflicting allocnos whatever is more profitable. */
void
-allocate_allocno_conflicts (allocno_t a, int num)
+ira_allocate_allocno_conflicts (ira_allocno_t a, int num)
{
- if (conflict_vector_profitable_p (a, num))
- allocate_allocno_conflict_vec (a, num);
+ if (ira_conflict_vector_profitable_p (a, num))
+ ira_allocate_allocno_conflict_vec (a, num);
else
allocate_allocno_conflict_bit_vec (a);
}
/* Add A2 to the conflicts of A1. */
static void
-add_to_allocno_conflicts (allocno_t a1, allocno_t a2)
+add_to_allocno_conflicts (ira_allocno_t a1, ira_allocno_t a2)
{
int num;
unsigned int size;
if (ALLOCNO_CONFLICT_VEC_P (a1))
{
- allocno_t *vec;
+ ira_allocno_t *vec;
num = ALLOCNO_CONFLICT_ALLOCNOS_NUM (a1) + 2;
if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1)
- >= num * sizeof (allocno_t))
+ >= num * sizeof (ira_allocno_t))
vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1);
else
{
- size = (3 * num / 2 + 1) * sizeof (allocno_t);
+ size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
vec = ira_allocate (size);
memcpy (vec, ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1),
- sizeof (allocno_t) * ALLOCNO_CONFLICT_ALLOCNOS_NUM (a1));
+ sizeof (ira_allocno_t) * ALLOCNO_CONFLICT_ALLOCNOS_NUM (a1));
ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1));
ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1) = vec;
ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) = size;
@@ -655,46 +661,46 @@ add_to_allocno_conflicts (allocno_t a1, allocno_t a2)
else
{
int nw, added_head_nw, id;
- INT_TYPE *vec;
+ IRA_INT_TYPE *vec;
id = ALLOCNO_CONFLICT_ID (a2);
vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1);
if (ALLOCNO_MIN (a1) > id)
{
/* Expand head of the bit vector. */
- added_head_nw = (ALLOCNO_MIN (a1) - id - 1) / INT_BITS + 1;
- nw = (ALLOCNO_MAX (a1) - ALLOCNO_MIN (a1)) / INT_BITS + 1;
- size = (nw + added_head_nw) * sizeof (INT_TYPE);
+ added_head_nw = (ALLOCNO_MIN (a1) - id - 1) / IRA_INT_BITS + 1;
+ nw = (ALLOCNO_MAX (a1) - ALLOCNO_MIN (a1)) / IRA_INT_BITS + 1;
+ size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) >= size)
{
- memmove ((char *) vec + added_head_nw * sizeof (INT_TYPE),
- vec, nw * sizeof (INT_TYPE));
- memset (vec, 0, added_head_nw * sizeof (INT_TYPE));
+ memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
+ vec, nw * sizeof (IRA_INT_TYPE));
+ memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
}
else
{
- size = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (INT_TYPE);
+ size = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
vec = ira_allocate (size);
memcpy
- ((char *) vec + added_head_nw * sizeof (INT_TYPE),
- ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1), nw * sizeof (INT_TYPE));
- memset (vec, 0, added_head_nw * sizeof (INT_TYPE));
- memset ((char *) vec + (nw + added_head_nw) * sizeof (INT_TYPE),
- 0, size - (nw + added_head_nw) * sizeof (INT_TYPE));
+ ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
+ ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1), nw * sizeof (IRA_INT_TYPE));
+ memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
+ memset ((char *) vec + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
+ 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1));
ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1) = vec;
ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) = size;
}
- ALLOCNO_MIN (a1) -= added_head_nw * INT_BITS;
+ ALLOCNO_MIN (a1) -= added_head_nw * IRA_INT_BITS;
}
else if (ALLOCNO_MAX (a1) < id)
{
- nw = (id - ALLOCNO_MIN (a1)) / INT_BITS + 1;
- size = nw * sizeof (INT_TYPE);
+ nw = (id - ALLOCNO_MIN (a1)) / IRA_INT_BITS + 1;
+ size = nw * sizeof (IRA_INT_TYPE);
if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1) < size)
{
/* Expand tail of the bit vector. */
- size = (3 * nw / 2 + 1) * sizeof (INT_TYPE);
+ size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
vec = ira_allocate (size);
memcpy (vec, ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a1),
ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a1));
@@ -712,7 +718,7 @@ add_to_allocno_conflicts (allocno_t a1, allocno_t a2)
/* Add A1 to the conflicts of A2 and vise versa. */
void
-add_allocno_conflict (allocno_t a1, allocno_t a2)
+ira_add_allocno_conflict (ira_allocno_t a1, ira_allocno_t a2)
{
add_to_allocno_conflicts (a1, a2);
add_to_allocno_conflicts (a2, a1);
@@ -720,19 +726,20 @@ add_allocno_conflict (allocno_t a1, allocno_t a2)
/* Clear all conflicts of allocno A. */
static void
-clear_allocno_conflicts (allocno_t a)
+clear_allocno_conflicts (ira_allocno_t a)
{
if (ALLOCNO_CONFLICT_VEC_P (a))
{
ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = 0;
- ((allocno_t *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a))[0] = NULL;
+ ((ira_allocno_t *) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a))[0] = NULL;
}
else if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a) != 0)
{
int nw;
- nw = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a)) / INT_BITS + 1;
- memset (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a), 0, nw * sizeof (INT_TYPE));
+ nw = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a)) / IRA_INT_BITS + 1;
+ memset (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a), 0,
+ nw * sizeof (IRA_INT_TYPE));
}
}
@@ -746,9 +753,9 @@ static int curr_allocno_conflict_check_tick;
/* Remove duplications in conflict vector of A. */
static void
-compress_allocno_conflict_vec (allocno_t a)
+compress_allocno_conflict_vec (ira_allocno_t a)
{
- allocno_t *vec, conflict_a;
+ ira_allocno_t *vec, conflict_a;
int i, j;
ira_assert (ALLOCNO_CONFLICT_VEC_P (a));
@@ -772,11 +779,11 @@ compress_allocno_conflict_vec (allocno_t a)
static void
compress_conflict_vecs (void)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
- allocno_conflict_check = ira_allocate (sizeof (int) * allocnos_num);
- memset (allocno_conflict_check, 0, sizeof (int) * allocnos_num);
+ allocno_conflict_check = ira_allocate (sizeof (int) * ira_allocnos_num);
+ memset (allocno_conflict_check, 0, sizeof (int) * ira_allocnos_num);
curr_allocno_conflict_check_tick = 0;
FOR_EACH_ALLOCNO (a, ai)
if (ALLOCNO_CONFLICT_VEC_P (a))
@@ -787,7 +794,7 @@ compress_conflict_vecs (void)
/* This recursive function outputs allocno A and if it is a cap the
function outputs its members. */
void
-print_expanded_allocno (allocno_t a)
+ira_print_expanded_allocno (ira_allocno_t a)
{
basic_block bb;
@@ -799,25 +806,25 @@ print_expanded_allocno (allocno_t a)
if (ALLOCNO_CAP_MEMBER (a) != NULL)
{
fprintf (ira_dump_file, ":");
- print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
+ ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
}
fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
parent loop. */
-static allocno_t
-create_cap_allocno (allocno_t a)
+static ira_allocno_t
+create_cap_allocno (ira_allocno_t a)
{
- allocno_t cap;
- loop_tree_node_t parent;
+ ira_allocno_t cap;
+ ira_loop_tree_node_t parent;
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
&& ALLOCNO_NEXT_COALESCED_ALLOCNO (a) == a);
parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
- cap = create_allocno (ALLOCNO_REGNO (a), true, parent);
+ cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
- set_allocno_cover_class (cap, ALLOCNO_COVER_CLASS (a));
+ ira_set_allocno_cover_class (cap, ALLOCNO_COVER_CLASS (a));
ALLOCNO_AVAILABLE_REGS_NUM (cap) = ALLOCNO_AVAILABLE_REGS_NUM (a);
ALLOCNO_CAP_MEMBER (cap) = a;
bitmap_set_bit (parent->mentioned_allocnos, ALLOCNO_NUM (cap));
@@ -828,7 +835,7 @@ create_cap_allocno (allocno_t a)
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Creating cap ");
- print_expanded_allocno (cap);
+ ira_print_expanded_allocno (cap);
fprintf (ira_dump_file, "\n");
}
return cap;
@@ -837,15 +844,15 @@ create_cap_allocno (allocno_t a)
/* Propagates info to the CAP from its member. We must propagate info
which is not available at the cap creation time. */
static void
-propagate_info_to_cap (allocno_t cap)
+propagate_info_to_cap (ira_allocno_t cap)
{
int regno, conflicts_num;
enum reg_class cover_class;
- allocno_t a, conflict_allocno, conflict_parent_allocno;
- allocno_t another_a, parent_a;
- loop_tree_node_t parent;
- copy_t cp, next_cp;
- allocno_conflict_iterator aci;
+ ira_allocno_t a, conflict_allocno, conflict_parent_allocno;
+ ira_allocno_t another_a, parent_a;
+ ira_loop_tree_node_t parent;
+ ira_copy_t cp, next_cp;
+ ira_allocno_conflict_iterator aci;
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (cap) == cap
&& ALLOCNO_NEXT_COALESCED_ALLOCNO (cap) == cap
@@ -854,9 +861,9 @@ propagate_info_to_cap (allocno_t cap)
a = ALLOCNO_CAP_MEMBER (cap);
parent = ALLOCNO_LOOP_TREE_NODE (cap);
cover_class = ALLOCNO_COVER_CLASS (cap);
- allocate_and_copy_costs
+ ira_allocate_and_copy_costs
(&ALLOCNO_HARD_REG_COSTS (cap), cover_class, ALLOCNO_HARD_REG_COSTS (a));
- allocate_and_copy_costs
+ ira_allocate_and_copy_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
@@ -864,13 +871,14 @@ propagate_info_to_cap (allocno_t cap)
ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (cap),
ALLOCNO_CONFLICT_HARD_REGS (a));
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (cap),
- ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (cap),
+ IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_CALLS_CROSSED_START (cap) = ALLOCNO_CALLS_CROSSED_START (a);
#ifdef STACK_REGS
ALLOCNO_NO_STACK_REG_P (cap) = ALLOCNO_NO_STACK_REG_P (a);
- ALLOCNO_TOTAL_NO_STACK_REG_P (cap) = ALLOCNO_TOTAL_NO_STACK_REG_P (a);
+ IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (cap)
+ = IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (a);
#endif
/* Add copies to the cap. */
for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
@@ -896,15 +904,15 @@ propagate_info_to_cap (allocno_t cap)
/* Upper level allocno might be not existing because it is not
mentioned or lived on the region border. It is just living
on BB start of the loop. */
- add_allocno_copy (cap, parent_a, cp->freq, cp->insn,
- cp->loop_tree_node);
+ ira_add_allocno_copy (cap, parent_a, cp->freq, cp->insn,
+ cp->loop_tree_node);
}
if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) != NULL)
{
conflicts_num = 0;
FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
conflicts_num++;
- allocate_allocno_conflicts (cap, conflicts_num);
+ ira_allocate_allocno_conflicts (cap, conflicts_num);
FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
{
regno = ALLOCNO_REGNO (conflict_allocno);
@@ -914,13 +922,13 @@ propagate_info_to_cap (allocno_t cap)
if (conflict_parent_allocno != NULL
&& (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (conflict_parent_allocno)
!= NULL))
- add_allocno_conflict (cap, conflict_parent_allocno);
+ ira_add_allocno_conflict (cap, conflict_parent_allocno);
}
}
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Propagate info to cap ");
- print_expanded_allocno (cap);
+ ira_print_expanded_allocno (cap);
if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (cap) != NULL)
{
fprintf (ira_dump_file, "\n Conflicts:");
@@ -942,8 +950,8 @@ propagate_info_to_cap (allocno_t cap)
/* Create and return allocno live range with given attributes. */
allocno_live_range_t
-create_allocno_live_range (allocno_t a, int start, int finish,
- allocno_live_range_t next)
+ira_create_allocno_live_range (ira_allocno_t a, int start, int finish,
+ allocno_live_range_t next)
{
allocno_live_range_t p;
@@ -989,51 +997,51 @@ copy_allocno_live_range_list (allocno_live_range_t r)
/* Free allocno live range R. */
void
-finish_allocno_live_range (allocno_live_range_t r)
+ira_finish_allocno_live_range (allocno_live_range_t r)
{
pool_free (allocno_live_range_pool, r);
}
/* Free updated register costs of allocno A. */
void
-free_allocno_updated_costs (allocno_t a)
+ira_free_allocno_updated_costs (ira_allocno_t a)
{
enum reg_class cover_class;
cover_class = ALLOCNO_COVER_CLASS (a);
if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
- free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
+ ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
- free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
- cover_class);
+ ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
+ cover_class);
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A. */
static void
-finish_allocno (allocno_t a)
+finish_allocno (ira_allocno_t a)
{
allocno_live_range_t r, next_r;
enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
- allocnos[ALLOCNO_NUM (a)] = NULL;
- conflict_id_allocno_map[ALLOCNO_CONFLICT_ID (a)] = NULL;
+ ira_allocnos[ALLOCNO_NUM (a)] = NULL;
+ ira_conflict_id_allocno_map[ALLOCNO_CONFLICT_ID (a)] = NULL;
if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) != NULL)
ira_free (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a));
if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
- free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
+ ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
- free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
+ ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
- free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
+ ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
- free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
- cover_class);
+ ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
+ cover_class);
for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = next_r)
{
next_r = r->next;
- finish_allocno_live_range (r);
+ ira_finish_allocno_live_range (r);
}
pool_free (allocno_pool, a);
}
@@ -1042,14 +1050,14 @@ finish_allocno (allocno_t a)
static void
finish_allocnos (void)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
finish_allocno (a);
- ira_free (regno_allocno_map);
- VEC_free (allocno_t, heap, conflict_id_allocno_map_vec);
- VEC_free (allocno_t, heap, allocno_vec);
+ ira_free (ira_regno_allocno_map);
+ VEC_free (ira_allocno_t, heap, ira_conflict_id_allocno_map_vec);
+ VEC_free (ira_allocno_t, heap, allocno_vec);
free_alloc_pool (allocno_pool);
free_alloc_pool (allocno_live_range_pool);
}
@@ -1060,27 +1068,28 @@ finish_allocnos (void)
static alloc_pool copy_pool;
/* Vec containing references to all created copies. It is a
- container of array copies. */
-static VEC(copy_t,heap) *copy_vec;
+ container of array ira_copies. */
+static VEC(ira_copy_t,heap) *copy_vec;
/* The function initializes data concerning allocno copies. */
static void
initiate_copies (void)
{
- copy_pool = create_alloc_pool ("copies", sizeof (struct allocno_copy), 100);
- copy_vec = VEC_alloc (copy_t, heap, get_max_uid ());
- copies = NULL;
- copies_num = 0;
+ copy_pool
+ = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
+ copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
+ ira_copies = NULL;
+ ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
LOOP_TREE_NODE if any. */
-static copy_t
-find_allocno_copy (allocno_t a1, allocno_t a2, rtx insn,
- loop_tree_node_t loop_tree_node)
+static ira_copy_t
+find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
+ ira_loop_tree_node_t loop_tree_node)
{
- copy_t cp, next_cp;
- allocno_t another_a;
+ ira_copy_t cp, next_cp;
+ ira_allocno_t another_a;
for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
{
@@ -1103,32 +1112,32 @@ find_allocno_copy (allocno_t a1, allocno_t a2, rtx insn,
return NULL;
}
-/* The function creates and returns created in LOOP_TREE_NODE copy of
- allocnos FIRST and SECOND with frequency FREQ, insn INSN. */
-copy_t
-create_copy (allocno_t first, allocno_t second, int freq, rtx insn,
- loop_tree_node_t loop_tree_node)
+/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
+ SECOND, FREQ, and INSN. */
+ira_copy_t
+ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq, rtx insn,
+ ira_loop_tree_node_t loop_tree_node)
{
- copy_t cp;
+ ira_copy_t cp;
cp = pool_alloc (copy_pool);
- cp->num = copies_num;
+ cp->num = ira_copies_num;
cp->first = first;
cp->second = second;
cp->freq = freq;
cp->insn = insn;
cp->loop_tree_node = loop_tree_node;
- VEC_safe_push (copy_t, heap, copy_vec, cp);
- copies = VEC_address (copy_t, copy_vec);
- copies_num = VEC_length (copy_t, copy_vec);
+ VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
+ ira_copies = VEC_address (ira_copy_t, copy_vec);
+ ira_copies_num = VEC_length (ira_copy_t, copy_vec);
return cp;
}
/* Attach a copy CP to allocnos involved into the copy. */
void
-add_allocno_copy_to_list (copy_t cp)
+ira_add_allocno_copy_to_list (ira_copy_t cp)
{
- allocno_t first = cp->first, second = cp->second;
+ ira_allocno_t first = cp->first, second = cp->second;
cp->prev_first_allocno_copy = NULL;
cp->prev_second_allocno_copy = NULL;
@@ -1154,10 +1163,10 @@ add_allocno_copy_to_list (copy_t cp)
/* Detach a copy CP from allocnos involved into the copy. */
void
-remove_allocno_copy_from_list (copy_t cp)
+ira_remove_allocno_copy_from_list (ira_copy_t cp)
{
- allocno_t first = cp->first, second = cp->second;
- copy_t prev, next;
+ ira_allocno_t first = cp->first, second = cp->second;
+ ira_copy_t prev, next;
next = cp->next_first_allocno_copy;
prev = cp->prev_first_allocno_copy;
@@ -1197,10 +1206,10 @@ remove_allocno_copy_from_list (copy_t cp)
/* Make a copy CP a canonical copy where number of the
first allocno is less than the second one. */
void
-swap_allocno_copy_ends_if_necessary (copy_t cp)
+ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
- allocno_t temp;
- copy_t temp_cp;
+ ira_allocno_t temp;
+ ira_copy_t temp_cp;
if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
return;
@@ -1222,30 +1231,30 @@ swap_allocno_copy_ends_if_necessary (copy_t cp)
the copy of allocnos FIRST and SECOND with frequency FREQ
corresponding to move insn INSN (if any) and originated from
LOOP_TREE_NODE. */
-copy_t
-add_allocno_copy (allocno_t first, allocno_t second, int freq, rtx insn,
- loop_tree_node_t loop_tree_node)
+ira_copy_t
+ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
+ rtx insn, ira_loop_tree_node_t loop_tree_node)
{
- copy_t cp;
+ ira_copy_t cp;
if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
{
cp->freq += freq;
return cp;
}
- cp = create_copy (first, second, freq, insn, loop_tree_node);
+ cp = ira_create_copy (first, second, freq, insn, loop_tree_node);
ira_assert (first != NULL && second != NULL);
- add_allocno_copy_to_list (cp);
- swap_allocno_copy_ends_if_necessary (cp);
+ ira_add_allocno_copy_to_list (cp);
+ ira_swap_allocno_copy_ends_if_necessary (cp);
return cp;
}
/* Print info about copies involving allocno A into file F. */
static void
-print_allocno_copies (FILE *f, allocno_t a)
+print_allocno_copies (FILE *f, ira_allocno_t a)
{
- allocno_t another_a;
- copy_t cp, next_cp;
+ ira_allocno_t another_a;
+ ira_copy_t cp, next_cp;
fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
@@ -1270,14 +1279,14 @@ print_allocno_copies (FILE *f, allocno_t a)
/* Print info about copies involving allocno A into stderr. */
void
-debug_allocno_copies (allocno_t a)
+ira_debug_allocno_copies (ira_allocno_t a)
{
print_allocno_copies (stderr, a);
}
/* The function frees memory allocated for copy CP. */
static void
-finish_copy (copy_t cp)
+finish_copy (ira_copy_t cp)
{
pool_free (copy_pool, cp);
}
@@ -1287,12 +1296,12 @@ finish_copy (copy_t cp)
static void
finish_copies (void)
{
- copy_t cp;
- copy_iterator ci;
+ ira_copy_t cp;
+ ira_copy_iterator ci;
FOR_EACH_COPY (cp, ci)
finish_copy (cp);
- VEC_free (copy_t, heap, copy_vec);
+ VEC_free (ira_copy_t, heap, copy_vec);
free_alloc_pool (copy_pool);
}
@@ -1309,26 +1318,27 @@ initiate_cost_vectors (void)
int i;
enum reg_class cover_class;
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
cost_vector_pool[cover_class]
= create_alloc_pool ("cost vectors",
- sizeof (int) * class_hard_regs_num[cover_class],
+ sizeof (int)
+ * ira_class_hard_regs_num[cover_class],
100);
}
}
/* Allocate and return a cost vector VEC for COVER_CLASS. */
int *
-allocate_cost_vector (enum reg_class cover_class)
+ira_allocate_cost_vector (enum reg_class cover_class)
{
return pool_alloc (cost_vector_pool[cover_class]);
}
/* Free a cost vector VEC for COVER_CLASS. */
void
-free_cost_vector (int *vec, enum reg_class cover_class)
+ira_free_cost_vector (int *vec, enum reg_class cover_class)
{
ira_assert (vec != NULL);
pool_free (cost_vector_pool[cover_class], vec);
@@ -1342,9 +1352,9 @@ finish_cost_vectors (void)
int i;
enum reg_class cover_class;
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
free_alloc_pool (cost_vector_pool[cover_class]);
}
}
@@ -1352,8 +1362,8 @@ finish_cost_vectors (void)
/* The current loop tree node and its regno allocno map. */
-loop_tree_node_t ira_curr_loop_tree_node;
-allocno_t *ira_curr_regno_allocno_map;
+ira_loop_tree_node_t ira_curr_loop_tree_node;
+ira_allocno_t *ira_curr_regno_allocno_map;
/* This recursive function traverses loop tree with root LOOP_NODE
calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
@@ -1362,11 +1372,11 @@ allocno_t *ira_curr_regno_allocno_map;
basic block nodes of LOOP_NODE is also processed (before its
subloop nodes). */
void
-traverse_loop_tree (bool bb_p, loop_tree_node_t loop_node,
- void (*preorder_func) (loop_tree_node_t),
- void (*postorder_func) (loop_tree_node_t))
+ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
+ void (*preorder_func) (ira_loop_tree_node_t),
+ void (*postorder_func) (ira_loop_tree_node_t))
{
- loop_tree_node_t subloop_node;
+ ira_loop_tree_node_t subloop_node;
ira_assert (loop_node->bb == NULL);
ira_curr_loop_tree_node = loop_node;
@@ -1393,8 +1403,8 @@ traverse_loop_tree (bool bb_p, loop_tree_node_t loop_node,
subloop_node = subloop_node->subloop_next)
{
ira_assert (subloop_node->bb == NULL);
- traverse_loop_tree (bb_p, subloop_node,
- preorder_func, postorder_func);
+ ira_traverse_loop_tree (bb_p, subloop_node,
+ preorder_func, postorder_func);
}
ira_curr_loop_tree_node = loop_node;
@@ -1425,10 +1435,10 @@ create_insn_allocnos (rtx x, bool output_p)
if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
{
- allocno_t a;
+ ira_allocno_t a;
if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
- a = create_allocno (regno, false, ira_curr_loop_tree_node);
+ a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
ALLOCNO_NREFS (a)++;
ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
@@ -1478,7 +1488,7 @@ create_insn_allocnos (rtx x, bool output_p)
basic block represented by the corresponding loop tree node
BB_NODE. */
static void
-create_bb_allocnos (loop_tree_node_t bb_node)
+create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
basic_block bb;
rtx insn;
@@ -1494,7 +1504,7 @@ create_bb_allocnos (loop_tree_node_t bb_node)
another. */
EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
if (ira_curr_regno_allocno_map[i] == NULL)
- create_allocno (i, false, ira_curr_loop_tree_node);
+ ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
@@ -1514,7 +1524,7 @@ create_loop_allocnos (edge e)
if (bitmap_bit_p (live_in_regs, i))
{
if (ira_curr_regno_allocno_map[i] == NULL)
- create_allocno (i, false, ira_curr_loop_tree_node);
+ ira_create_allocno (i, false, ira_curr_loop_tree_node);
bitmap_set_bit (border_allocnos,
ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
}
@@ -1522,9 +1532,9 @@ create_loop_allocnos (edge e)
/* Create allocnos corresponding to pseudo-registers living in loop
represented by the corresponding loop tree node LOOP_NODE. This
- function is called by traverse_loop_tree. */
+ function is called by ira_traverse_loop_tree. */
static void
-create_loop_tree_node_allocnos (loop_tree_node_t loop_node)
+create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
if (loop_node->bb != NULL)
create_bb_allocnos (loop_node);
@@ -1552,20 +1562,20 @@ static void
create_allocnos (void)
{
int i;
- allocno_t a, parent_a;
- loop_tree_node_t parent;
+ ira_allocno_t a, parent_a;
+ ira_loop_tree_node_t parent;
/* We need to process BB first to correctly link allocnos by member
next_regno_allocno. */
- traverse_loop_tree (true, ira_loop_tree_root,
- create_loop_tree_node_allocnos, NULL);
+ ira_traverse_loop_tree (true, ira_loop_tree_root,
+ create_loop_tree_node_allocnos, NULL);
if (flag_ira_algorithm != IRA_ALGORITHM_REGIONAL
&& flag_ira_algorithm != IRA_ALGORITHM_MIXED)
return;
/* Propagate number of references and frequencies for regional
register allocation. */
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
@@ -1583,10 +1593,10 @@ static void
setup_min_max_allocno_live_range_point (void)
{
int i;
- allocno_t a, parent_a, cap;
- allocno_iterator ai;
+ ira_allocno_t a, parent_a, cap;
+ ira_allocno_iterator ai;
allocno_live_range_t r;
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
FOR_EACH_ALLOCNO (a, ai)
{
@@ -1599,7 +1609,7 @@ setup_min_max_allocno_live_range_point (void)
ALLOCNO_MIN (a) = r->start;
}
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
@@ -1627,8 +1637,8 @@ setup_min_max_allocno_live_range_point (void)
#ifdef ENABLE_IRA_CHECKING
FOR_EACH_ALLOCNO (a, ai)
{
- if ((0 <= ALLOCNO_MIN (a) && ALLOCNO_MIN (a) <= max_point)
- && (0 <= ALLOCNO_MAX (a) && ALLOCNO_MAX (a) <= max_point))
+ if ((0 <= ALLOCNO_MIN (a) && ALLOCNO_MIN (a) <= ira_max_point)
+ && (0 <= ALLOCNO_MAX (a) && ALLOCNO_MAX (a) <= ira_max_point))
continue;
gcc_unreachable ();
}
@@ -1643,7 +1653,8 @@ static int
allocno_range_compare_func (const void *v1p, const void *v2p)
{
int diff;
- allocno_t a1 = *(const allocno_t *) v1p, a2 = *(const allocno_t *) v2p;
+ ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
+ ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
if ((diff = ALLOCNO_COVER_CLASS (a1) - ALLOCNO_COVER_CLASS (a2)) != 0)
return diff;
@@ -1654,25 +1665,25 @@ allocno_range_compare_func (const void *v1p, const void *v2p)
return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
-/* Sort conflict_id_allocno_map and set up conflict id of
+/* Sort ira_conflict_id_allocno_map and set up conflict id of
allocnos. */
static void
sort_conflict_id_allocno_map (void)
{
int i, num;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
num = 0;
FOR_EACH_ALLOCNO (a, ai)
- conflict_id_allocno_map[num++] = a;
- qsort (conflict_id_allocno_map, num, sizeof (allocno_t),
+ ira_conflict_id_allocno_map[num++] = a;
+ qsort (ira_conflict_id_allocno_map, num, sizeof (ira_allocno_t),
allocno_range_compare_func);
for (i = 0; i < num; i++)
- if ((a = conflict_id_allocno_map[i]) != NULL)
+ if ((a = ira_conflict_id_allocno_map[i]) != NULL)
ALLOCNO_CONFLICT_ID (a) = i;
- for (i = num; i < allocnos_num; i++)
- conflict_id_allocno_map[i] = NULL;
+ for (i = num; i < ira_allocnos_num; i++)
+ ira_conflict_id_allocno_map[i] = NULL;
}
/* Set up minimal and maximal conflict ids of allocnos with which
@@ -1683,14 +1694,14 @@ setup_min_max_conflict_allocno_ids (void)
enum reg_class cover_class;
int i, j, min, max, start, finish, first_not_finished, filled_area_start;
int *live_range_min, *last_lived;
- allocno_t a;
+ ira_allocno_t a;
- live_range_min = ira_allocate (sizeof (int) * allocnos_num);
+ live_range_min = ira_allocate (sizeof (int) * ira_allocnos_num);
cover_class = -1;
first_not_finished = -1;
- for (i = 0; i < allocnos_num; i++)
+ for (i = 0; i < ira_allocnos_num; i++)
{
- a = conflict_id_allocno_map[i];
+ a = ira_conflict_id_allocno_map[i];
if (a == NULL)
continue;
if (cover_class != ALLOCNO_COVER_CLASS (a))
@@ -1707,8 +1718,8 @@ setup_min_max_conflict_allocno_ids (void)
range finishes (see function
allocno_range_compare_func). */
while (first_not_finished < i
- && start
- > ALLOCNO_MAX (conflict_id_allocno_map[first_not_finished]))
+ && start > ALLOCNO_MAX (ira_conflict_id_allocno_map
+ [first_not_finished]))
first_not_finished++;
min = first_not_finished;
}
@@ -1719,20 +1730,20 @@ setup_min_max_conflict_allocno_ids (void)
live_range_min[i] = ALLOCNO_MIN (a);
ALLOCNO_MIN (a) = min;
}
- last_lived = ira_allocate (sizeof (int) * max_point);
+ last_lived = ira_allocate (sizeof (int) * ira_max_point);
cover_class = -1;
filled_area_start = -1;
- for (i = allocnos_num - 1; i >= 0; i--)
+ for (i = ira_allocnos_num - 1; i >= 0; i--)
{
- a = conflict_id_allocno_map[i];
+ a = ira_conflict_id_allocno_map[i];
if (a == NULL)
continue;
if (cover_class != ALLOCNO_COVER_CLASS (a))
{
cover_class = ALLOCNO_COVER_CLASS (a);
- for (j = 0; j < max_point; j++)
+ for (j = 0; j < ira_max_point; j++)
last_lived[j] = -1;
- filled_area_start = max_point;
+ filled_area_start = ira_max_point;
}
min = live_range_min[i];
finish = ALLOCNO_MAX (a);
@@ -1762,11 +1773,11 @@ setup_min_max_conflict_allocno_ids (void)
/* Create caps representing allocnos living only inside the loop given
by LOOP_NODE on higher loop level. */
static void
-create_loop_tree_node_caps (loop_tree_node_t loop_node)
+create_loop_tree_node_caps (ira_loop_tree_node_t loop_node)
{
unsigned int i;
bitmap_iterator bi;
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
if (loop_node == ira_loop_tree_root)
return;
@@ -1775,23 +1786,23 @@ create_loop_tree_node_caps (loop_tree_node_t loop_node)
loop_node->border_allocnos);
parent = loop_node->parent;
EXECUTE_IF_SET_IN_BITMAP (allocnos_bitmap, 0, i, bi)
- if (parent->regno_allocno_map[ALLOCNO_REGNO (allocnos[i])] == NULL)
- create_cap_allocno (allocnos[i]);
+ if (parent->regno_allocno_map[ALLOCNO_REGNO (ira_allocnos[i])] == NULL)
+ create_cap_allocno (ira_allocnos[i]);
}
/* Propagate info (not available at the cap creation time) to caps
mentioned in LOOP_NODE. */
static void
-propagate_info_to_loop_tree_node_caps (loop_tree_node_t loop_node)
+propagate_info_to_loop_tree_node_caps (ira_loop_tree_node_t loop_node)
{
unsigned int i;
bitmap_iterator bi;
- allocno_t a;
+ ira_allocno_t a;
ira_assert (loop_node->bb == NULL);
EXECUTE_IF_SET_IN_BITMAP (loop_node->mentioned_allocnos, 0, i, bi)
{
- a = allocnos[i];
+ a = ira_allocnos[i];
if (ALLOCNO_CAP_MEMBER (a) != NULL)
propagate_info_to_cap (a);
}
@@ -1833,7 +1844,7 @@ merge_ranges (allocno_live_range_t r1, allocno_live_range_t r2)
r1->finish = r2->finish;
temp = r2;
r2 = r2->next;
- finish_allocno_live_range (temp);
+ ira_finish_allocno_live_range (temp);
if (r2 == NULL)
{
/* To try to merge with subsequent ranges in r1. */
@@ -1885,8 +1896,9 @@ merge_ranges (allocno_live_range_t r1, allocno_live_range_t r2)
/* This recursive function returns immediate common dominator of two
loop tree nodes N1 and N2. */
-static loop_tree_node_t
-common_loop_tree_node_dominator (loop_tree_node_t n1, loop_tree_node_t n2)
+static ira_loop_tree_node_t
+common_loop_tree_node_dominator (ira_loop_tree_node_t n1,
+ ira_loop_tree_node_t n2)
{
ira_assert (n1 != NULL && n2 != NULL);
if (n1 == n2)
@@ -1901,7 +1913,7 @@ common_loop_tree_node_dominator (loop_tree_node_t n1, loop_tree_node_t n2)
/* The function changes allocno in range list given by R onto A. */
static void
-change_allocno_in_range_list (allocno_live_range_t r, allocno_t a)
+change_allocno_in_range_list (allocno_live_range_t r, ira_allocno_t a)
{
for (; r != NULL; r = r->next)
r->allocno = a;
@@ -1911,10 +1923,10 @@ change_allocno_in_range_list (allocno_live_range_t r, allocno_t a)
it were built with one region (without loops). We could make it
much simpler by rebuilding IR with one region, but unfortunately it
takes a lot of time. MAX_REGNO_BEFORE_EMIT and
- MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
- MAX_POINT before emitting insns on the loop borders. */
+ IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
+ IRA_MAX_POINT before emitting insns on the loop borders. */
void
-ira_flattening (int max_regno_before_emit, int max_point_before_emit)
+ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
int i, j, num;
bool propagate_p, stop_p, keep_p;
@@ -1923,31 +1935,32 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
unsigned int n;
enum reg_class cover_class;
rtx call, *allocno_calls;
- allocno_t a, parent_a, first, second, node_first, node_second;
- allocno_t dominator_a;
- copy_t cp;
- loop_tree_node_t parent, node, dominator;
+ ira_allocno_t a, parent_a, first, second, node_first, node_second;
+ ira_allocno_t dominator_a;
+ ira_copy_t cp;
+ ira_loop_tree_node_t parent, node, dominator;
allocno_live_range_t r;
- allocno_iterator ai;
- copy_iterator ci;
+ ira_allocno_iterator ai;
+ ira_copy_iterator ci;
sparseset allocnos_live;
/* Map: regno -> allocnos which will finally represent the regno for
IR with one region. */
- allocno_t *regno_top_level_allocno_map;
+ ira_allocno_t *regno_top_level_allocno_map;
bool *allocno_propagated_p;
regno_top_level_allocno_map
- = ira_allocate (max_reg_num () * sizeof (allocno_t));
- memset (regno_top_level_allocno_map, 0, max_reg_num () * sizeof (allocno_t));
- allocno_propagated_p = ira_allocate (allocnos_num * sizeof (bool));
- memset (allocno_propagated_p, 0, allocnos_num * sizeof (bool));
+ = ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
+ memset (regno_top_level_allocno_map, 0,
+ max_reg_num () * sizeof (ira_allocno_t));
+ allocno_propagated_p = ira_allocate (ira_allocnos_num * sizeof (bool));
+ memset (allocno_propagated_p, 0, ira_allocnos_num * sizeof (bool));
expand_calls ();
new_allocnos_p = renamed_p = merged_p = false;
/* Fix final allocno attributes. */
for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
{
propagate_p = false;
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
@@ -1959,14 +1972,14 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
&& ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
{
allocno_calls = (VEC_address (rtx,
- regno_calls[ALLOCNO_REGNO (a)])
+ ira_regno_calls[ALLOCNO_REGNO (a)])
+ ALLOCNO_CALLS_CROSSED_START (a));
for (j = ALLOCNO_CALLS_CROSSED_NUM (a) - 1; j >= 0; j--)
{
call = allocno_calls[j];
if (call == NULL_RTX)
continue;
- add_regno_call (REGNO (ALLOCNO_REG (a)), call);
+ ira_add_regno_call (REGNO (ALLOCNO_REG (a)), call);
allocno_calls[j] = NULL_RTX;
}
}
@@ -1982,17 +1995,17 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
if (propagate_p)
{
if (!allocno_propagated_p [ALLOCNO_NUM (parent_a)])
- COPY_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
+ COPY_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
ALLOCNO_CONFLICT_HARD_REGS (parent_a));
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
- ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
+ IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
#ifdef STACK_REGS
if (!allocno_propagated_p [ALLOCNO_NUM (parent_a)])
- ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a)
+ IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a)
= ALLOCNO_NO_STACK_REG_P (parent_a);
- ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a)
- = (ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a)
- || ALLOCNO_TOTAL_NO_STACK_REG_P (a));
+ IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a)
+ = (IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a)
+ || IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (a));
#endif
allocno_propagated_p [ALLOCNO_NUM (parent_a)] = true;
}
@@ -2005,8 +2018,8 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)),
ALLOCNO_NUM (parent_a),
REGNO (ALLOCNO_REG (parent_a)));
- print_live_range_list (ira_dump_file,
- ALLOCNO_LIVE_RANGES (a));
+ ira_print_live_range_list (ira_dump_file,
+ ALLOCNO_LIVE_RANGES (a));
}
change_allocno_in_range_list (ALLOCNO_LIVE_RANGES (a), parent_a);
ALLOCNO_LIVE_RANGES (parent_a)
@@ -2031,7 +2044,7 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
cover_class = ALLOCNO_COVER_CLASS (parent_a);
- hard_regs_num = class_hard_regs_num[cover_class];
+ hard_regs_num = ira_class_hard_regs_num[cover_class];
if (ALLOCNO_HARD_REG_COSTS (a) != NULL
&& ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
for (j = 0; j < hard_regs_num; j++)
@@ -2073,8 +2086,8 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
ALLOCNO_NUM (first), REGNO (ALLOCNO_REG (first)),
ALLOCNO_NUM (parent_a),
REGNO (ALLOCNO_REG (parent_a)));
- print_live_range_list (ira_dump_file,
- ALLOCNO_LIVE_RANGES (first));
+ ira_print_live_range_list (ira_dump_file,
+ ALLOCNO_LIVE_RANGES (first));
}
r = copy_allocno_live_range_list (ALLOCNO_LIVE_RANGES
(first));
@@ -2098,7 +2111,7 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
}
ira_free (allocno_propagated_p);
ira_assert (new_allocnos_p || renamed_p
- || max_point_before_emit == max_point);
+ || ira_max_point_before_emit == ira_max_point);
if (new_allocnos_p)
{
/* Fix final allocnos attributes concerning calls. */
@@ -2110,11 +2123,11 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
continue;
ALLOCNO_CALLS_CROSSED_START (a) = 0;
ALLOCNO_CALLS_CROSSED_NUM (a)
- = VEC_length (rtx, regno_calls[REGNO (ALLOCNO_REG (a))]);
+ = VEC_length (rtx, ira_regno_calls[REGNO (ALLOCNO_REG (a))]);
}
}
- if (merged_p || max_point_before_emit != max_point)
- rebuild_start_finish_chains ();
+ if (merged_p || ira_max_point_before_emit != ira_max_point)
+ ira_rebuild_start_finish_chains ();
/* We should rebuild conflicts even if there are no new allocnos in
the situation when a pseudo is used locally in loops and locally in the
subloop, because some allocnos are in conflict with the subloop
@@ -2132,10 +2145,10 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
ira_assert (r->allocno == a);
clear_allocno_conflicts (a);
}
- allocnos_live = sparseset_alloc (allocnos_num);
- for (i = 0; i < max_point; i++)
+ allocnos_live = sparseset_alloc (ira_allocnos_num);
+ for (i = 0; i < ira_max_point; i++)
{
- for (r = start_point_ranges[i]; r != NULL; r = r->start_next)
+ for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
{
a = r->allocno;
if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
@@ -2146,16 +2159,16 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
sparseset_set_bit (allocnos_live, num);
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, n)
{
- allocno_t live_a = allocnos[n];
+ ira_allocno_t live_a = ira_allocnos[n];
if (cover_class == ALLOCNO_COVER_CLASS (live_a)
/* Don't set up conflict for the allocno with itself. */
&& num != (int) n)
- add_allocno_conflict (a, live_a);
+ ira_add_allocno_conflict (a, live_a);
}
}
- for (r = finish_point_ranges[i]; r != NULL; r = r->finish_next)
+ for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
}
sparseset_free (allocnos_live);
@@ -2227,7 +2240,7 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
ALLOCNO_CAP (a) = NULL;
ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
if (! ALLOCNO_ASSIGNED_P (a))
- free_allocno_updated_costs (a);
+ ira_free_allocno_updated_costs (a);
ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
}
@@ -2236,15 +2249,15 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
{
if (cp->loop_tree_node == NULL)
{
- copies[cp->num] = NULL;
+ ira_copies[cp->num] = NULL;
finish_copy (cp);
continue;
}
ira_assert
(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
&& ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
- add_allocno_copy_to_list (cp);
- swap_allocno_copy_ends_if_necessary (cp);
+ ira_add_allocno_copy_to_list (cp);
+ ira_swap_allocno_copy_ends_if_necessary (cp);
}
rebuild_regno_allocno_maps ();
ira_free (regno_top_level_allocno_map);
@@ -2257,23 +2270,24 @@ ira_flattening (int max_regno_before_emit, int max_point_before_emit)
static int all_loops = 0, high_pressure_loops = 0;
static void
-calculate_high_pressure_loops (loop_tree_node_t loop_node,
+calculate_high_pressure_loops (ira_loop_tree_node_t loop_node,
int *all_loops, int *high_pressure_loops)
{
- loop_tree_node_t subloop_node;
+ ira_loop_tree_node_t subloop_node;
int i;
enum reg_class class;
(*all_loops)++;
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- class = reg_class_cover[i];
- if (loop_node->reg_pressure[class] > available_class_regs[class]
+ class = ira_reg_class_cover[i];
+ if (loop_node->reg_pressure[class] > ira_available_class_regs[class]
|| (loop_node->parent != NULL
- && loop_node->parent->reg_pressure[class] > available_class_regs[class]))
+ && (loop_node->parent->reg_pressure[class]
+ > ira_available_class_regs[class])))
break;
}
- if (i < reg_class_cover_size)
+ if (i < ira_reg_class_cover_size)
(*high_pressure_loops)++;
for (subloop_node = loop_node->subloops;
subloop_node != NULL;
@@ -2313,14 +2327,14 @@ ira_build (bool loops_p)
form_loop_tree ();
create_allocnos ();
ira_costs ();
- create_allocno_live_ranges ();
+ ira_create_allocno_live_ranges ();
if (optimize && (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
|| flag_ira_algorithm == IRA_ALGORITHM_MIXED))
{
bitmap_clear (allocnos_bitmap);
- traverse_loop_tree (false, ira_loop_tree_root, NULL,
- create_loop_tree_node_caps);
+ ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
+ create_loop_tree_node_caps);
}
setup_min_max_allocno_live_range_point ();
sort_conflict_id_allocno_map ();
@@ -2329,9 +2343,9 @@ ira_build (bool loops_p)
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
{
int n, nr;
- allocno_t a;
+ ira_allocno_t a;
allocno_live_range_t r;
- allocno_iterator ai;
+ ira_allocno_iterator ai;
n = 0;
FOR_EACH_ALLOCNO (a, ai)
@@ -2342,18 +2356,18 @@ ira_build (bool loops_p)
nr++;
fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
VEC_length (loop_p, ira_loops.larray), n_basic_blocks,
- max_point);
+ ira_max_point);
fprintf (ira_dump_file,
" allocnos=%d, copies=%d, conflicts=%d, ranges=%d\n",
- allocnos_num, copies_num, n, nr);
+ ira_allocnos_num, ira_copies_num, n, nr);
}
if (optimize)
{
if (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
|| flag_ira_algorithm == IRA_ALGORITHM_MIXED)
- traverse_loop_tree (false, ira_loop_tree_root, NULL,
- propagate_info_to_loop_tree_node_caps);
- tune_allocno_costs_and_cover_classes ();
+ ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
+ propagate_info_to_loop_tree_node_caps);
+ ira_tune_allocno_costs_and_cover_classes ();
if (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
|| flag_ira_algorithm == IRA_ALGORITHM_MIXED)
{
@@ -2375,6 +2389,6 @@ ira_destroy (void)
finish_allocnos ();
finish_calls ();
finish_cost_vectors ();
- finish_allocno_live_ranges ();
+ ira_finish_allocno_live_ranges ();
ira_free_bitmap (allocnos_bitmap);
}
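
The ira-build.c hunks above are a mechanical namespace cleanup: every identifier with external linkage gains the ira_ prefix, while static helpers keep their short names. A minimal sketch of the pattern follows; the declarations are reconstructed for illustration, not copied from the GCC headers.

/* Illustrative reconstruction of the renaming convention (assumed
   shapes, not the verbatim declarations).  Exported names carry the
   pass prefix so they cannot clash with other GCC passes. */
typedef struct ira_allocno *ira_allocno_t;   /* was: allocno_t */
typedef struct ira_copy *ira_copy_t;         /* was: copy_t    */

extern int ira_allocnos_num;                 /* was: allocnos_num */
extern int ira_max_point;                    /* was: max_point    */

/* File-local helpers such as find_allocno_copy () stay unprefixed:
   their static linkage already confines them to one file. */
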
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index b53183425a9..a729ac62cd7 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -63,13 +63,13 @@ static bool allocno_coalesced_p;
static bitmap processed_coalesced_allocno_bitmap;
/* All allocnos sorted according to their priorities. */
-static allocno_t *sorted_allocnos;
+static ira_allocno_t *sorted_allocnos;
/* Vec representing the stack of allocnos used during coloring. */
-static VEC(allocno_t,heap) *allocno_stack_vec;
+static VEC(ira_allocno_t,heap) *allocno_stack_vec;
/* Array used to choose an allocno for spilling. */
-static allocno_t *allocnos_for_spilling;
+static ira_allocno_t *allocnos_for_spilling;
/* Pool for splay tree nodes. */
static alloc_pool splay_tree_node_pool;
@@ -80,7 +80,7 @@ static alloc_pool splay_tree_node_pool;
could be removed from and reinserted into the splay tree every time
its spilling priority changes, but such a solution would be
more costly although simpler. */
-static VEC(allocno_t,heap) *removed_splay_allocno_vec;
+static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;
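
The vector declared above implements lazy re-keying: when an allocno's spilling priority changes, it is parked in the vector instead of being removed from and reinserted into the splay tree immediately; push_allocnos_to_stack (further down) flushes the vector back just before the tree's minimum is taken. A toy, self-contained sketch of that deferral pattern, with an insertion-sorted array standing in for the splay tree and all names invented:

#include <stddef.h>

#define CAP 64

/* Ordered-set stand-in: keys kept ascending in a plain array. */
struct oset { int key[CAP]; size_t n; };

static void
oset_insert (struct oset *s, int k)
{
  size_t i = s->n++;
  while (i > 0 && s->key[i - 1] > k)
    {
      s->key[i] = s->key[i - 1];
      i--;
    }
  s->key[i] = k;
}

/* Items whose key changed wait here instead of being re-keyed now. */
static int parked[CAP];
static size_t parked_n;

static void
key_changed (int new_key)
{
  parked[parked_n++] = new_key;
}

/* Flush parked items only when the minimum is actually needed. */
static int
oset_take_min (struct oset *s)
{
  while (parked_n > 0)
    oset_insert (s, parked[--parked_n]);
  int min = s->key[0];
  s->n--;
  for (size_t i = 0; i < s->n; i++)
    s->key[i] = s->key[i + 1];
  return min;
}
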
@@ -103,8 +103,8 @@ static int update_cost_check;
static void
initiate_cost_update (void)
{
- allocno_update_cost_check = ira_allocate (allocnos_num * sizeof (int));
- memset (allocno_update_cost_check, 0, allocnos_num * sizeof (int));
+ allocno_update_cost_check = ira_allocate (ira_allocnos_num * sizeof (int));
+ memset (allocno_update_cost_check, 0, ira_allocnos_num * sizeof (int));
update_cost_check = 0;
}
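
The pair allocno_update_cost_check[]/update_cost_check initialized here is the classic generation-counter idiom: update_copy_costs bumps the counter once per propagation, and update_copy_costs_1 treats a matching stamp as "already visited", so no per-allocno clearing is needed between walks. A self-contained sketch with invented names:

#include <stdlib.h>

/* One stamp per node, one counter per traversal (invented names). */
static int *visit_stamp;
static int cur_stamp;

static void
init_visit (int n_nodes)
{
  visit_stamp = calloc (n_nodes, sizeof (int));
  cur_stamp = 0;
}

/* Start a new traversal in O(1): no memset over all nodes. */
static void
start_walk (void)
{
  cur_stamp++;
}

/* Return nonzero the first time NUM is seen in the current walk. */
static int
first_visit_p (int num)
{
  if (visit_stamp[num] == cur_stamp)
    return 0;
  visit_stamp[num] = cur_stamp;
  return 1;
}
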
@@ -120,14 +120,14 @@ finish_cost_update (void)
increases chances to remove some copies. Copy cost is proportional to
the copy frequency divided by DIVISOR. */
static void
-update_copy_costs_1 (allocno_t allocno, int hard_regno,
+update_copy_costs_1 (ira_allocno_t allocno, int hard_regno,
bool decr_p, int divisor)
{
int i, cost, update_cost;
enum machine_mode mode;
enum reg_class class, cover_class;
- allocno_t another_allocno;
- copy_t cp, next_cp;
+ ira_allocno_t another_allocno;
+ ira_copy_t cp, next_cp;
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class == NO_REGS)
@@ -136,7 +136,7 @@ update_copy_costs_1 (allocno_t allocno, int hard_regno,
return;
allocno_update_cost_check[ALLOCNO_NUM (allocno)] = update_cost_check;
ira_assert (hard_regno >= 0);
- i = class_hard_reg_index[cover_class][hard_regno];
+ i = ira_class_hard_reg_index[cover_class][hard_regno];
ira_assert (i >= 0);
class = REGNO_REG_CLASS (hard_regno);
mode = ALLOCNO_MODE (allocno);
@@ -159,17 +159,17 @@ update_copy_costs_1 (allocno_t allocno, int hard_regno,
|| ALLOCNO_ASSIGNED_P (another_allocno))
continue;
cost = (cp->second == allocno
- ? register_move_cost[mode][class]
+ ? ira_register_move_cost[mode][class]
[ALLOCNO_COVER_CLASS (another_allocno)]
- : register_move_cost[mode]
+ : ira_register_move_cost[mode]
[ALLOCNO_COVER_CLASS (another_allocno)][class]);
if (decr_p)
cost = -cost;
- allocate_and_set_or_copy_costs
+ ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (another_allocno),
ALLOCNO_HARD_REG_COSTS (another_allocno));
- allocate_and_set_or_copy_costs
+ ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
cover_class, 0,
ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
@@ -186,7 +186,7 @@ update_copy_costs_1 (allocno_t allocno, int hard_regno,
/* Update the cost of allocnos to increase chances to remove some
copies as the result of subsequent assignment. */
static void
-update_copy_costs (allocno_t allocno, bool decr_p)
+update_copy_costs (ira_allocno_t allocno, bool decr_p)
{
update_cost_check++;
update_copy_costs_1 (allocno, ALLOCNO_HARD_REGNO (allocno), decr_p, 1);
@@ -197,7 +197,8 @@ update_copy_costs (allocno_t allocno, bool decr_p)
static int
allocno_cost_compare_func (const void *v1p, const void *v2p)
{
- allocno_t p1 = *(const allocno_t *) v1p, p2 = *(const allocno_t *) v2p;
+ ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
+ ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
int c1, c2;
c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_COVER_CLASS_COST (p1);
@@ -212,14 +213,14 @@ allocno_cost_compare_func (const void *v1p, const void *v2p)
/* Print all allocnos coalesced with ALLOCNO. */
static void
-print_coalesced_allocno (allocno_t allocno)
+print_coalesced_allocno (ira_allocno_t allocno)
{
- allocno_t a;
+ ira_allocno_t a;
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
- print_expanded_allocno (a);
+ ira_print_expanded_allocno (a);
if (a == allocno)
break;
fprintf (ira_dump_file, "+");
@@ -228,13 +229,13 @@ print_coalesced_allocno (allocno_t allocno)
/* Choose a hard register for ALLOCNO (or for all coalesced allocnos
represented by ALLOCNO). If RETRY_P is TRUE, it means that the
- function called from function `reassign_conflict_allocnos' and
+ function is called from functions `ira_reassign_conflict_allocnos' and
`allocno_reload_assign'. This function implements the optimistic
coalescing too: if we fail to assign a hard register to the set of
coalesced allocnos, we put them onto the coloring stack for
subsequent separate assigning. */
static bool
-assign_hard_reg (allocno_t allocno, bool retry_p)
+assign_hard_reg (ira_allocno_t allocno, bool retry_p)
{
HARD_REG_SET conflicting_regs;
int i, j, hard_regno, best_hard_regno, class_size;
@@ -243,10 +244,10 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
int *conflict_costs;
enum reg_class cover_class, class;
enum machine_mode mode;
- allocno_t a, conflict_allocno;
- allocno_t another_allocno;
- allocno_conflict_iterator aci;
- copy_t cp, next_cp;
+ ira_allocno_t a, conflict_allocno;
+ ira_allocno_t another_allocno;
+ ira_allocno_conflict_iterator aci;
+ ira_copy_t cp, next_cp;
static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
#ifdef STACK_REGS
bool no_stack_reg_p;
@@ -254,7 +255,7 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
ira_assert (! ALLOCNO_ASSIGNED_P (allocno));
cover_class = ALLOCNO_COVER_CLASS (allocno);
- class_size = class_hard_regs_num[cover_class];
+ class_size = ira_class_hard_regs_num[cover_class];
mode = ALLOCNO_MODE (allocno);
CLEAR_HARD_REG_SET (conflicting_regs);
best_hard_regno = -1;
@@ -272,12 +273,12 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
{
mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
IOR_HARD_REG_SET (conflicting_regs,
- ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
- allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
- cover_class, ALLOCNO_HARD_REG_COSTS (a));
+ IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
+ cover_class, ALLOCNO_HARD_REG_COSTS (a));
a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
#ifdef STACK_REGS
- no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
+ no_stack_reg_p = no_stack_reg_p || IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (a);
#endif
for (cost = ALLOCNO_COVER_CLASS_COST (a), i = 0; i < class_size; i++)
if (a_costs != NULL)
@@ -312,7 +313,7 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
{
IOR_HARD_REG_SET
(conflicting_regs,
- reg_mode_hard_regset
+ ira_reg_mode_hard_regset
[hard_regno][ALLOCNO_MODE (conflict_allocno)]);
if (hard_reg_set_subset_p (reg_class_contents[cover_class],
conflicting_regs))
@@ -322,7 +323,7 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
}
else if (! ALLOCNO_MAY_BE_SPILLED_P (conflict_allocno))
{
- allocate_and_copy_costs
+ ira_allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno),
cover_class,
ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_allocno));
@@ -357,7 +358,7 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
|| ALLOCNO_ASSIGNED_P (another_allocno))
continue;
- allocate_and_copy_costs
+ ira_allocate_and_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
conflict_costs
@@ -377,27 +378,27 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
REG_ALLOC_ORDER). */
for (i = 0; i < class_size; i++)
{
- hard_regno = class_hard_regs[cover_class][i];
+ hard_regno = ira_class_hard_regs[cover_class][i];
#ifdef STACK_REGS
if (no_stack_reg_p
&& FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
continue;
#endif
- if (! hard_reg_not_in_set_p (hard_regno, mode, conflicting_regs)
+ if (! ira_hard_reg_not_in_set_p (hard_regno, mode, conflicting_regs)
|| TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
hard_regno))
continue;
cost = costs[i];
full_cost = full_costs[i];
if (! allocated_hardreg_p[hard_regno]
- && hard_reg_not_in_set_p (hard_regno, mode, call_used_reg_set))
+ && ira_hard_reg_not_in_set_p (hard_regno, mode, call_used_reg_set))
/* We need to save/restore the hard register in
epilogue/prologue. Therefore we increase the cost. */
{
/* ??? If only part is call clobbered. */
class = REGNO_REG_CLASS (hard_regno);
- add_cost = (memory_move_cost[mode][class][0]
- + memory_move_cost[mode][class][1] - 1);
+ add_cost = (ira_memory_move_cost[mode][class][0]
+ + ira_memory_move_cost[mode][class][1] - 1);
cost += add_cost;
full_cost += add_cost;
}
@@ -428,14 +429,14 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
if (a == allocno)
break;
}
- qsort (sorted_allocnos, j, sizeof (allocno_t),
+ qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
allocno_cost_compare_func);
for (i = 0; i < j; i++)
{
a = sorted_allocnos[i];
ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
- VEC_safe_push (allocno_t, heap, allocno_stack_vec, a);
+ VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
{
fprintf (ira_dump_file, " Pushing");
@@ -456,7 +457,7 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
update_copy_costs (a, true);
ira_assert (ALLOCNO_COVER_CLASS (a) == cover_class);
/* We don't need updated costs anymore: */
- free_allocno_updated_costs (a);
+ ira_free_allocno_updated_costs (a);
if (a == allocno)
break;
}
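
The selection loop above scans the cover class's registers in allocation order, skips conflicting ones, and charges a one-time save/restore penalty to a callee-saved register that has not been used yet. A schematic, self-contained version, with invented arrays standing in for the IRA cost vectors:

#include <limits.h>
#include <stdbool.h>

enum { NREGS = 16 };   /* hypothetical class size */

/* Pick the cheapest non-conflicting register, or -1 to spill. */
static int
pick_hard_reg (const int full_cost[NREGS], const bool conflict[NREGS],
               const bool callee_saved[NREGS],
               const bool used_before[NREGS], int save_restore_cost)
{
  int best = -1, best_cost = INT_MAX;

  for (int r = 0; r < NREGS; r++)
    {
      if (conflict[r])
        continue;
      int c = full_cost[r];
      /* First use of a callee-saved register implies a prologue
         save and an epilogue restore, so bias against it. */
      if (callee_saved[r] && !used_before[r])
        c += save_restore_cost;
      if (c < best_cost)
        {
          best_cost = c;
          best = r;
        }
    }
  return best;
}
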
@@ -468,11 +469,11 @@ assign_hard_reg (allocno_t allocno, bool retry_p)
/* This page contains the allocator based on the Chaitin-Briggs algorithm. */
/* Bucket of allocnos that can be colored currently without spilling. */
-static allocno_t colorable_allocno_bucket;
+static ira_allocno_t colorable_allocno_bucket;
/* Bucket of allocnos that might not be colored currently without
spilling. */
-static allocno_t uncolorable_allocno_bucket;
+static ira_allocno_t uncolorable_allocno_bucket;
/* Each element of the array contains the current number of allocnos
of given *cover* class in the uncolorable_bucket. */
@@ -481,9 +482,9 @@ static int uncolorable_allocnos_num[N_REG_CLASSES];
/* Add ALLOCNO to bucket *BUCKET_PTR. ALLOCNO should not be in a bucket
before the call. */
static void
-add_allocno_to_bucket (allocno_t allocno, allocno_t *bucket_ptr)
+add_ira_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
- allocno_t first_allocno;
+ ira_allocno_t first_allocno;
enum reg_class cover_class;
if (bucket_ptr == &uncolorable_allocno_bucket
@@ -503,9 +504,9 @@ add_allocno_to_bucket (allocno_t allocno, allocno_t *bucket_ptr)
/* The function returns the frequency and the number of available hard
registers for allocnos coalesced with ALLOCNO. */
static void
-get_coalesced_allocnos_attributes (allocno_t allocno, int *freq, int *num)
+get_coalesced_allocnos_attributes (ira_allocno_t allocno, int *freq, int *num)
{
- allocno_t a;
+ ira_allocno_t a;
*freq = 0;
*num = 0;
@@ -529,7 +530,8 @@ get_coalesced_allocnos_attributes (allocno_t allocno, int *freq, int *num)
static int
bucket_allocno_compare_func (const void *v1p, const void *v2p)
{
- allocno_t a1 = *(const allocno_t *) v1p, a2 = *(const allocno_t *) v2p;
+ ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
+ ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
int diff, a1_freq, a2_freq, a1_num, a2_num;
if ((diff = (int) ALLOCNO_COVER_CLASS (a2) - ALLOCNO_COVER_CLASS (a1)) != 0)
@@ -546,16 +548,17 @@ bucket_allocno_compare_func (const void *v1p, const void *v2p)
/* Sort bucket *BUCKET_PTR and return the result through
BUCKET_PTR. */
static void
-sort_bucket (allocno_t *bucket_ptr)
+sort_bucket (ira_allocno_t *bucket_ptr)
{
- allocno_t a, head;
+ ira_allocno_t a, head;
int n;
for (n = 0, a = *bucket_ptr; a != NULL; a = ALLOCNO_NEXT_BUCKET_ALLOCNO (a))
sorted_allocnos[n++] = a;
if (n <= 1)
return;
- qsort (sorted_allocnos, n, sizeof (allocno_t), bucket_allocno_compare_func);
+ qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
+ bucket_allocno_compare_func);
head = NULL;
for (n--; n >= 0; n--)
{
@@ -573,9 +576,10 @@ sort_bucket (allocno_t *bucket_ptr)
their priority. ALLOCNO should not be in a bucket before the
call. */
static void
-add_allocno_to_ordered_bucket (allocno_t allocno, allocno_t *bucket_ptr)
+add_ira_allocno_to_ordered_bucket (ira_allocno_t allocno,
+ ira_allocno_t *bucket_ptr)
{
- allocno_t before, after;
+ ira_allocno_t before, after;
enum reg_class cover_class;
if (bucket_ptr == &uncolorable_allocno_bucket
@@ -602,9 +606,9 @@ add_allocno_to_ordered_bucket (allocno_t allocno, allocno_t *bucket_ptr)
/* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
the call. */
static void
-delete_allocno_from_bucket (allocno_t allocno, allocno_t *bucket_ptr)
+delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
- allocno_t prev_allocno, next_allocno;
+ ira_allocno_t prev_allocno, next_allocno;
enum reg_class cover_class;
if (bucket_ptr == &uncolorable_allocno_bucket
@@ -646,19 +650,19 @@ static splay_tree uncolorable_allocnos_splay_tree[N_REG_CLASSES];
conflicting allocnos from the uncolorable bucket to the colorable
one. */
static void
-push_allocno_to_stack (allocno_t allocno)
+push_ira_allocno_to_stack (ira_allocno_t allocno)
{
int conflicts_num, conflict_size, size;
- allocno_t a, conflict_allocno;
+ ira_allocno_t a, conflict_allocno;
enum reg_class cover_class;
- allocno_conflict_iterator aci;
+ ira_allocno_conflict_iterator aci;
ALLOCNO_IN_GRAPH_P (allocno) = false;
- VEC_safe_push (allocno_t, heap, allocno_stack_vec, allocno);
+ VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class == NO_REGS)
return;
- size = reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)];
+ size = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)];
if (allocno_coalesced_p)
bitmap_clear (processed_coalesced_allocno_bitmap);
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
@@ -682,7 +686,7 @@ push_allocno_to_stack (allocno_t allocno)
{
conflicts_num = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno);
conflict_size
- = (reg_class_nregs
+ = (ira_reg_class_nregs
[cover_class][ALLOCNO_MODE (conflict_allocno)]);
ira_assert
(ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) >= size);
@@ -706,7 +710,7 @@ push_allocno_to_stack (allocno_t allocno)
(uncolorable_allocnos_splay_tree[cover_class],
(splay_tree_key) conflict_allocno);
ALLOCNO_SPLAY_REMOVED_P (conflict_allocno) = true;
- VEC_safe_push (allocno_t, heap, removed_splay_allocno_vec,
+ VEC_safe_push (ira_allocno_t, heap, removed_splay_allocno_vec,
conflict_allocno);
}
ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) = conflicts_num;
@@ -715,7 +719,7 @@ push_allocno_to_stack (allocno_t allocno)
{
delete_allocno_from_bucket (conflict_allocno,
&uncolorable_allocno_bucket);
- add_allocno_to_ordered_bucket (conflict_allocno,
+ add_ira_allocno_to_ordered_bucket (conflict_allocno,
&colorable_allocno_bucket);
}
}
@@ -728,7 +732,7 @@ push_allocno_to_stack (allocno_t allocno)
/* Put ALLOCNO onto the coloring stack and remove it from its bucket.
The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
static void
-remove_allocno_from_bucket_and_push (allocno_t allocno, bool colorable_p)
+remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
{
enum reg_class cover_class;
@@ -745,15 +749,16 @@ remove_allocno_from_bucket_and_push (allocno_t allocno, bool colorable_p)
cover_class = ALLOCNO_COVER_CLASS (allocno);
ira_assert ((colorable_p
&& (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
- + reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
+ + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
<= ALLOCNO_AVAILABLE_REGS_NUM (allocno)))
|| (! colorable_p
&& (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
- + reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
+ + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
+ (allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno))));
if (! colorable_p)
ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
- push_allocno_to_stack (allocno);
+ push_ira_allocno_to_stack (allocno);
}
/* Put all allocnos from the colorable bucket onto the coloring stack. */
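
push_ira_allocno_to_stack above is the simplify step of the Chaitin-Briggs scheme: removing an allocno from the conflict graph lowers each neighbor's remaining demand, and a neighbor whose demand drops to what its class can supply migrates from the uncolorable to the colorable bucket. A toy version over a plain adjacency matrix, assuming unit-size allocnos; all structures are simplified stand-ins:

#include <stdbool.h>

enum { N = 8 };             /* toy conflict graph size      */
static bool conflict[N][N]; /* symmetric adjacency matrix   */
static int left_num[N];     /* neighbors still in the graph */
static bool in_graph[N];

/* Remove NODE; report neighbors that just became colorable given
   AVAIL registers, mimicking the bucket migration above. */
static void
simplify_remove (int node, int avail, void (*now_colorable) (int))
{
  in_graph[node] = false;
  for (int v = 0; v < N; v++)
    if (in_graph[v] && conflict[node][v])
      {
        left_num[v]--;
        if (left_num[v] + 1 == avail) /* just crossed the threshold */
          now_colorable (v);
      }
}
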
@@ -768,20 +773,20 @@ push_only_colorable (void)
/* Puts ALLOCNO chosen for potential spilling onto the coloring
stack. */
static void
-push_allocno_to_spill (allocno_t allocno)
+push_ira_allocno_to_spill (ira_allocno_t allocno)
{
delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Pushing p%d(%d) (potential spill)\n",
ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno));
- push_allocno_to_stack (allocno);
+ push_ira_allocno_to_stack (allocno);
}
/* Return the frequency of exit edges (if EXIT_P) or entry edges
(otherwise) of the loop given by its LOOP_NODE. */
int
-loop_edge_freq (loop_tree_node_t loop_node, int regno, bool exit_p)
+ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
int freq, i;
edge_iterator ei;
@@ -816,13 +821,13 @@ loop_edge_freq (loop_tree_node_t loop_node, int regno, bool exit_p)
/* Calculate and return the cost of putting allocno A into memory. */
static int
-calculate_allocno_spill_cost (allocno_t a)
+calculate_allocno_spill_cost (ira_allocno_t a)
{
int regno, cost;
enum machine_mode mode;
enum reg_class class;
- allocno_t parent_allocno;
- loop_tree_node_t parent_node, loop_node;
+ ira_allocno_t parent_allocno;
+ ira_loop_tree_node_t parent_node, loop_node;
regno = ALLOCNO_REGNO (a);
cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
@@ -836,18 +841,18 @@ calculate_allocno_spill_cost (allocno_t a)
mode = ALLOCNO_MODE (a);
class = ALLOCNO_COVER_CLASS (a);
if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
- cost -= (memory_move_cost[mode][class][0]
- * loop_edge_freq (loop_node, regno, true)
- + memory_move_cost[mode][class][1]
- * loop_edge_freq (loop_node, regno, false));
+ cost -= (ira_memory_move_cost[mode][class][0]
+ * ira_loop_edge_freq (loop_node, regno, true)
+ + ira_memory_move_cost[mode][class][1]
+ * ira_loop_edge_freq (loop_node, regno, false));
else
- cost += ((memory_move_cost[mode][class][1]
- * loop_edge_freq (loop_node, regno, true)
- + memory_move_cost[mode][class][0]
- * loop_edge_freq (loop_node, regno, false))
- - (register_move_cost[mode][class][class]
- * (loop_edge_freq (loop_node, regno, false)
- + loop_edge_freq (loop_node, regno, true))));
+ cost += ((ira_memory_move_cost[mode][class][1]
+ * ira_loop_edge_freq (loop_node, regno, true)
+ + ira_memory_move_cost[mode][class][0]
+ * ira_loop_edge_freq (loop_node, regno, false))
+ - (ira_register_move_cost[mode][class][class]
+ * (ira_loop_edge_freq (loop_node, regno, false)
+ + ira_loop_edge_freq (loop_node, regno, true))));
return cost;
}
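
calculate_allocno_spill_cost adjusts the base spill benefit by what happens at the enclosing loop's border: if the parent allocno already lives in memory, spilling removes the memory moves on the loop's entry and exit edges; if the parent got a hard register, spilling adds memory moves at the border but saves the register shuffles there. A back-of-envelope version with invented names; the exact load/store orientation of the two memory-cost parameters follows the target's memory_move_cost convention:

/* Sketch of the loop-border adjustment; frequencies are the summed
   edge frequencies returned by something like ira_loop_edge_freq. */
static int
border_adjusted_spill_cost (int base_cost, int parent_in_memory_p,
                            int enter_freq, int exit_freq,
                            int mem_move_in, int mem_move_out,
                            int reg_move_cost)
{
  if (parent_in_memory_p)
    /* Parent is spilled too: border loads/stores disappear. */
    return base_cost - (mem_move_out * exit_freq
                        + mem_move_in * enter_freq);
  /* Parent has a hard register: pay memory moves at the border,
     but drop the register-to-register moves there. */
  return base_cost
         + (mem_move_in * exit_freq + mem_move_out * enter_freq)
         - reg_move_cost * (exit_freq + enter_freq);
}
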
@@ -857,19 +862,19 @@ static int
allocno_spill_priority_compare (splay_tree_key k1, splay_tree_key k2)
{
int pri1, pri2, diff;
- allocno_t a1 = (allocno_t) k1, a2 = (allocno_t) k2;
+ ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;
- pri1 = (ALLOCNO_TEMP (a1)
+ pri1 = (IRA_ALLOCNO_TEMP (a1)
/ (ALLOCNO_LEFT_CONFLICTS_NUM (a1)
- * reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
+ * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
+ 1));
- pri2 = (ALLOCNO_TEMP (a2)
+ pri2 = (IRA_ALLOCNO_TEMP (a2)
/ (ALLOCNO_LEFT_CONFLICTS_NUM (a2)
- * reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
+ * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
+ 1));
if ((diff = pri1 - pri2) != 0)
return diff;
- if ((diff = ALLOCNO_TEMP (a1) - ALLOCNO_TEMP (a2)) != 0)
+ if ((diff = IRA_ALLOCNO_TEMP (a1) - IRA_ALLOCNO_TEMP (a2)) != 0)
return diff;
return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
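
The comparison above reduces to a single scalar per allocno: the accumulated spill cost stored in IRA_ALLOCNO_TEMP divided by a measure of how constraining the allocno is, so cheap, highly-constraining allocnos are spilled first. As a sketch:

/* Spill priority as used by allocno_spill_priority_compare: lower
   values are spilled earlier.  REGS_PER_ALLOCNO is the number of
   hard registers the allocno's mode occupies in its cover class. */
static int
spill_priority (int spill_cost, int left_conflicts_num,
                int regs_per_allocno)
{
  return spill_cost / (left_conflicts_num * regs_per_allocno + 1);
}
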
@@ -894,9 +899,9 @@ splay_tree_free (void *node, void *data ATTRIBUTE_UNUSED)
int i;
enum reg_class cover_class;
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
if (node == uncolorable_allocnos_splay_tree[cover_class])
{
ira_free (node);
@@ -911,17 +916,17 @@ splay_tree_free (void *node, void *data ATTRIBUTE_UNUSED)
static void
push_allocnos_to_stack (void)
{
- allocno_t allocno, a, i_allocno, *allocno_vec;
+ ira_allocno_t allocno, a, i_allocno, *allocno_vec;
enum reg_class cover_class, class;
int allocno_pri, i_allocno_pri, allocno_cost, i_allocno_cost;
int i, j, num, cover_class_allocnos_num[N_REG_CLASSES];
- allocno_t *cover_class_allocnos[N_REG_CLASSES];
+ ira_allocno_t *cover_class_allocnos[N_REG_CLASSES];
int cost;
/* Initialize. */
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
cover_class_allocnos_num[cover_class] = 0;
cover_class_allocnos[cover_class] = NULL;
uncolorable_allocnos_splay_tree[cover_class] = NULL;
@@ -943,13 +948,13 @@ push_allocnos_to_stack (void)
}
/* ??? Remove cost of copies between the coalesced
allocnos. */
- ALLOCNO_TEMP (allocno) = cost;
+ IRA_ALLOCNO_TEMP (allocno) = cost;
}
/* Define place where to put uncolorable allocnos of the same cover
class. */
- for (num = i = 0; i < reg_class_cover_size; i++)
+ for (num = i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
ira_assert (cover_class_allocnos_num[cover_class]
== uncolorable_allocnos_num[cover_class]);
if (cover_class_allocnos_num[cover_class] != 0)
@@ -987,26 +992,27 @@ push_allocnos_to_stack (void)
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (cover_class == NO_REGS)
{
- push_allocno_to_spill (allocno);
+ push_ira_allocno_to_spill (allocno);
continue;
}
/* Potential spilling. */
- ira_assert (reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)] > 0);
+ ira_assert
+ (ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)] > 0);
if (USE_SPLAY_P (cover_class))
{
- for (;VEC_length (allocno_t, removed_splay_allocno_vec) != 0;)
+ for (;VEC_length (ira_allocno_t, removed_splay_allocno_vec) != 0;)
{
- allocno = VEC_pop (allocno_t, removed_splay_allocno_vec);
+ allocno = VEC_pop (ira_allocno_t, removed_splay_allocno_vec);
ALLOCNO_SPLAY_REMOVED_P (allocno) = false;
class = ALLOCNO_COVER_CLASS (allocno);
if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
- + reg_class_nregs [class][ALLOCNO_MODE (allocno)]
+ + ira_reg_class_nregs [class][ALLOCNO_MODE (allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno))
splay_tree_insert
(uncolorable_allocnos_splay_tree[class],
(splay_tree_key) allocno, (splay_tree_value) allocno);
}
- allocno = ((allocno_t)
+ allocno = ((ira_allocno_t)
splay_tree_min
(uncolorable_allocnos_splay_tree[cover_class])->key);
splay_tree_remove (uncolorable_allocnos_splay_tree[cover_class],
@@ -1034,9 +1040,9 @@ push_allocnos_to_stack (void)
if (ALLOCNO_IN_GRAPH_P (i_allocno))
{
i++;
- if (ALLOCNO_TEMP (i_allocno) == INT_MAX)
+ if (IRA_ALLOCNO_TEMP (i_allocno) == INT_MAX)
{
- allocno_t a;
+ ira_allocno_t a;
int cost = 0;
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (i_allocno);;
@@ -1048,13 +1054,14 @@ push_allocnos_to_stack (void)
}
/* ??? Remove cost of copies between the coalesced
allocnos. */
- ALLOCNO_TEMP (i_allocno) = cost;
+ IRA_ALLOCNO_TEMP (i_allocno) = cost;
}
- i_allocno_cost = ALLOCNO_TEMP (i_allocno);
+ i_allocno_cost = IRA_ALLOCNO_TEMP (i_allocno);
i_allocno_pri
= (i_allocno_cost
/ (ALLOCNO_LEFT_CONFLICTS_NUM (i_allocno)
- * reg_class_nregs[ALLOCNO_COVER_CLASS (i_allocno)]
+ * ira_reg_class_nregs[ALLOCNO_COVER_CLASS
+ (i_allocno)]
[ALLOCNO_MODE (i_allocno)] + 1));
if (allocno == NULL || allocno_pri > i_allocno_pri
|| (allocno_pri == i_allocno_pri
@@ -1077,15 +1084,16 @@ push_allocnos_to_stack (void)
ira_assert (ALLOCNO_IN_GRAPH_P (allocno)
&& ALLOCNO_COVER_CLASS (allocno) == cover_class
&& (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
- + reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
+ + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
+ (allocno)]
> ALLOCNO_AVAILABLE_REGS_NUM (allocno)));
remove_allocno_from_bucket_and_push (allocno, false);
}
ira_assert (colorable_allocno_bucket == NULL
&& uncolorable_allocno_bucket == NULL);
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
ira_assert (uncolorable_allocnos_num[cover_class] == 0);
if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
splay_tree_delete (uncolorable_allocnos_splay_tree[cover_class]);
@@ -1097,12 +1105,12 @@ push_allocnos_to_stack (void)
static void
pop_allocnos_from_stack (void)
{
- allocno_t allocno;
+ ira_allocno_t allocno;
enum reg_class cover_class;
- for (;VEC_length (allocno_t, allocno_stack_vec) != 0;)
+ for (;VEC_length (ira_allocno_t, allocno_stack_vec) != 0;)
{
- allocno = VEC_pop (allocno_t, allocno_stack_vec);
+ allocno = VEC_pop (ira_allocno_t, allocno_stack_vec);
cover_class = ALLOCNO_COVER_CLASS (allocno);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
{
@@ -1137,29 +1145,29 @@ pop_allocnos_from_stack (void)
/* Set up the number of available hard registers for ALLOCNO. */
static void
-setup_allocno_available_regs_num (allocno_t allocno)
+setup_allocno_available_regs_num (ira_allocno_t allocno)
{
int i, n, hard_regs_num;
enum reg_class cover_class;
- allocno_t a;
+ ira_allocno_t a;
HARD_REG_SET temp_set;
cover_class = ALLOCNO_COVER_CLASS (allocno);
- ALLOCNO_AVAILABLE_REGS_NUM (allocno) = available_class_regs[cover_class];
+ ALLOCNO_AVAILABLE_REGS_NUM (allocno) = ira_available_class_regs[cover_class];
if (cover_class == NO_REGS)
return;
CLEAR_HARD_REG_SET (temp_set);
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
- hard_regs_num = class_hard_regs_num[cover_class];
+ hard_regs_num = ira_class_hard_regs_num[cover_class];
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
- IOR_HARD_REG_SET (temp_set, ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ IOR_HARD_REG_SET (temp_set, IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
if (a == allocno)
break;
}
for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
- if (TEST_HARD_REG_BIT (temp_set, class_hard_regs[cover_class][i]))
+ if (TEST_HARD_REG_BIT (temp_set, ira_class_hard_regs[cover_class][i]))
n++;
if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Reg %d of %s has %d regs less\n",
@@ -1169,37 +1177,37 @@ setup_allocno_available_regs_num (allocno_t allocno)
/* Set up ALLOCNO_LEFT_CONFLICTS_NUM for ALLOCNO. */
static void
-setup_allocno_left_conflicts_num (allocno_t allocno)
+setup_allocno_left_conflicts_num (ira_allocno_t allocno)
{
int i, hard_regs_num, hard_regno, conflict_allocnos_size;
- allocno_t a, conflict_allocno;
+ ira_allocno_t a, conflict_allocno;
enum reg_class cover_class;
HARD_REG_SET temp_set;
- allocno_conflict_iterator aci;
+ ira_allocno_conflict_iterator aci;
cover_class = ALLOCNO_COVER_CLASS (allocno);
- hard_regs_num = class_hard_regs_num[cover_class];
+ hard_regs_num = ira_class_hard_regs_num[cover_class];
CLEAR_HARD_REG_SET (temp_set);
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
{
- IOR_HARD_REG_SET (temp_set, ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ IOR_HARD_REG_SET (temp_set, IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
if (a == allocno)
break;
}
AND_HARD_REG_SET (temp_set, reg_class_contents[cover_class]);
- AND_COMPL_HARD_REG_SET (temp_set, no_alloc_regs);
+ AND_COMPL_HARD_REG_SET (temp_set, ira_no_alloc_regs);
conflict_allocnos_size = 0;
- if (! hard_reg_set_equal_p (temp_set, zero_hard_reg_set))
+ if (! hard_reg_set_equal_p (temp_set, ira_zero_hard_reg_set))
for (i = 0; i < (int) hard_regs_num; i++)
{
- hard_regno = class_hard_regs[cover_class][i];
+ hard_regno = ira_class_hard_regs[cover_class][i];
if (TEST_HARD_REG_BIT (temp_set, hard_regno))
{
conflict_allocnos_size++;
CLEAR_HARD_REG_BIT (temp_set, hard_regno);
- if (hard_reg_set_equal_p (temp_set, zero_hard_reg_set))
+ if (hard_reg_set_equal_p (temp_set, ira_zero_hard_reg_set))
break;
}
}
@@ -1226,7 +1234,7 @@ setup_allocno_left_conflicts_num (allocno_t allocno)
}
if (! ALLOCNO_ASSIGNED_P (conflict_allocno))
conflict_allocnos_size
- += (reg_class_nregs
+ += (ira_reg_class_nregs
[cover_class][ALLOCNO_MODE (conflict_allocno)]);
else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno))
>= 0)
@@ -1255,24 +1263,24 @@ setup_allocno_left_conflicts_num (allocno_t allocno)
/* Put ALLOCNO in a bucket corresponding to the number and size of its
conflicting allocnos and hard registers. */
static void
-put_allocno_into_bucket (allocno_t allocno)
+put_allocno_into_bucket (ira_allocno_t allocno)
{
int hard_regs_num;
enum reg_class cover_class;
cover_class = ALLOCNO_COVER_CLASS (allocno);
- hard_regs_num = class_hard_regs_num[cover_class];
+ hard_regs_num = ira_class_hard_regs_num[cover_class];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
return;
ALLOCNO_IN_GRAPH_P (allocno) = true;
setup_allocno_left_conflicts_num (allocno);
setup_allocno_available_regs_num (allocno);
if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
- + reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
+ + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
<= ALLOCNO_AVAILABLE_REGS_NUM (allocno))
- add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
+ add_ira_allocno_to_bucket (allocno, &colorable_allocno_bucket);
else
- add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
+ add_ira_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
}
/* The function is used to sort allocnos according to their execution
@@ -1280,7 +1288,7 @@ put_allocno_into_bucket (allocno_t allocno)
static int
copy_freq_compare_func (const void *v1p, const void *v2p)
{
- copy_t cp1 = *(const copy_t *) v1p, cp2 = *(const copy_t *) v2p;
+ ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
int pri1, pri2;
pri1 = cp1->freq;
@@ -1297,9 +1305,9 @@ copy_freq_compare_func (const void *v1p, const void *v2p)
allocnos A1 and A2 (more accurately, merging the A2 set into the A1
set). */
static void
-merge_allocnos (allocno_t a1, allocno_t a2)
+merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
- allocno_t a, first, last, next;
+ ira_allocno_t a, first, last, next;
first = ALLOCNO_FIRST_COALESCED_ALLOCNO (a1);
if (first == ALLOCNO_FIRST_COALESCED_ALLOCNO (a2))
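
merge_allocnos works on the circular rings threaded through ALLOCNO_NEXT_COALESCED_ALLOCNO: every member of one ring is re-pointed at the surviving representative and the two rings are spliced by exchanging successors. A self-contained sketch with simplified node types:

#include <stddef.h>

struct ring_node
{
  struct ring_node *next;   /* circular list of coalesced nodes */
  struct ring_node *first;  /* representative of the set        */
};

/* Merge the set containing A2 into the set containing A1. */
static void
merge_rings (struct ring_node *a1, struct ring_node *a2)
{
  struct ring_node *first = a1->first;

  if (first == a2->first)
    return;                          /* already coalesced */

  /* Re-point every member of A2's ring at A1's representative. */
  struct ring_node *p = a2;
  do
    {
      p->first = first;
      p = p->next;
    }
  while (p != a2);

  /* Splice the two circular lists by exchanging successors. */
  struct ring_node *t = a1->next;
  a1->next = a2->next;
  a2->next = t;
}
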
@@ -1324,10 +1332,11 @@ merge_allocnos (allocno_t a1, allocno_t a2)
and during the reload pass we coalesce allocnos for sharing stack
memory slots. */
static bool
-coalesced_allocno_conflict_p (allocno_t a1, allocno_t a2, bool reload_p)
+coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2,
+ bool reload_p)
{
- allocno_t a, conflict_allocno;
- allocno_conflict_iterator aci;
+ ira_allocno_t a, conflict_allocno;
+ ira_allocno_conflict_iterator aci;
if (allocno_coalesced_p)
{
@@ -1349,7 +1358,7 @@ coalesced_allocno_conflict_p (allocno_t a1, allocno_t a2, bool reload_p)
conflict_allocno
= ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
{
- if (allocno_live_ranges_intersect_p (a, conflict_allocno))
+ if (ira_allocno_live_ranges_intersect_p (a, conflict_allocno))
return true;
if (conflict_allocno == a1)
break;
@@ -1377,27 +1386,27 @@ coalesced_allocno_conflict_p (allocno_t a1, allocno_t a2, bool reload_p)
static void
coalesce_allocnos (bool reload_p)
{
- allocno_t a;
- copy_t cp, next_cp, *sorted_copies;
+ ira_allocno_t a;
+ ira_copy_t cp, next_cp, *sorted_copies;
enum reg_class cover_class;
enum machine_mode mode;
unsigned int j;
int i, n, cp_num, regno;
bitmap_iterator bi;
- sorted_copies = ira_allocate (copies_num * sizeof (copy_t));
+ sorted_copies = ira_allocate (ira_copies_num * sizeof (ira_copy_t));
cp_num = 0;
/* Collect copies. */
EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
{
- a = allocnos[j];
+ a = ira_allocnos[j];
regno = ALLOCNO_REGNO (a);
if ((! reload_p && ALLOCNO_ASSIGNED_P (a))
|| (reload_p
&& (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
- || (regno < reg_equiv_len
- && (reg_equiv_const[regno] != NULL_RTX
- || reg_equiv_invariant_p[regno])))))
+ || (regno < ira_reg_equiv_len
+ && (ira_reg_equiv_const[regno] != NULL_RTX
+ || ira_reg_equiv_invariant_p[regno])))))
continue;
cover_class = ALLOCNO_COVER_CLASS (a);
mode = ALLOCNO_MODE (a);
@@ -1415,9 +1424,9 @@ coalesce_allocnos (bool reload_p)
|| (reload_p
&& ALLOCNO_ASSIGNED_P (cp->second)
&& ALLOCNO_HARD_REGNO (cp->second) < 0
- && (regno >= reg_equiv_len
- || (! reg_equiv_invariant_p[regno]
- && reg_equiv_const[regno] == NULL_RTX)))))
+ && (regno >= ira_reg_equiv_len
+ || (! ira_reg_equiv_invariant_p[regno]
+ && ira_reg_equiv_const[regno] == NULL_RTX)))))
sorted_copies[cp_num++] = cp;
}
else if (cp->second == a)
@@ -1426,7 +1435,7 @@ coalesce_allocnos (bool reload_p)
gcc_unreachable ();
}
}
- qsort (sorted_copies, cp_num, sizeof (copy_t), copy_freq_compare_func);
+ qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
/* Coalesce copies, most frequently executed first. */
for (; cp_num != 0;)
{
@@ -1468,7 +1477,7 @@ color_allocnos (void)
{
unsigned int i;
bitmap_iterator bi;
- allocno_t a;
+ ira_allocno_t a;
allocno_coalesced_p = false;
processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
@@ -1479,7 +1488,7 @@ color_allocnos (void)
uncolorable_allocno_bucket = NULL;
EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
- a = allocnos[i];
+ a = ira_allocnos[i];
if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
{
ALLOCNO_HARD_REGNO (a) = -1;
@@ -1499,10 +1508,10 @@ color_allocnos (void)
push_allocnos_to_stack ();
pop_allocnos_from_stack ();
if (flag_ira_coalesce)
- /* We don't need coalesced allocnos for reassign_pseudos. */
+ /* We don't need coalesced allocnos for ira_reassign_pseudos. */
EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
- a = allocnos[i];
+ a = ira_allocnos[i];
ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
}
@@ -1514,7 +1523,7 @@ color_allocnos (void)
/* Output information about the loop given by its LOOP_TREE_NODE. */
static void
-print_loop_title (loop_tree_node_t loop_tree_node)
+print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
unsigned int j;
bitmap_iterator bi;
@@ -1528,19 +1537,19 @@ print_loop_title (loop_tree_node_t loop_tree_node)
loop_tree_node->loop->header->index,
loop_depth (loop_tree_node->loop));
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->mentioned_allocnos, 0, j, bi)
- fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (allocnos[j]));
+ fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
fprintf (ira_dump_file, "\n modified regnos:");
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
fprintf (ira_dump_file, " %d", j);
fprintf (ira_dump_file, "\n border:");
EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
- fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (allocnos[j]));
+ fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
fprintf (ira_dump_file, "\n Pressure:");
- for (j = 0; (int) j < reg_class_cover_size; j++)
+ for (j = 0; (int) j < ira_reg_class_cover_size; j++)
{
enum reg_class cover_class;
- cover_class = reg_class_cover[j];
+ cover_class = ira_reg_class_cover[j];
if (loop_tree_node->reg_pressure[cover_class] == 0)
continue;
fprintf (ira_dump_file, " %s=%d", reg_class_names[cover_class],
@@ -1554,7 +1563,7 @@ print_loop_title (loop_tree_node_t loop_tree_node)
function is called for each loop during a top-down traversal of the
loop tree. */
static void
-color_pass (loop_tree_node_t loop_tree_node)
+color_pass (ira_loop_tree_node_t loop_tree_node)
{
int regno, hard_regno, index = -1;
int cost, exit_freq, enter_freq;
@@ -1562,8 +1571,8 @@ color_pass (loop_tree_node_t loop_tree_node)
bitmap_iterator bi;
enum machine_mode mode;
enum reg_class class, cover_class;
- allocno_t a, subloop_allocno;
- loop_tree_node_t subloop_node;
+ ira_allocno_t a, subloop_allocno;
+ ira_loop_tree_node_t subloop_node;
ira_assert (loop_tree_node->bb == NULL);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
@@ -1574,7 +1583,7 @@ color_pass (loop_tree_node_t loop_tree_node)
bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
{
- a = allocnos[j];
+ a = ira_allocnos[j];
if (! ALLOCNO_ASSIGNED_P (a))
continue;
bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
@@ -1589,13 +1598,13 @@ color_pass (loop_tree_node_t loop_tree_node)
ira_assert (subloop_node->bb == NULL);
EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
{
- a = allocnos[j];
+ a = ira_allocnos[j];
mode = ALLOCNO_MODE (a);
class = ALLOCNO_COVER_CLASS (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
if (hard_regno >= 0)
{
- index = class_hard_reg_index[class][hard_regno];
+ index = ira_class_hard_reg_index[class][hard_regno];
ira_assert (index >= 0);
}
regno = ALLOCNO_REGNO (a);
@@ -1607,7 +1616,7 @@ color_pass (loop_tree_node_t loop_tree_node)
continue;
if ((flag_ira_algorithm == IRA_ALGORITHM_MIXED
&& (loop_tree_node->reg_pressure[class]
- <= available_class_regs[class]))
+ <= ira_available_class_regs[class]))
|| (hard_regno < 0
&& ! bitmap_bit_p (subloop_node->mentioned_allocnos,
ALLOCNO_NUM (subloop_allocno))))
@@ -1619,15 +1628,15 @@ color_pass (loop_tree_node_t loop_tree_node)
if (hard_regno >= 0)
update_copy_costs (subloop_allocno, true);
/* We don't need updated costs anymore: */
- free_allocno_updated_costs (subloop_allocno);
+ ira_free_allocno_updated_costs (subloop_allocno);
}
continue;
}
- exit_freq = loop_edge_freq (subloop_node, regno, true);
- enter_freq = loop_edge_freq (subloop_node, regno, false);
- ira_assert (regno < reg_equiv_len);
- if (reg_equiv_invariant_p[regno]
- || reg_equiv_const[regno] != NULL_RTX)
+ exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
+ enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
+ ira_assert (regno < ira_reg_equiv_len);
+ if (ira_reg_equiv_invariant_p[regno]
+ || ira_reg_equiv_const[regno] != NULL_RTX)
{
if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
{
@@ -1636,32 +1645,32 @@ color_pass (loop_tree_node_t loop_tree_node)
if (hard_regno >= 0)
update_copy_costs (subloop_allocno, true);
/* We don't need updated costs anymore: */
- free_allocno_updated_costs (subloop_allocno);
+ ira_free_allocno_updated_costs (subloop_allocno);
}
}
else if (hard_regno < 0)
{
ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
- -= ((memory_move_cost[mode][class][1] * enter_freq)
- + (memory_move_cost[mode][class][0] * exit_freq));
+ -= ((ira_memory_move_cost[mode][class][1] * enter_freq)
+ + (ira_memory_move_cost[mode][class][0] * exit_freq));
}
else
{
cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_HARD_REG_COSTS (subloop_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (subloop_allocno));
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno),
cover_class, 0);
- cost = (register_move_cost[mode][class][class]
+ cost = (ira_register_move_cost[mode][class][class]
* (exit_freq + enter_freq));
ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
-= cost;
ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
- += (memory_move_cost[mode][class][0] * enter_freq
- + memory_move_cost[mode][class][1] * exit_freq);
+ += (ira_memory_move_cost[mode][class][0] * enter_freq
+ + ira_memory_move_cost[mode][class][1] * exit_freq);
if (ALLOCNO_COVER_CLASS_COST (subloop_allocno)
> ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index])
ALLOCNO_COVER_CLASS_COST (subloop_allocno)
@@ -1675,7 +1684,7 @@ color_pass (loop_tree_node_t loop_tree_node)
continue;
if ((flag_ira_algorithm == IRA_ALGORITHM_MIXED
&& loop_tree_node->reg_pressure[class]
- <= available_class_regs[class])
+ <= ira_available_class_regs[class])
|| (hard_regno < 0
&& ! bitmap_bit_p (subloop_node->mentioned_allocnos,
ALLOCNO_NUM (subloop_allocno))))
@@ -1687,28 +1696,28 @@ color_pass (loop_tree_node_t loop_tree_node)
if (hard_regno >= 0)
update_copy_costs (subloop_allocno, true);
/* We don't need updated costs anymore: */
- free_allocno_updated_costs (subloop_allocno);
+ ira_free_allocno_updated_costs (subloop_allocno);
}
}
else if (flag_ira_propagate_cost && hard_regno >= 0)
{
- exit_freq = loop_edge_freq (subloop_node, -1, true);
- enter_freq = loop_edge_freq (subloop_node, -1, false);
- cost = (register_move_cost[mode][class][class]
+ exit_freq = ira_loop_edge_freq (subloop_node, -1, true);
+ enter_freq = ira_loop_edge_freq (subloop_node, -1, false);
+ cost = (ira_register_move_cost[mode][class][class]
* (exit_freq + enter_freq));
cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_HARD_REG_COSTS (subloop_allocno), cover_class,
ALLOCNO_COVER_CLASS_COST (subloop_allocno));
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno),
cover_class, 0);
ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
-= cost;
ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
- += (memory_move_cost[mode][class][0] * enter_freq
- + memory_move_cost[mode][class][1] * exit_freq);
+ += (ira_memory_move_cost[mode][class][0] * enter_freq
+ + ira_memory_move_cost[mode][class][1] * exit_freq);
if (ALLOCNO_COVER_CLASS_COST (subloop_allocno)
> ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index])
ALLOCNO_COVER_CLASS_COST (subloop_allocno)
@@ -1725,17 +1734,18 @@ static void
do_coloring (void)
{
coloring_allocno_bitmap = ira_allocate_bitmap ();
- allocnos_for_spilling = ira_allocate (sizeof (allocno_t) * allocnos_num);
+ allocnos_for_spilling
+ = ira_allocate (sizeof (ira_allocno_t) * ira_allocnos_num);
splay_tree_node_pool = create_alloc_pool ("splay tree nodes",
sizeof (struct splay_tree_node_s),
100);
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
- traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
+ ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
- print_disposition (ira_dump_file);
+ ira_print_disposition (ira_dump_file);
free_alloc_pool (splay_tree_node_pool);
ira_free_bitmap (coloring_allocno_bitmap);
@@ -1757,9 +1767,9 @@ move_spill_restore (void)
int enter_freq, exit_freq;
enum machine_mode mode;
enum reg_class class;
- allocno_t a, parent_allocno, subloop_allocno;
- loop_tree_node_t parent, loop_node, subloop_node;
- allocno_iterator ai;
+ ira_allocno_t a, parent_allocno, subloop_allocno;
+ ira_loop_tree_node_t parent, loop_node, subloop_node;
+ ira_allocno_iterator ai;
for (;;)
{
@@ -1777,12 +1787,12 @@ move_spill_restore (void)
copies and the reload pass can spill the allocno set
by a copy although the allocno will not get a memory
slot. */
- || reg_equiv_invariant_p[regno]
- || reg_equiv_const[regno] != NULL_RTX)
+ || ira_reg_equiv_invariant_p[regno]
+ || ira_reg_equiv_const[regno] != NULL_RTX)
continue;
mode = ALLOCNO_MODE (a);
class = ALLOCNO_COVER_CLASS (a);
- index = class_hard_reg_index[class][hard_regno];
+ index = ira_class_hard_reg_index[class][hard_regno];
ira_assert (index >= 0);
cost = (ALLOCNO_MEMORY_COST (a)
- (ALLOCNO_HARD_REG_COSTS (a) == NULL
@@ -1803,34 +1813,36 @@ move_spill_restore (void)
- (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
? ALLOCNO_COVER_CLASS_COST (subloop_allocno)
: ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
- exit_freq = loop_edge_freq (subloop_node, regno, true);
- enter_freq = loop_edge_freq (subloop_node, regno, false);
+ exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
+ enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
- cost -= (memory_move_cost[mode][class][0] * exit_freq
- + memory_move_cost[mode][class][1] * enter_freq);
+ cost -= (ira_memory_move_cost[mode][class][0] * exit_freq
+ + ira_memory_move_cost[mode][class][1] * enter_freq);
else
{
- cost += (memory_move_cost[mode][class][0] * exit_freq
- + memory_move_cost[mode][class][1] * enter_freq);
+ cost
+ += (ira_memory_move_cost[mode][class][0] * exit_freq
+ + ira_memory_move_cost[mode][class][1] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (register_move_cost[mode][class][class]
+ cost -= (ira_register_move_cost[mode][class][class]
* (exit_freq + enter_freq));
}
}
if ((parent = loop_node->parent) != NULL
&& (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
{
- exit_freq = loop_edge_freq (loop_node, regno, true);
- enter_freq = loop_edge_freq (loop_node, regno, false);
+ exit_freq = ira_loop_edge_freq (loop_node, regno, true);
+ enter_freq = ira_loop_edge_freq (loop_node, regno, false);
if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
- cost -= (memory_move_cost[mode][class][0] * exit_freq
- + memory_move_cost[mode][class][1] * enter_freq);
+ cost -= (ira_memory_move_cost[mode][class][0] * exit_freq
+ + ira_memory_move_cost[mode][class][1] * enter_freq);
else
{
- cost += (memory_move_cost[mode][class][1] * exit_freq
- + memory_move_cost[mode][class][0] * enter_freq);
+ cost
+ += (ira_memory_move_cost[mode][class][1] * exit_freq
+ + ira_memory_move_cost[mode][class][0] * enter_freq);
if (hard_regno2 != hard_regno)
- cost -= (register_move_cost[mode][class][class]
+ cost -= (ira_register_move_cost[mode][class][class]
* (exit_freq + enter_freq));
}
}
@@ -1859,13 +1871,13 @@ move_spill_restore (void)
for allocno A. It is done by processing its copies containing
other allocnos already assigned. */
static void
-update_curr_costs (allocno_t a)
+update_curr_costs (ira_allocno_t a)
{
int i, hard_regno, cost;
enum machine_mode mode;
enum reg_class cover_class, class;
- allocno_t another_a;
- copy_t cp, next_cp;
+ ira_allocno_t another_a;
+ ira_copy_t cp, next_cp;
ira_assert (! ALLOCNO_ASSIGNED_P (a));
cover_class = ALLOCNO_COVER_CLASS (a);
@@ -1891,16 +1903,16 @@ update_curr_costs (allocno_t a)
|| (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
continue;
class = REGNO_REG_CLASS (hard_regno);
- i = class_hard_reg_index[cover_class][hard_regno];
+ i = ira_class_hard_reg_index[cover_class][hard_regno];
ira_assert (i >= 0);
cost = (cp->first == a
- ? register_move_cost[mode][class][cover_class]
- : register_move_cost[mode][cover_class][class]);
- allocate_and_set_or_copy_costs
+ ? ira_register_move_cost[mode][class][cover_class]
+ : ira_register_move_cost[mode][cover_class][class]);
+ ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
cover_class, ALLOCNO_COVER_CLASS_COST (a),
ALLOCNO_HARD_REG_COSTS (a));
- allocate_and_set_or_copy_costs
+ ira_allocate_and_set_or_copy_costs
(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
cover_class, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
@@ -1914,10 +1926,10 @@ static int *allocno_priorities;
/* Allocate array ALLOCNO_PRIORITIES and set up priorities for N allocnos in
array CONSIDERATION_ALLOCNOS. */
static void
-start_allocno_priorities (allocno_t *consideration_allocnos, int n)
+start_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
{
int i, length;
- allocno_t a;
+ ira_allocno_t a;
allocno_live_range_t r;
for (i = 0; i < n; i++)
@@ -1943,7 +1955,8 @@ start_allocno_priorities (allocno_t *consideration_allocnos, int n)
static int
allocno_priority_compare_func (const void *v1p, const void *v2p)
{
- allocno_t a1 = *(const allocno_t *) v1p, a2 = *(const allocno_t *) v2p;
+ ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
+ ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
int pri1, pri2;
pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
@@ -1963,14 +1976,14 @@ allocno_priority_compare_func (const void *v1p, const void *v2p)
chance to get a hard register. We use a simple assignment algorithm
based on priorities. */
void
-reassign_conflict_allocnos (int start_regno)
+ira_reassign_conflict_allocnos (int start_regno)
{
int i, allocnos_to_color_num;
- allocno_t a, conflict_a;
- allocno_conflict_iterator aci;
+ ira_allocno_t a, conflict_a;
+ ira_allocno_conflict_iterator aci;
enum reg_class cover_class;
bitmap allocnos_to_color;
- allocno_iterator ai;
+ ira_allocno_iterator ai;
allocnos_to_color = ira_allocate_bitmap ();
allocnos_to_color_num = 0;
@@ -2006,7 +2019,7 @@ reassign_conflict_allocnos (int start_regno)
if (allocnos_to_color_num > 1)
{
start_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
- qsort (sorted_allocnos, allocnos_to_color_num, sizeof (allocno_t),
+ qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
allocno_priority_compare_func);
}
for (i = 0; i < allocnos_to_color_num; i++)
@@ -2076,8 +2089,8 @@ coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
const int regno1 = *(const int *) v1p;
const int regno2 = *(const int *) v2p;
- allocno_t a1 = regno_allocno_map[regno1];
- allocno_t a2 = regno_allocno_map[regno2];
+ ira_allocno_t a1 = ira_regno_allocno_map[regno1];
+ ira_allocno_t a2 = ira_regno_allocno_map[regno2];
int diff, slot_num1, slot_num2;
int total_size1, total_size2;
@@ -2107,12 +2120,12 @@ static void
setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
{
int i, num, regno, cost;
- allocno_t allocno, a;
+ ira_allocno_t allocno, a;
for (num = i = 0; i < n; i++)
{
regno = pseudo_regnos[i];
- allocno = regno_allocno_map[regno];
+ allocno = ira_regno_allocno_map[regno];
if (allocno == NULL)
{
regno_coalesced_allocno_cost[regno] = 0;
@@ -2147,15 +2160,15 @@ setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
regnos in array PSEUDO_REGNOS of length N. */
static int
collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
- allocno_t *spilled_coalesced_allocnos)
+ ira_allocno_t *spilled_coalesced_allocnos)
{
int i, num, regno;
- allocno_t allocno;
+ ira_allocno_t allocno;
for (num = i = 0; i < n; i++)
{
regno = pseudo_regnos[i];
- allocno = regno_allocno_map[regno];
+ allocno = ira_regno_allocno_map[regno];
if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
|| ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
continue;
@@ -2170,10 +2183,10 @@ collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
some allocnos were coalesced in the function. */
static bool
-coalesce_spill_slots (allocno_t *spilled_coalesced_allocnos, int num)
+coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
int i, j;
- allocno_t allocno, a;
+ ira_allocno_t allocno, a;
bool merged_p = false;
/* Coalesce non-conflicting spilled allocnos preferring most
@@ -2182,17 +2195,17 @@ coalesce_spill_slots (allocno_t *spilled_coalesced_allocnos, int num)
{
allocno = spilled_coalesced_allocnos[i];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
- || (ALLOCNO_REGNO (allocno) < reg_equiv_len
- && (reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
- || reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
+ || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
+ && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
+ || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
continue;
for (j = 0; j < i; j++)
{
a = spilled_coalesced_allocnos[j];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) != a
- || (ALLOCNO_REGNO (a) < reg_equiv_len
- && (reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
- || reg_equiv_const[ALLOCNO_REGNO (a)] != NULL_RTX))
+ || (ALLOCNO_REGNO (a) < ira_reg_equiv_len
+ && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
+ || ira_reg_equiv_const[ALLOCNO_REGNO (a)] != NULL_RTX))
|| coalesced_allocno_conflict_p (allocno, a, true))
continue;
allocno_coalesced_p = true;
@@ -2215,14 +2228,14 @@ coalesce_spill_slots (allocno_t *spilled_coalesced_allocnos, int num)
memory-memory move insns. This function is called by the
reload pass. */
void
-sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
- unsigned int *reg_max_ref_width)
+ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
+ unsigned int *reg_max_ref_width)
{
int max_regno = max_reg_num ();
int i, regno, num, slot_num;
- allocno_t allocno, a;
- allocno_iterator ai;
- allocno_t *spilled_coalesced_allocnos;
+ ira_allocno_t allocno, a;
+ ira_allocno_iterator ai;
+ ira_allocno_t *spilled_coalesced_allocnos;
processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
/* Set up allocnos that can be coalesced. */
@@ -2230,7 +2243,7 @@ sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
for (i = 0; i < n; i++)
{
regno = pseudo_regnos[i];
- allocno = regno_allocno_map[regno];
+ allocno = ira_regno_allocno_map[regno];
if (allocno != NULL)
bitmap_set_bit (coloring_allocno_bitmap,
ALLOCNO_NUM (allocno));
@@ -2246,7 +2259,7 @@ sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
allocno sets. */
qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
spilled_coalesced_allocnos
- = ira_allocate (allocnos_num * sizeof (allocno_t));
+ = ira_allocate (ira_allocnos_num * sizeof (ira_allocno_t));
/* Collect allocnos representing the spilled coalesced allocno
sets. */
num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
@@ -2255,7 +2268,8 @@ sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
&& coalesce_spill_slots (spilled_coalesced_allocnos, num))
{
setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
- qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
+ qsort (pseudo_regnos, n, sizeof (int),
+ coalesced_pseudo_reg_freq_compare);
num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
spilled_coalesced_allocnos);
}
@@ -2271,9 +2285,9 @@ sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
allocno = spilled_coalesced_allocnos[i];
if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
|| ALLOCNO_HARD_REGNO (allocno) >= 0
- || (ALLOCNO_REGNO (allocno) < reg_equiv_len
- && (reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
- || reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
+ || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
+ && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
+ || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
continue;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
@@ -2295,7 +2309,7 @@ sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n");
}
- spilled_reg_stack_slots_num = slot_num - 1;
+ ira_spilled_reg_stack_slots_num = slot_num - 1;
ira_free (spilled_coalesced_allocnos);
/* Sort regnos according to the slot numbers. */
regno_max_ref_width = reg_max_ref_width;
@@ -2320,9 +2334,9 @@ sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
allocation of REGNO made by the reload. Remember that reg_renumber
reflects the change result. */
void
-mark_allocation_change (int regno)
+ira_mark_allocation_change (int regno)
{
- allocno_t a = regno_allocno_map[regno];
+ ira_allocno_t a = ira_regno_allocno_map[regno];
int old_hard_regno, hard_regno, cost;
enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
@@ -2334,32 +2348,32 @@ mark_allocation_change (int regno)
cost = -ALLOCNO_MEMORY_COST (a);
else
{
- ira_assert (class_hard_reg_index[cover_class][old_hard_regno] >= 0);
+ ira_assert (ira_class_hard_reg_index[cover_class][old_hard_regno] >= 0);
cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
? ALLOCNO_COVER_CLASS_COST (a)
: ALLOCNO_HARD_REG_COSTS (a)
- [class_hard_reg_index[cover_class][old_hard_regno]]);
+ [ira_class_hard_reg_index[cover_class][old_hard_regno]]);
update_copy_costs (a, false);
}
- overall_cost -= cost;
+ ira_overall_cost -= cost;
ALLOCNO_HARD_REGNO (a) = hard_regno;
if (hard_regno < 0)
{
ALLOCNO_HARD_REGNO (a) = -1;
cost += ALLOCNO_MEMORY_COST (a);
}
- else if (class_hard_reg_index[cover_class][hard_regno] >= 0)
+ else if (ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
{
cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
? ALLOCNO_COVER_CLASS_COST (a)
: ALLOCNO_HARD_REG_COSTS (a)
- [class_hard_reg_index[cover_class][hard_regno]]);
+ [ira_class_hard_reg_index[cover_class][hard_regno]]);
update_copy_costs (a, true);
}
else
/* Reload changed class of the allocno. */
cost = 0;
- overall_cost += cost;
+ ira_overall_cost += cost;
}
/* This function is called when reload deletes a memory-memory move. In
@@ -2367,10 +2381,10 @@ mark_allocation_change (int regno)
allocnos should not be changed in the future. Otherwise we risk
generating wrong code. */
void
-mark_memory_move_deletion (int dst_regno, int src_regno)
+ira_mark_memory_move_deletion (int dst_regno, int src_regno)
{
- allocno_t dst = regno_allocno_map[dst_regno];
- allocno_t src = regno_allocno_map[src_regno];
+ ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
+ ira_allocno_t src = ira_regno_allocno_map[src_regno];
ira_assert (dst != NULL && src != NULL
&& ALLOCNO_HARD_REGNO (dst) < 0
@@ -2383,15 +2397,16 @@ mark_memory_move_deletion (int dst_regno, int src_regno)
allocno A and return TRUE in the case of success. That is an
analog of retry_global_alloc for IRA. */
static bool
-allocno_reload_assign (allocno_t a, HARD_REG_SET forbidden_regs)
+allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
{
int hard_regno;
enum reg_class cover_class;
int regno = ALLOCNO_REGNO (a);
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), forbidden_regs);
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), forbidden_regs);
if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), call_used_reg_set);
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
+ call_used_reg_set);
ALLOCNO_ASSIGNED_P (a) = false;
ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
@@ -2404,15 +2419,16 @@ allocno_reload_assign (allocno_t a, HARD_REG_SET forbidden_regs)
ALLOCNO_HARD_REGNO (a) = -1;
else
{
- ira_assert (class_hard_reg_index[cover_class][hard_regno] >= 0);
- overall_cost -= (ALLOCNO_MEMORY_COST (a)
- - (ALLOCNO_HARD_REG_COSTS (a) == NULL
- ? ALLOCNO_COVER_CLASS_COST (a)
- : ALLOCNO_HARD_REG_COSTS (a)
- [class_hard_reg_index[cover_class][hard_regno]]));
+ ira_assert (ira_class_hard_reg_index[cover_class][hard_regno] >= 0);
+ ira_overall_cost -= (ALLOCNO_MEMORY_COST (a)
+ - (ALLOCNO_HARD_REG_COSTS (a) == NULL
+ ? ALLOCNO_COVER_CLASS_COST (a)
+ : ALLOCNO_HARD_REG_COSTS (a)
+ [ira_class_hard_reg_index
+ [cover_class][hard_regno]]));
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
- && ! hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
- call_used_reg_set))
+ && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
+ call_used_reg_set))
{
ira_assert (flag_caller_saves);
caller_save_needed = 1;
@@ -2458,16 +2474,16 @@ pseudo_reg_compare (const void *v1p, const void *v2p)
is called by the reload pass at the end of each reload
iteration. */
bool
-reassign_pseudos (int *spilled_pseudo_regs, int num,
- HARD_REG_SET bad_spill_regs,
- HARD_REG_SET *pseudo_forbidden_regs,
- HARD_REG_SET *pseudo_previous_regs, bitmap spilled)
+ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
+ HARD_REG_SET bad_spill_regs,
+ HARD_REG_SET *pseudo_forbidden_regs,
+ HARD_REG_SET *pseudo_previous_regs, bitmap spilled)
{
int i, m, n, regno;
bool changed_p;
- allocno_t a, conflict_a;
+ ira_allocno_t a, conflict_a;
HARD_REG_SET forbidden_regs;
- allocno_conflict_iterator aci;
+ ira_allocno_conflict_iterator aci;
if (num > 1)
qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
@@ -2481,8 +2497,8 @@ reassign_pseudos (int *spilled_pseudo_regs, int num,
IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
gcc_assert (reg_renumber[regno] < 0);
- a = regno_allocno_map[regno];
- mark_allocation_change (regno);
+ a = ira_regno_allocno_map[regno];
+ ira_mark_allocation_change (regno);
ira_assert (reg_renumber[regno] < 0);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file,
@@ -2512,7 +2528,7 @@ reassign_pseudos (int *spilled_pseudo_regs, int num,
for (i = n = 0; i < m; i++)
{
regno = spilled_pseudo_regs[i];
- a = regno_allocno_map[regno];
+ a = ira_regno_allocno_map[regno];
FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
if (ALLOCNO_HARD_REGNO (conflict_a) < 0
&& ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
@@ -2527,7 +2543,7 @@ reassign_pseudos (int *spilled_pseudo_regs, int num,
if (n != 0)
{
start_allocno_priorities (sorted_allocnos, n);
- qsort (sorted_allocnos, n, sizeof (allocno_t),
+ qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
allocno_priority_compare_func);
for (i = 0; i < n; i++)
{
@@ -2557,17 +2573,17 @@ reassign_pseudos (int *spilled_pseudo_regs, int num,
TOTAL_SIZE. In the case of failure to find a slot which can be
used for REGNO, the function returns NULL. */
rtx
-reuse_stack_slot (int regno, unsigned int inherent_size,
- unsigned int total_size)
+ira_reuse_stack_slot (int regno, unsigned int inherent_size,
+ unsigned int total_size)
{
unsigned int i;
int slot_num, best_slot_num;
int cost, best_cost;
- copy_t cp, next_cp;
- allocno_t another_allocno, allocno = regno_allocno_map[regno];
+ ira_copy_t cp, next_cp;
+ ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
rtx x;
bitmap_iterator bi;
- struct spilled_reg_stack_slot *slot = NULL;
+ struct ira_spilled_reg_stack_slot *slot = NULL;
ira_assert (flag_ira && inherent_size == PSEUDO_REGNO_BYTES (regno)
&& inherent_size <= total_size
@@ -2577,7 +2593,7 @@ reuse_stack_slot (int regno, unsigned int inherent_size,
slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
if (slot_num != -1)
{
- slot = &spilled_reg_stack_slots[slot_num];
+ slot = &ira_spilled_reg_stack_slots[slot_num];
x = slot->mem;
}
else
@@ -2586,9 +2602,11 @@ reuse_stack_slot (int regno, unsigned int inherent_size,
x = NULL_RTX;
/* It means that the pseudo was spilled in the reload pass; try
to reuse a slot. */
- for (slot_num = 0; slot_num < spilled_reg_stack_slots_num; slot_num++)
+ for (slot_num = 0;
+ slot_num < ira_spilled_reg_stack_slots_num;
+ slot_num++)
{
- slot = &spilled_reg_stack_slots[slot_num];
+ slot = &ira_spilled_reg_stack_slots[slot_num];
if (slot->mem == NULL_RTX)
continue;
if (slot->width < total_size
@@ -2598,8 +2616,8 @@ reuse_stack_slot (int regno, unsigned int inherent_size,
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
- another_allocno = regno_allocno_map[i];
- if (allocno_live_ranges_intersect_p (allocno, another_allocno))
+ another_allocno = ira_regno_allocno_map[i];
+ if (ira_allocno_live_ranges_intersect_p (allocno, another_allocno))
goto cont;
}
for (cost = 0, cp = ALLOCNO_COPIES (allocno);
@@ -2634,7 +2652,7 @@ reuse_stack_slot (int regno, unsigned int inherent_size,
}
if (best_cost >= 0)
{
- slot = &spilled_reg_stack_slots[best_slot_num];
+ slot = &ira_spilled_reg_stack_slots[best_slot_num];
SET_REGNO_REG_SET (&slot->spilled_regs, regno);
x = slot->mem;
ALLOCNO_HARD_REGNO (allocno) = -best_slot_num - 2;
@@ -2646,7 +2664,7 @@ reuse_stack_slot (int regno, unsigned int inherent_size,
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
- ira_assert (! pseudo_live_ranges_intersect_p (regno, i));
+ ira_assert (! ira_pseudo_live_ranges_intersect_p (regno, i));
}
SET_REGNO_REG_SET (&slot->spilled_regs, regno);
if (internal_flag_ira_verbose > 3 && ira_dump_file)
@@ -2667,23 +2685,23 @@ reuse_stack_slot (int regno, unsigned int inherent_size,
/* This is called by reload every time a new stack slot X with
TOTAL_SIZE was allocated for REGNO. We store this info for
- subsequent reuse_stack_slot calls. */
+ subsequent ira_reuse_stack_slot calls. */
void
-mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
+ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
{
- struct spilled_reg_stack_slot *slot;
+ struct ira_spilled_reg_stack_slot *slot;
int slot_num;
- allocno_t allocno;
+ ira_allocno_t allocno;
ira_assert (flag_ira && PSEUDO_REGNO_BYTES (regno) <= total_size);
- allocno = regno_allocno_map[regno];
+ allocno = ira_regno_allocno_map[regno];
slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
if (slot_num == -1)
{
- slot_num = spilled_reg_stack_slots_num++;
+ slot_num = ira_spilled_reg_stack_slots_num++;
ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
}
- slot = &spilled_reg_stack_slots[slot_num];
+ slot = &ira_spilled_reg_stack_slots[slot_num];
INIT_REG_SET (&slot->spilled_regs);
SET_REGNO_REG_SET (&slot->spilled_regs, regno);
slot->mem = x;
@@ -2711,7 +2729,7 @@ calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
bool in_p, out_p;
int length;
- allocno_t a;
+ ira_allocno_t a;
*nrefs = 0;
for (length = count = cost = i = 0;; i++)
@@ -2722,7 +2740,7 @@ calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
*nrefs += REG_N_REFS (regno);
hard_regno = reg_renumber[regno];
ira_assert (hard_regno >= 0);
- a = regno_allocno_map[regno];
+ a = ira_regno_allocno_map[regno];
length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
@@ -2738,11 +2756,11 @@ calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
{
saved_cost = 0;
if (in_p)
- saved_cost += memory_move_cost
+ saved_cost += ira_memory_move_cost
[ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][1];
if (out_p)
saved_cost
- += memory_move_cost
+ += ira_memory_move_cost
[ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][0];
cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
}
@@ -2764,8 +2782,8 @@ calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
function used by the reload pass to make better register spilling
decisions. */
bool
-better_spill_reload_regno_p (int *regnos, int *other_regnos,
- rtx in, rtx out, rtx insn)
+ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
+ rtx in, rtx out, rtx insn)
{
int cost, other_cost;
int length, other_length;
@@ -2805,17 +2823,17 @@ better_spill_reload_regno_p (int *regnos, int *other_regnos,
code for saving/restoring callee-clobbered hard registers around
calls (see caller-save.c). */
void
-collect_pseudo_call_clobbered_regs (int regno,
- HARD_REG_SET (*call_clobbered_regs))
+ira_collect_pseudo_call_clobbered_regs (int regno,
+ HARD_REG_SET (*call_clobbered_regs))
{
int i;
- allocno_t a;
+ ira_allocno_t a;
HARD_REG_SET clobbered_regs;
rtx call, *allocno_calls;
- a = regno_allocno_map[regno];
+ a = ira_regno_allocno_map[regno];
CLEAR_HARD_REG_SET (*call_clobbered_regs);
- allocno_calls = (VEC_address (rtx, regno_calls[regno])
+ allocno_calls = (VEC_address (rtx, ira_regno_calls[regno])
+ ALLOCNO_CALLS_CROSSED_START (a));
for (i = ALLOCNO_CALLS_CROSSED_NUM (a) - 1; i >= 0; i--)
{
@@ -2829,17 +2847,17 @@ collect_pseudo_call_clobbered_regs (int regno,
/* Allocate and initialize data necessary for assign_hard_reg. */
void
-initiate_ira_assign (void)
+ira_initiate_assign (void)
{
- sorted_allocnos = ira_allocate (sizeof (allocno_t) * allocnos_num);
+ sorted_allocnos = ira_allocate (sizeof (ira_allocno_t) * ira_allocnos_num);
consideration_allocno_bitmap = ira_allocate_bitmap ();
initiate_cost_update ();
- allocno_priorities = ira_allocate (sizeof (int) * allocnos_num);
+ allocno_priorities = ira_allocate (sizeof (int) * ira_allocnos_num);
}
/* Deallocate data used by assign_hard_reg. */
void
-finish_ira_assign (void)
+ira_finish_assign (void)
{
ira_free (sorted_allocnos);
ira_free_bitmap (consideration_allocno_bitmap);
@@ -2853,14 +2871,15 @@ finish_ira_assign (void)
void
ira_color (void)
{
- allocno_stack_vec = VEC_alloc (allocno_t, heap, allocnos_num);
- removed_splay_allocno_vec = VEC_alloc (allocno_t, heap, allocnos_num);
+ allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
+ removed_splay_allocno_vec
+ = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
- initiate_ira_assign ();
+ ira_initiate_assign ();
do_coloring ();
- finish_ira_assign ();
- VEC_free (allocno_t, heap, removed_splay_allocno_vec);
- VEC_free (allocno_t, heap, allocno_stack_vec);
+ ira_finish_assign ();
+ VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
+ VEC_free (ira_allocno_t, heap, allocno_stack_vec);
move_spill_restore ();
}
@@ -2881,12 +2900,12 @@ ira_fast_allocation (void)
#endif
enum reg_class cover_class;
enum machine_mode mode;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
allocno_live_range_t r;
HARD_REG_SET conflict_hard_regs, *used_hard_regs;
- allocno_priorities = ira_allocate (sizeof (int) * allocnos_num);
+ allocno_priorities = ira_allocate (sizeof (int) * ira_allocnos_num);
FOR_EACH_ALLOCNO (a, ai)
{
l = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
@@ -2897,16 +2916,16 @@ ira_fast_allocation (void)
* (ALLOCNO_MEMORY_COST (a)
- ALLOCNO_COVER_CLASS_COST (a))) / l)
* (10000 / REG_FREQ_MAX)
- * reg_class_nregs [ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
+ * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
}
- used_hard_regs = ira_allocate (sizeof (HARD_REG_SET) * max_point);
- for (i = 0; i < max_point; i++)
+ used_hard_regs = ira_allocate (sizeof (HARD_REG_SET) * ira_max_point);
+ for (i = 0; i < ira_max_point; i++)
CLEAR_HARD_REG_SET (used_hard_regs[i]);
- sorted_allocnos = ira_allocate (sizeof (allocno_t) * allocnos_num);
+ sorted_allocnos = ira_allocate (sizeof (ira_allocno_t) * ira_allocnos_num);
num = 0;
FOR_EACH_ALLOCNO (a, ai)
sorted_allocnos[num++] = a;
- qsort (sorted_allocnos, allocnos_num, sizeof (allocno_t),
+ qsort (sorted_allocnos, ira_allocnos_num, sizeof (ira_allocno_t),
allocno_priority_compare_func);
for (i = 0; i < num; i++)
{
@@ -2925,16 +2944,16 @@ ira_fast_allocation (void)
#ifdef STACK_REGS
no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
#endif
- class_size = class_hard_regs_num[cover_class];
+ class_size = ira_class_hard_regs_num[cover_class];
for (j = 0; j < class_size; j++)
{
- hard_regno = class_hard_regs[cover_class][j];
+ hard_regno = ira_class_hard_regs[cover_class][j];
#ifdef STACK_REGS
if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
&& hard_regno <= LAST_STACK_REG)
continue;
#endif
- if (!hard_reg_not_in_set_p (hard_regno, mode, conflict_hard_regs)
+ if (!ira_hard_reg_not_in_set_p (hard_regno, mode, conflict_hard_regs)
|| (TEST_HARD_REG_BIT
(prohibited_class_mode_regs[cover_class][mode], hard_regno)))
continue;
@@ -2942,7 +2961,7 @@ ira_fast_allocation (void)
for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
for (k = r->start; k <= r->finish; k++)
IOR_HARD_REG_SET (used_hard_regs[k],
- reg_mode_hard_regset[hard_regno][mode]);
+ ira_reg_mode_hard_regset[hard_regno][mode]);
break;
}
}
@@ -2950,5 +2969,5 @@ ira_fast_allocation (void)
ira_free (used_hard_regs);
ira_free (allocno_priorities);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
- print_disposition (ira_dump_file);
+ ira_print_disposition (ira_dump_file);
}
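
One detail of ira_reuse_stack_slot and ira_mark_new_stack_slot above that is
easy to miss: a spilled allocno's slot number is packed into its hard-register
field. -1 keeps its usual meaning of "no hard register", so slot N is stored
as -N - 2 and recovered the same way. A minimal standalone sketch of that
round-trip, with encode_slot/decode_slot as illustrative names rather than
anything in IRA:

  #include <assert.h>

  /* Stand-ins for the ALLOCNO_HARD_REGNO bookkeeping: -1 means
     "spilled, no slot assigned yet"; values <= -2 encode a slot.  */
  static int
  encode_slot (int slot_num)
  {
    return -slot_num - 2;
  }

  static int
  decode_slot (int hard_regno)
  {
    return -hard_regno - 2;
  }

  int
  main (void)
  {
    int hard_regno = -1;                      /* freshly spilled pseudo */
    assert (decode_slot (hard_regno) == -1);  /* reads back as "no slot" */
    hard_regno = encode_slot (5);             /* assign stack slot 5 */
    assert (hard_regno == -7);
    assert (decode_slot (hard_regno) == 5);
    return 0;
  }

This is why both functions test slot_num != -1 after decoding: a pseudo
spilled by reload itself has no slot recorded yet and must first search the
existing ira_spilled_reg_stack_slots entries.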
diff --git a/gcc/ira-conflicts.c b/gcc/ira-conflicts.c
index 0e278b25ee7..a480d9086f0 100644
--- a/gcc/ira-conflicts.c
+++ b/gcc/ira-conflicts.c
@@ -42,12 +42,12 @@ along with GCC; see the file COPYING3. If not see
allocno copy creation and allocno info accumulation on upper level
regions. */
-/* allocnos_num array of arrays of bits, recording whether two
+/* ira_allocnos_num array of arrays of bits, recording whether two
allocnos conflict (can't go in the same hardware register).
Some arrays will be used as conflict bit vectors of the
corresponding allocnos; see function build_allocno_conflicts. */
-static INT_TYPE **conflicts;
+static IRA_INT_TYPE **conflicts;
/* Macro to test a conflict of A1 and A2 in `conflicts'. */
#define CONFLICT_ALLOCNO_P(A1, A2) \
@@ -67,15 +67,15 @@ build_conflict_bit_table (void)
int i, num, id, allocated_words_num, conflict_bit_vec_words_num;
unsigned int j;
enum reg_class cover_class;
- allocno_t allocno, live_a;
+ ira_allocno_t allocno, live_a;
allocno_live_range_t r;
- allocno_iterator ai;
+ ira_allocno_iterator ai;
sparseset allocnos_live;
int allocno_set_words;
- allocno_set_words = (allocnos_num + INT_BITS - 1) / INT_BITS;
- allocnos_live = sparseset_alloc (allocnos_num);
- conflicts = ira_allocate (sizeof (INT_TYPE *) * allocnos_num);
+ allocno_set_words = (ira_allocnos_num + IRA_INT_BITS - 1) / IRA_INT_BITS;
+ allocnos_live = sparseset_alloc (ira_allocnos_num);
+ conflicts = ira_allocate (sizeof (IRA_INT_TYPE *) * ira_allocnos_num);
allocated_words_num = 0;
FOR_EACH_ALLOCNO (allocno, ai)
{
@@ -86,22 +86,23 @@ build_conflict_bit_table (void)
continue;
}
conflict_bit_vec_words_num
- = (ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno) + INT_BITS) / INT_BITS;
+ = ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno) + IRA_INT_BITS)
+ / IRA_INT_BITS);
allocated_words_num += conflict_bit_vec_words_num;
conflicts[num]
- = ira_allocate (sizeof (INT_TYPE) * conflict_bit_vec_words_num);
+ = ira_allocate (sizeof (IRA_INT_TYPE) * conflict_bit_vec_words_num);
memset (conflicts[num], 0,
- sizeof (INT_TYPE) * conflict_bit_vec_words_num);
+ sizeof (IRA_INT_TYPE) * conflict_bit_vec_words_num);
}
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf
(ira_dump_file,
"+++Allocating %ld bytes for conflict table (uncompressed size %ld)\n",
- (long) allocated_words_num * sizeof (INT_TYPE),
- (long) allocno_set_words * allocnos_num * sizeof (INT_TYPE));
- for (i = 0; i < max_point; i++)
+ (long) allocated_words_num * sizeof (IRA_INT_TYPE),
+ (long) allocno_set_words * ira_allocnos_num * sizeof (IRA_INT_TYPE));
+ for (i = 0; i < ira_max_point; i++)
{
- for (r = start_point_ranges[i]; r != NULL; r = r->start_next)
+ for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
{
allocno = r->allocno;
num = ALLOCNO_NUM (allocno);
@@ -110,7 +111,7 @@ build_conflict_bit_table (void)
sparseset_set_bit (allocnos_live, num);
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, j)
{
- live_a = allocnos[j];
+ live_a = ira_allocnos[j];
if (cover_class == ALLOCNO_COVER_CLASS (live_a)
/* Don't set up a conflict of the allocno with itself. */
&& num != (int) j)
@@ -126,7 +127,7 @@ build_conflict_bit_table (void)
}
}
- for (r = finish_point_ranges[i]; r != NULL; r = r->finish_next)
+ for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
}
sparseset_free (allocnos_live);
@@ -304,10 +305,10 @@ static bool
process_regs_for_copy (rtx reg1, rtx reg2, rtx insn, int freq)
{
int hard_regno, cost, index;
- allocno_t a;
+ ira_allocno_t a;
enum reg_class class, cover_class;
enum machine_mode mode;
- copy_t cp;
+ ira_copy_t cp;
gcc_assert (REG_P (reg1) && REG_P (reg2));
if (HARD_REGISTER_P (reg1))
@@ -324,31 +325,31 @@ process_regs_for_copy (rtx reg1, rtx reg2, rtx insn, int freq)
}
else
{
- cp = add_allocno_copy (ira_curr_regno_allocno_map[REGNO (reg1)],
- ira_curr_regno_allocno_map[REGNO (reg2)],
- freq, insn, ira_curr_loop_tree_node);
+ cp = ira_add_allocno_copy (ira_curr_regno_allocno_map[REGNO (reg1)],
+ ira_curr_regno_allocno_map[REGNO (reg2)],
+ freq, insn, ira_curr_loop_tree_node);
bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
return true;
}
class = REGNO_REG_CLASS (hard_regno);
mode = ALLOCNO_MODE (a);
cover_class = ALLOCNO_COVER_CLASS (a);
- if (! class_subset_p[class][cover_class])
+ if (! ira_class_subset_p[class][cover_class])
return false;
if (reg_class_size[class] <= (unsigned) CLASS_MAX_NREGS (class, mode))
/* It is already taken into account in ira-costs.c. */
return false;
- index = class_hard_reg_index[cover_class][hard_regno];
+ index = ira_class_hard_reg_index[cover_class][hard_regno];
if (index < 0)
return false;
if (HARD_REGISTER_P (reg1))
- cost = register_move_cost[mode][cover_class][class] * freq;
+ cost = ira_register_move_cost[mode][cover_class][class] * freq;
else
- cost = register_move_cost[mode][class][cover_class] * freq;
- allocate_and_set_costs
+ cost = ira_register_move_cost[mode][class][cover_class] * freq;
+ ira_allocate_and_set_costs
(&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class, 0);
ALLOCNO_HARD_REG_COSTS (a)[index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= cost;
@@ -431,7 +432,7 @@ add_insn_allocno_copies (rtx insn)
/* Add copies originating from the BB given by LOOP_TREE_NODE. */
static void
-add_copies (loop_tree_node_t loop_tree_node)
+add_copies (ira_loop_tree_node_t loop_tree_node)
{
basic_block bb;
rtx insn;
@@ -448,12 +449,12 @@ add_copies (loop_tree_node_t loop_tree_node)
upper loop tree level. So allocnos on upper levels accumulate
information about the corresponding allocnos in nested regions. */
static void
-propagate_allocno_copy_info (allocno_t a)
+propagate_allocno_copy_info (ira_allocno_t a)
{
int regno;
- allocno_t parent_a, another_a, another_parent_a;
- loop_tree_node_t parent;
- copy_t cp, next_cp;
+ ira_allocno_t parent_a, another_a, another_parent_a;
+ ira_loop_tree_node_t parent;
+ ira_copy_t cp, next_cp;
regno = ALLOCNO_REGNO (a);
if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
@@ -475,8 +476,8 @@ propagate_allocno_copy_info (allocno_t a)
gcc_unreachable ();
if ((another_parent_a = (parent->regno_allocno_map
[ALLOCNO_REGNO (another_a)])) != NULL)
- add_allocno_copy (parent_a, another_parent_a, cp->freq,
- cp->insn, cp->loop_tree_node);
+ ira_add_allocno_copy (parent_a, another_parent_a, cp->freq,
+ cp->insn, cp->loop_tree_node);
}
}
}
@@ -487,10 +488,10 @@ static void
propagate_copy_info (void)
{
int i;
- allocno_t a;
+ ira_allocno_t a;
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
propagate_allocno_copy_info (a);
@@ -500,7 +501,7 @@ propagate_copy_info (void)
used to find a conflict for new allocnos or allocnos with
different cover classes. */
bool
-allocno_live_ranges_intersect_p (allocno_t a1, allocno_t a2)
+ira_allocno_live_ranges_intersect_p (ira_allocno_t a1, ira_allocno_t a2)
{
allocno_live_range_t r1, r2;
@@ -528,9 +529,9 @@ allocno_live_ranges_intersect_p (allocno_t a1, allocno_t a2)
intersect. This should be used when there is only one region.
Currently this is used during reload. */
bool
-pseudo_live_ranges_intersect_p (int regno1, int regno2)
+ira_pseudo_live_ranges_intersect_p (int regno1, int regno2)
{
- allocno_t a1, a2;
+ ira_allocno_t a1, a2;
ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
&& regno2 >= FIRST_PSEUDO_REGISTER);
@@ -539,7 +540,7 @@ pseudo_live_ranges_intersect_p (int regno1, int regno2)
if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
|| (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
return false;
- return allocno_live_ranges_intersect_p (a1, a2);
+ return ira_allocno_live_ranges_intersect_p (a1, a2);
}
/* Remove copies involving conflicting allocnos. We cannot do this
@@ -549,12 +550,12 @@ static void
remove_conflict_allocno_copies (void)
{
int i;
- allocno_t a;
- allocno_iterator ai;
- copy_t cp, next_cp;
- VEC(copy_t,heap) *conflict_allocno_copy_vec;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
+ ira_copy_t cp, next_cp;
+ VEC(ira_copy_t,heap) *conflict_allocno_copy_vec;
- conflict_allocno_copy_vec = VEC_alloc (copy_t, heap, get_max_uid ());
+ conflict_allocno_copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
FOR_EACH_ALLOCNO (a, ai)
{
for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
@@ -563,13 +564,13 @@ remove_conflict_allocno_copies (void)
else
{
next_cp = cp->next_second_allocno_copy;
- VEC_safe_push (copy_t, heap, conflict_allocno_copy_vec, cp);
+ VEC_safe_push (ira_copy_t, heap, conflict_allocno_copy_vec, cp);
}
}
- for (i = 0; VEC_iterate (copy_t, conflict_allocno_copy_vec, i, cp); i++)
+ for (i = 0; VEC_iterate (ira_copy_t, conflict_allocno_copy_vec, i, cp); i++)
if (CONFLICT_ALLOCNO_P (cp->first, cp->second))
- remove_allocno_copy_from_list (cp);
- VEC_free (copy_t, heap, conflict_allocno_copy_vec);
+ ira_remove_allocno_copy_from_list (cp);
+ VEC_free (ira_copy_t, heap, conflict_allocno_copy_vec);
}
/* Build conflict vectors or bit conflict vectors (whatever is more
@@ -580,14 +581,15 @@ build_allocno_conflicts (void)
int i, j, px, parent_num;
bool free_p;
int conflict_bit_vec_words_num;
- loop_tree_node_t parent;
- allocno_t a, parent_a, another_a, another_parent_a, *conflict_allocnos, *vec;
- INT_TYPE *allocno_conflicts;
- allocno_set_iterator asi;
+ ira_loop_tree_node_t parent;
+ ira_allocno_t a, parent_a, another_a, another_parent_a;
+ ira_allocno_t *conflict_allocnos, *vec;
+ IRA_INT_TYPE *allocno_conflicts;
+ ira_allocno_set_iterator asi;
- conflict_allocnos = ira_allocate (sizeof (allocno_t) * allocnos_num);
+ conflict_allocnos = ira_allocate (sizeof (ira_allocno_t) * ira_allocnos_num);
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
@@ -596,17 +598,17 @@ build_allocno_conflicts (void)
FOR_EACH_ALLOCNO_IN_SET (allocno_conflicts,
ALLOCNO_MIN (a), ALLOCNO_MAX (a), j, asi)
{
- another_a = conflict_id_allocno_map[j];
+ another_a = ira_conflict_id_allocno_map[j];
ira_assert (ALLOCNO_COVER_CLASS (a)
== ALLOCNO_COVER_CLASS (another_a));
conflict_allocnos[px++] = another_a;
}
- if (conflict_vector_profitable_p (a, px))
+ if (ira_conflict_vector_profitable_p (a, px))
{
free_p = true;
- allocate_allocno_conflict_vec (a, px);
+ ira_allocate_allocno_conflict_vec (a, px);
vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a);
- memcpy (vec, conflict_allocnos, sizeof (allocno_t) * px);
+ memcpy (vec, conflict_allocnos, sizeof (ira_allocno_t) * px);
vec[px] = NULL;
ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = px;
}
@@ -618,9 +620,9 @@ build_allocno_conflicts (void)
conflict_bit_vec_words_num = 0;
else
conflict_bit_vec_words_num
- = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + INT_BITS) / INT_BITS;
+ = (ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + IRA_INT_BITS) / IRA_INT_BITS;
ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a)
- = conflict_bit_vec_words_num * sizeof (INT_TYPE);
+ = conflict_bit_vec_words_num * sizeof (IRA_INT_TYPE);
}
if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
|| (parent_a = parent->regno_allocno_map[i]) == NULL)
@@ -634,7 +636,7 @@ build_allocno_conflicts (void)
FOR_EACH_ALLOCNO_IN_SET (allocno_conflicts,
ALLOCNO_MIN (a), ALLOCNO_MAX (a), j, asi)
{
- another_a = conflict_id_allocno_map[j];
+ another_a = ira_conflict_id_allocno_map[j];
ira_assert (ALLOCNO_COVER_CLASS (a)
== ALLOCNO_COVER_CLASS (another_a));
if ((another_parent_a = (parent->regno_allocno_map
@@ -660,7 +662,7 @@ build_allocno_conflicts (void)
/* Propagate information about allocnos modified inside the loop given
by its LOOP_TREE_NODE to its parent. */
static void
-propagate_modified_regnos (loop_tree_node_t loop_tree_node)
+propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
if (loop_tree_node == ira_loop_tree_root)
return;
@@ -705,14 +707,14 @@ print_hard_reg_set (FILE *file, const char *title, HARD_REG_SET set)
static void
print_conflicts (FILE *file, bool reg_p)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
HARD_REG_SET conflicting_hard_regs;
FOR_EACH_ALLOCNO (a, ai)
{
- allocno_t conflict_a;
- allocno_conflict_iterator aci;
+ ira_allocno_t conflict_a;
+ ira_allocno_conflict_iterator aci;
basic_block bb;
if (reg_p)
@@ -744,15 +746,15 @@ print_conflicts (FILE *file, bool reg_p)
}
}
COPY_HARD_REG_SET (conflicting_hard_regs,
- ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
- AND_COMPL_HARD_REG_SET (conflicting_hard_regs, no_alloc_regs);
+ IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
AND_HARD_REG_SET (conflicting_hard_regs,
reg_class_contents[ALLOCNO_COVER_CLASS (a)]);
print_hard_reg_set (file, "\n;; total conflict hard regs:",
conflicting_hard_regs);
COPY_HARD_REG_SET (conflicting_hard_regs,
ALLOCNO_CONFLICT_HARD_REGS (a));
- AND_COMPL_HARD_REG_SET (conflicting_hard_regs, no_alloc_regs);
+ AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
AND_HARD_REG_SET (conflicting_hard_regs,
reg_class_contents[ALLOCNO_COVER_CLASS (a)]);
print_hard_reg_set (file, ";; conflict hard regs:",
@@ -764,7 +766,7 @@ print_conflicts (FILE *file, bool reg_p)
/* Print information about allocno conflicts (or only regno conflicts
if REG_P) to stderr. */
void
-debug_conflicts (bool reg_p)
+ira_debug_conflicts (bool reg_p)
{
print_conflicts (stderr, reg_p);
}
@@ -776,13 +778,13 @@ debug_conflicts (bool reg_p)
void
ira_build_conflicts (void)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
if (optimize)
{
build_conflict_bit_table ();
- traverse_loop_tree (true, ira_loop_tree_root, NULL, add_copies);
+ ira_traverse_loop_tree (true, ira_loop_tree_root, NULL, add_copies);
if (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
|| flag_ira_algorithm == IRA_ALGORITHM_MIXED)
propagate_copy_info ();
@@ -796,7 +798,7 @@ ira_build_conflicts (void)
continue;
if (! flag_caller_saves)
{
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
call_used_reg_set);
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
@@ -804,7 +806,7 @@ ira_build_conflicts (void)
}
else
{
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
no_caller_save_reg_set);
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
@@ -813,8 +815,8 @@ ira_build_conflicts (void)
}
if (optimize)
{
- traverse_loop_tree (false, ira_loop_tree_root, NULL,
- propagate_modified_regnos);
+ ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
+ propagate_modified_regnos);
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
print_conflicts (ira_dump_file, false);
}
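
The bit table built by build_conflict_bit_table above is compressed: each
allocno's vector covers only the conflict ids between its ALLOCNO_MIN and
ALLOCNO_MAX, which is where the (max - min + IRA_INT_BITS) / IRA_INT_BITS
word count in the hunks above comes from. A rough sketch of that biased
indexing, assuming 32-bit words and helper names invented for illustration:

  #include <stdlib.h>

  #define BITS_PER_WORD 32   /* stands in for IRA_INT_BITS */

  /* Allocate a zeroed bit vector covering conflict ids MIN..MAX.  */
  static unsigned int *
  alloc_conflict_vec (int min, int max)
  {
    size_t words = (max - min + BITS_PER_WORD) / BITS_PER_WORD;
    return calloc (words, sizeof (unsigned int));
  }

  /* Record a conflict with id ID; indices are biased by MIN so the
     vector spans only the allocno's own conflict-id range.  */
  static void
  set_conflict (unsigned int *vec, int min, int id)
  {
    vec[(id - min) / BITS_PER_WORD] |= 1u << ((id - min) % BITS_PER_WORD);
  }

  static int
  test_conflict (const unsigned int *vec, int min, int id)
  {
    return (vec[(id - min) / BITS_PER_WORD]
            >> ((id - min) % BITS_PER_WORD)) & 1;
  }

The bias is what makes the "+++Allocating ... (uncompressed size ...)" dump
line above meaningful: the uncompressed table would cost allocno_set_words
words for every allocno, while the compressed one pays only for each
allocno's live id range.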
diff --git a/gcc/ira-costs.c b/gcc/ira-costs.c
index b66ee094817..4c0f9260b58 100644
--- a/gcc/ira-costs.c
+++ b/gcc/ira-costs.c
@@ -136,8 +136,8 @@ copy_cost (rtx x, enum machine_mode mode, enum reg_class class, bool to_p,
sri.extra_cost = 0;
secondary_class = targetm.secondary_reload (to_p, x, class, mode, &sri);
- if (register_move_cost[mode] == NULL)
- init_register_move_cost (mode);
+ if (ira_register_move_cost[mode] == NULL)
+ ira_init_register_move_cost (mode);
if (secondary_class != NO_REGS)
return (move_cost[mode][secondary_class][class] + sri.extra_cost
@@ -147,7 +147,7 @@ copy_cost (rtx x, enum machine_mode mode, enum reg_class class, bool to_p,
the cost to move between the register classes, and use 2 for
everything else (constants). */
if (MEM_P (x) || class == NO_REGS)
- return sri.extra_cost + memory_move_cost[mode][class][to_p != 0];
+ return sri.extra_cost + ira_memory_move_cost[mode][class][to_p != 0];
else if (REG_P (x))
return
(sri.extra_cost + move_cost[mode][REGNO_REG_CLASS (REGNO (x))][class]);
@@ -279,18 +279,18 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
needs to do a copy, which is one insn. */
struct costs *pp = this_op_costs[i];
- if (register_move_cost[mode] == NULL)
- init_register_move_cost (mode);
+ if (ira_register_move_cost[mode] == NULL)
+ ira_init_register_move_cost (mode);
for (k = 0; k < cost_classes_num; k++)
{
class = cost_classes[k];
pp->cost[k]
= ((recog_data.operand_type[i] != OP_OUT
- ? register_may_move_in_cost[mode][class]
+ ? ira_may_move_in_cost[mode][class]
[classes[i]] * frequency : 0)
+ (recog_data.operand_type[i] != OP_IN
- ? register_may_move_out_cost[mode][classes[i]]
+ ? ira_may_move_out_cost[mode][classes[i]]
[class] * frequency : 0));
}
@@ -299,9 +299,9 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
insn to load it. */
pp->mem_cost
= ((recog_data.operand_type[i] != OP_IN
- ? memory_move_cost[mode][classes[i]][0] : 0)
+ ? ira_memory_move_cost[mode][classes[i]][0] : 0)
+ (recog_data.operand_type[i] != OP_OUT
- ? memory_move_cost[mode][classes[i]][1] : 0)
+ ? ira_memory_move_cost[mode][classes[i]][1] : 0)
- allows_mem[i]) * frequency;
/* If we have assigned a class to this allocno in our
first pass, add a cost to this alternative
@@ -319,13 +319,15 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
if (pref_class == NO_REGS)
alt_cost
+= ((recog_data.operand_type[i] != OP_IN
- ? memory_move_cost[mode][classes[i]][0] : 0)
+ ? ira_memory_move_cost[mode][classes[i]][0]
+ : 0)
+ (recog_data.operand_type[i] != OP_OUT
- ? memory_move_cost[mode][classes[i]][1] : 0));
- else if (reg_class_intersect
+ ? ira_memory_move_cost[mode][classes[i]][1]
+ : 0));
+ else if (ira_reg_class_intersect
[pref_class][classes[i]] == NO_REGS)
- alt_cost
- += register_move_cost[mode][pref_class][classes[i]];
+ alt_cost += (ira_register_move_cost
+ [mode][pref_class][classes[i]]);
}
if (REGNO (ops[i]) != REGNO (ops[j])
&& ! find_reg_note (insn, REG_DEAD, op))
@@ -371,7 +373,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
to be allocated to a register that can be the
base of an address, i.e. BASE_REG_CLASS. */
classes[i]
- = reg_class_union[classes[i]]
+ = ira_reg_class_union[classes[i]]
[base_reg_class (VOIDmode, ADDRESS, SCRATCH)];
break;
@@ -457,12 +459,12 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
win = 1;
allows_mem[i] = 1;
case 'r':
- classes[i] = reg_class_union[classes[i]][GENERAL_REGS];
+ classes[i] = ira_reg_class_union[classes[i]][GENERAL_REGS];
break;
default:
if (REG_CLASS_FROM_CONSTRAINT (c, p) != NO_REGS)
- classes[i] = reg_class_union[classes[i]]
+ classes[i] = ira_reg_class_union[classes[i]]
[REG_CLASS_FROM_CONSTRAINT (c, p)];
#ifdef EXTRA_CONSTRAINT_STR
else if (EXTRA_CONSTRAINT_STR (op, c, p))
@@ -486,7 +488,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
that can be the base of an address,
i.e. BASE_REG_CLASS. */
classes[i]
- = reg_class_union[classes[i]]
+ = ira_reg_class_union[classes[i]]
[base_reg_class (VOIDmode, ADDRESS, SCRATCH)];
}
#endif
@@ -522,18 +524,18 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
{
struct costs *pp = this_op_costs[i];
- if (register_move_cost[mode] == NULL)
- init_register_move_cost (mode);
+ if (ira_register_move_cost[mode] == NULL)
+ ira_init_register_move_cost (mode);
for (k = 0; k < cost_classes_num; k++)
{
class = cost_classes[k];
pp->cost[k]
= ((recog_data.operand_type[i] != OP_OUT
- ? register_may_move_in_cost[mode][class]
+ ? ira_may_move_in_cost[mode][class]
[classes[i]] * frequency : 0)
+ (recog_data.operand_type[i] != OP_IN
- ? register_may_move_out_cost[mode][classes[i]]
+ ? ira_may_move_out_cost[mode][classes[i]]
[class] * frequency : 0));
}
@@ -542,9 +544,9 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
insn to load it. */
pp->mem_cost
= ((recog_data.operand_type[i] != OP_IN
- ? memory_move_cost[mode][classes[i]][0] : 0)
+ ? ira_memory_move_cost[mode][classes[i]][0] : 0)
+ (recog_data.operand_type[i] != OP_OUT
- ? memory_move_cost[mode][classes[i]][1] : 0)
+ ? ira_memory_move_cost[mode][classes[i]][1] : 0)
- allows_mem[i]) * frequency;
/* If we have assigned a class to this allocno in our
first pass, add a cost to this alternative
@@ -562,13 +564,15 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
if (pref_class == NO_REGS)
alt_cost
+= ((recog_data.operand_type[i] != OP_IN
- ? memory_move_cost[mode][classes[i]][0] : 0)
+ ? ira_memory_move_cost[mode][classes[i]][0]
+ : 0)
+ (recog_data.operand_type[i] != OP_OUT
- ? memory_move_cost[mode][classes[i]][1] : 0));
- else if (reg_class_intersect[pref_class][classes[i]]
+ ? ira_memory_move_cost[mode][classes[i]][1]
+ : 0));
+ else if (ira_reg_class_intersect[pref_class][classes[i]]
== NO_REGS)
- alt_cost
- += register_move_cost[mode][pref_class][classes[i]];
+ alt_cost += (ira_register_move_cost
+ [mode][pref_class][classes[i]]);
}
}
}
@@ -596,7 +600,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
/* The only other way this alternative can be used is if
this is a constant that could be placed into memory. */
else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
- alt_cost += memory_move_cost[mode][classes[i]][1];
+ alt_cost += ira_memory_move_cost[mode][classes[i]][1];
else
alt_fail = 1;
}
@@ -867,14 +871,14 @@ record_address_regs (enum machine_mode mode, rtx x, int context,
pp = COSTS_OF_ALLOCNO (total_costs,
ALLOCNO_NUM (ira_curr_regno_allocno_map
[REGNO (x)]));
- pp->mem_cost += (memory_move_cost[Pmode][class][1] * scale) / 2;
- if (register_move_cost[Pmode] == NULL)
- init_register_move_cost (Pmode);
+ pp->mem_cost += (ira_memory_move_cost[Pmode][class][1] * scale) / 2;
+ if (ira_register_move_cost[Pmode] == NULL)
+ ira_init_register_move_cost (Pmode);
for (k = 0; k < cost_classes_num; k++)
{
i = cost_classes[k];
pp->cost[k]
- += (register_may_move_in_cost[Pmode][i][class] * scale) / 2;
+ += (ira_may_move_in_cost[Pmode][i][class] * scale) / 2;
}
}
break;
@@ -989,7 +993,7 @@ scan_one_insn (rtx insn)
COSTS_OF_ALLOCNO (total_costs,
ALLOCNO_NUM (ira_curr_regno_allocno_map
[REGNO (SET_DEST (set))]))->mem_cost
- -= (memory_move_cost[GET_MODE (SET_DEST (set))][GENERAL_REGS][1]
+ -= (ira_memory_move_cost[GET_MODE (SET_DEST (set))][GENERAL_REGS][1]
* frequency);
record_address_regs (GET_MODE (SET_SRC (set)), XEXP (SET_SRC (set), 0),
0, MEM, SCRATCH, frequency * 2);
@@ -1024,8 +1028,8 @@ static void
print_costs (FILE *f)
{
int k;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
fprintf (f, "\n");
FOR_EACH_ALLOCNO (a, ai)
@@ -1063,7 +1067,7 @@ print_costs (FILE *f)
/* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
costs. */
static void
-process_bb_node_for_costs (loop_tree_node_t loop_tree_node)
+process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
{
basic_block bb;
rtx insn;
@@ -1089,7 +1093,7 @@ find_allocno_class_costs (void)
init_recog ();
#ifdef FORBIDDEN_INC_DEC_CLASSES
- in_inc_dec = ira_allocate (sizeof (bool) * allocnos_num);
+ in_inc_dec = ira_allocate (sizeof (bool) * ira_allocnos_num);
#endif /* FORBIDDEN_INC_DEC_CLASSES */
allocno_pref = NULL;
@@ -1106,11 +1110,11 @@ find_allocno_class_costs (void)
work well for some targets where some subclass of a cover class
is costly and the wrong cover class is chosen. */
for (cost_classes_num = 0;
- cost_classes_num < important_classes_num;
+ cost_classes_num < ira_important_classes_num;
cost_classes_num++)
{
cost_classes[cost_classes_num]
- = important_classes[cost_classes_num];
+ = ira_important_classes[cost_classes_num];
cost_class_nums[cost_classes[cost_classes_num]]
= cost_classes_num;
}
@@ -1118,15 +1122,15 @@ find_allocno_class_costs (void)
= sizeof (struct costs) + sizeof (int) * (cost_classes_num - 1);
/* Zero out our accumulation of the cost of each class for each
allocno. */
- memset (total_costs, 0, allocnos_num * struct_costs_size);
+ memset (total_costs, 0, ira_allocnos_num * struct_costs_size);
#ifdef FORBIDDEN_INC_DEC_CLASSES
- memset (in_inc_dec, 0, allocnos_num * sizeof (bool));
+ memset (in_inc_dec, 0, ira_allocnos_num * sizeof (bool));
#endif
/* Scan the instructions and record each time it would save code
to put a certain allocno in a certain class. */
- traverse_loop_tree (true, ira_loop_tree_root,
- process_bb_node_for_costs, NULL);
+ ira_traverse_loop_tree (true, ira_loop_tree_root,
+ process_bb_node_for_costs, NULL);
if (pass == 0)
allocno_pref = allocno_pref_buffer;
@@ -1135,20 +1139,20 @@ find_allocno_class_costs (void)
find which class is preferred. */
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
{
- allocno_t a, parent_a;
+ ira_allocno_t a, parent_a;
int class, a_num, parent_a_num;
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
int best_cost;
enum reg_class best, alt_class, common_class;
#ifdef FORBIDDEN_INC_DEC_CLASSES
int inc_dec_p = false;
#endif
- if (regno_allocno_map[i] == NULL)
+ if (ira_regno_allocno_map[i] == NULL)
continue;
memset (temp_costs, 0, struct_costs_size);
/* Find cost of all allocnos with the same regno. */
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
@@ -1203,7 +1207,7 @@ find_allocno_class_costs (void)
best = (enum reg_class) class;
}
else if (temp_costs->cost[k] == best_cost)
- best = reg_class_union[best][class];
+ best = ira_reg_class_union[best][class];
if (pass == flag_expensive_optimizations
&& temp_costs->cost[k] < temp_costs->mem_cost
&& (reg_class_size[reg_class_subunion[alt_class][class]]
@@ -1228,8 +1232,8 @@ find_allocno_class_costs (void)
/* Make the common class a cover class. Remember that all
allocnos with the same regno should have the same cover
class. */
- common_class = class_translate[best];
- for (a = regno_allocno_map[i];
+ common_class = ira_class_translate[best];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
@@ -1245,7 +1249,7 @@ find_allocno_class_costs (void)
for (k = 0; k < cost_classes_num; k++)
{
class = cost_classes[k];
- if (! class_subset_p[class][common_class])
+ if (! ira_class_subset_p[class][common_class])
continue;
/* Ignore classes that are too small for this
operand or invalid for an operand that was
@@ -1269,7 +1273,7 @@ find_allocno_class_costs (void)
}
else if (COSTS_OF_ALLOCNO (total_costs, a_num)->cost[k]
== best_cost)
- best = reg_class_union[best][class];
+ best = ira_reg_class_union[best][class];
}
}
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL
@@ -1283,7 +1287,7 @@ find_allocno_class_costs (void)
ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
fprintf (ira_dump_file, ") best %s, cover %s\n",
reg_class_names[best],
- reg_class_names[class_translate[best]]);
+ reg_class_names[ira_class_translate[best]]);
}
allocno_pref[a_num] = best;
}
@@ -1309,11 +1313,11 @@ find_allocno_class_costs (void)
register are already taken into account in class costs for the
allocno. */
static void
-process_bb_node_for_hard_reg_moves (loop_tree_node_t loop_tree_node)
+process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
{
int i, freq, cost, src_regno, dst_regno, hard_regno;
bool to_p;
- allocno_t a;
+ ira_allocno_t a;
enum reg_class class, cover_class, hard_reg_class;
enum machine_mode mode;
basic_block bb;
@@ -1357,16 +1361,17 @@ process_bb_node_for_hard_reg_moves (loop_tree_node_t loop_tree_node)
class = ALLOCNO_COVER_CLASS (a);
if (! TEST_HARD_REG_BIT (reg_class_contents[class], hard_regno))
continue;
- i = class_hard_reg_index[class][hard_regno];
+ i = ira_class_hard_reg_index[class][hard_regno];
if (i < 0)
continue;
mode = ALLOCNO_MODE (a);
hard_reg_class = REGNO_REG_CLASS (hard_regno);
- cost = (to_p ? register_move_cost[mode][hard_reg_class][class]
- : register_move_cost[mode][class][hard_reg_class]) * freq;
- allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), class,
- ALLOCNO_COVER_CLASS_COST (a));
- allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), class, 0);
+ cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][class]
+ : ira_register_move_cost[mode][class][hard_reg_class]) * freq;
+ ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), class,
+ ALLOCNO_COVER_CLASS_COST (a));
+ ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
+ class, 0);
ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
ALLOCNO_COVER_CLASS_COST (a) = MIN (ALLOCNO_COVER_CLASS_COST (a),
@@ -1376,7 +1381,7 @@ process_bb_node_for_hard_reg_moves (loop_tree_node_t loop_tree_node)
{
/* Propagate changes to the upper levels in the region
tree. */
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
int regno = ALLOCNO_REGNO (a);
for (;;)
@@ -1386,11 +1391,11 @@ process_bb_node_for_hard_reg_moves (loop_tree_node_t loop_tree_node)
if ((a = parent->regno_allocno_map[regno]) == NULL)
break;
cover_class = ALLOCNO_COVER_CLASS (a);
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
- allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
- cover_class, 0);
+ ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
+ cover_class, 0);
ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
ALLOCNO_COVER_CLASS_COST (a)
@@ -1411,32 +1416,32 @@ setup_allocno_cover_class_and_costs (void)
int *reg_costs;
enum reg_class cover_class, class;
enum machine_mode mode;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
{
i = ALLOCNO_NUM (a);
mode = ALLOCNO_MODE (a);
- cover_class = class_translate[allocno_pref[i]];
+ cover_class = ira_class_translate[allocno_pref[i]];
ira_assert (allocno_pref[i] == NO_REGS || cover_class != NO_REGS);
ALLOCNO_MEMORY_COST (a) = ALLOCNO_UPDATED_MEMORY_COST (a)
= COSTS_OF_ALLOCNO (total_costs, i)->mem_cost;
- set_allocno_cover_class (a, cover_class);
+ ira_set_allocno_cover_class (a, cover_class);
if (cover_class == NO_REGS)
continue;
- ALLOCNO_AVAILABLE_REGS_NUM (a) = available_class_regs[cover_class];
+ ALLOCNO_AVAILABLE_REGS_NUM (a) = ira_available_class_regs[cover_class];
ALLOCNO_COVER_CLASS_COST (a)
= (COSTS_OF_ALLOCNO (total_costs, i)
->cost[cost_class_nums[allocno_pref[i]]]);
if (optimize && ALLOCNO_COVER_CLASS (a) != allocno_pref[i])
{
- n = class_hard_regs_num[cover_class];
+ n = ira_class_hard_regs_num[cover_class];
ALLOCNO_HARD_REG_COSTS (a)
- = reg_costs = allocate_cost_vector (cover_class);
+ = reg_costs = ira_allocate_cost_vector (cover_class);
for (j = n - 1; j >= 0; j--)
{
- regno = class_hard_regs[cover_class][j];
+ regno = ira_class_hard_regs[cover_class][j];
class = REGNO_REG_CLASS (regno);
reg_costs[j] = (COSTS_OF_ALLOCNO (total_costs, i)
->cost[cost_class_nums[class]]);
@@ -1444,15 +1449,15 @@ setup_allocno_cover_class_and_costs (void)
}
}
if (optimize)
- traverse_loop_tree (true, ira_loop_tree_root,
- process_bb_node_for_hard_reg_moves, NULL);
+ ira_traverse_loop_tree (true, ira_loop_tree_root,
+ process_bb_node_for_hard_reg_moves, NULL);
}
/* Function called once during the compiler run. */
void
-init_ira_costs_once (void)
+ira_init_costs_once (void)
{
int i;
@@ -1494,17 +1499,17 @@ free_ira_costs (void)
/* This is called each time register related information is
changed. */
void
-init_ira_costs (void)
+ira_init_costs (void)
{
int i;
free_ira_costs ();
max_struct_costs_size
- = sizeof (struct costs) + sizeof (int) * (important_classes_num - 1);
+ = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
/* Don't use ira_allocate because vectors live through several IRA calls. */
init_cost = xmalloc (max_struct_costs_size);
init_cost->mem_cost = 1000000;
- for (i = 0; i < important_classes_num; i++)
+ for (i = 0; i < ira_important_classes_num; i++)
init_cost->cost[i] = 1000000;
for (i = 0; i < MAX_RECOG_OPERANDS; i++)
{
@@ -1512,12 +1517,12 @@ init_ira_costs (void)
this_op_costs[i] = xmalloc (max_struct_costs_size);
}
temp_costs = xmalloc (max_struct_costs_size);
- cost_classes = xmalloc (sizeof (enum reg_class) * important_classes_num);
+ cost_classes = xmalloc (sizeof (enum reg_class) * ira_important_classes_num);
}
/* Function called once at the end of the compiler run. */
void
-finish_ira_costs_once (void)
+ira_finish_costs_once (void)
{
free_ira_costs ();
}
@@ -1529,19 +1534,20 @@ finish_ira_costs_once (void)
void
ira_costs (void)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
- total_costs = ira_allocate (max_struct_costs_size * allocnos_num);
- allocno_pref_buffer = ira_allocate (sizeof (enum reg_class) * allocnos_num);
+ total_costs = ira_allocate (max_struct_costs_size * ira_allocnos_num);
+ allocno_pref_buffer
+ = ira_allocate (sizeof (enum reg_class) * ira_allocnos_num);
find_allocno_class_costs ();
setup_allocno_cover_class_and_costs ();
/* Because we could process operands only as subregs, check the mode of
the registers themselves too. */
FOR_EACH_ALLOCNO (a, ai)
- if (register_move_cost[ALLOCNO_MODE (a)] == NULL
+ if (ira_register_move_cost[ALLOCNO_MODE (a)] == NULL
&& have_regs_of_mode[ALLOCNO_MODE (a)])
- init_register_move_cost (ALLOCNO_MODE (a));
+ ira_init_register_move_cost (ALLOCNO_MODE (a));
ira_free (allocno_pref_buffer);
ira_free (total_costs);
}
@@ -1552,16 +1558,16 @@ ira_costs (void)
function calls. This is called only after we have found all intersecting
calls while building allocno live ranges. */
void
-tune_allocno_costs_and_cover_classes (void)
+ira_tune_allocno_costs_and_cover_classes (void)
{
int j, k, n, regno, freq;
int cost, min_cost, *reg_costs;
enum reg_class cover_class, class;
enum machine_mode mode;
- allocno_t a;
+ ira_allocno_t a;
rtx call, *allocno_calls;
HARD_REG_SET clobbered_regs;
- allocno_iterator ai;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
{
@@ -1569,33 +1575,33 @@ tune_allocno_costs_and_cover_classes (void)
if (cover_class == NO_REGS)
continue;
mode = ALLOCNO_MODE (a);
- n = class_hard_regs_num[cover_class];
+ n = ira_class_hard_regs_num[cover_class];
min_cost = INT_MAX;
if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
{
- allocate_and_set_costs
+ ira_allocate_and_set_costs
(&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
reg_costs = ALLOCNO_HARD_REG_COSTS (a);
for (j = n - 1; j >= 0; j--)
{
- regno = class_hard_regs[cover_class][j];
+ regno = ira_class_hard_regs[cover_class][j];
class = REGNO_REG_CLASS (regno);
cost = 0;
if (! flag_ira_ipra)
{
/* ??? If only part is call clobbered. */
- if (! hard_reg_not_in_set_p (regno, mode, call_used_reg_set))
+ if (! ira_hard_reg_not_in_set_p (regno, mode, call_used_reg_set))
{
cost += (ALLOCNO_CALL_FREQ (a)
- * (memory_move_cost[mode][class][0]
- + memory_move_cost[mode][class][1]));
+ * (ira_memory_move_cost[mode][class][0]
+ + ira_memory_move_cost[mode][class][1]));
}
}
else
{
allocno_calls
- = (VEC_address (rtx, regno_calls[ALLOCNO_REGNO (a)])
+ = (VEC_address (rtx, ira_regno_calls[ALLOCNO_REGNO (a)])
+ ALLOCNO_CALLS_CROSSED_START (a));
ira_assert (allocno_calls != NULL);
for (k = ALLOCNO_CALLS_CROSSED_NUM (a) - 1; k >= 0; k--)
@@ -1607,16 +1613,17 @@ tune_allocno_costs_and_cover_classes (void)
get_call_invalidated_used_regs (call, &clobbered_regs,
false);
/* ??? If only part is call clobbered. */
- if (! hard_reg_not_in_set_p (regno, mode,
- clobbered_regs))
+ if (! ira_hard_reg_not_in_set_p (regno, mode,
+ clobbered_regs))
cost
- += freq * (memory_move_cost[mode][class][0]
- + memory_move_cost[mode][class][1]);
+ += freq * (ira_memory_move_cost[mode][class][0]
+ + ira_memory_move_cost[mode][class][1]);
}
}
#ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
- cost += ((memory_move_cost[mode][class][0]
- + memory_move_cost[mode][class][1]) * ALLOCNO_FREQ (a)
+ cost += ((ira_memory_move_cost[mode][class][0]
+ + ira_memory_move_cost[mode][class][1])
+ * ALLOCNO_FREQ (a)
* IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
#endif
reg_costs[j] += cost;
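
The loop in ira_tune_allocno_costs_and_cover_classes above makes
call-clobbered hard registers less attractive for allocnos that cross calls:
each crossed call charges one store before the call and one load after it,
weighted by how often the calls execute. A toy version of the adjustment,
with an invented cost table standing in for ira_memory_move_cost:

  /* Made-up memory move costs for one (mode, class) pair:
     index 0 is a store to memory, index 1 a load from it.  */
  static const int mem_move_cost[2] = { 4, 6 };

  /* Penalize a call-clobbered hard register for a call-crossing
     allocno: a save plus a restore per crossed call, scaled by the
     calls' execution frequency.  */
  static int
  call_clobber_penalty (int reg_cost, int call_freq, int clobbered_p)
  {
    if (clobbered_p)
      reg_cost += call_freq * (mem_move_cost[0] + mem_move_cost[1]);
    return reg_cost;
  }

With flag_ira_ipra the hunk above refines the clobber test from the blanket
call_used_reg_set to the precise clobber set of each crossed call (via
get_call_invalidated_used_regs), but the shape of the penalty is the same.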
diff --git a/gcc/ira-emit.c b/gcc/ira-emit.c
index 55a444dceed..83036bf6cac 100644
--- a/gcc/ira-emit.c
+++ b/gcc/ira-emit.c
@@ -45,12 +45,12 @@ along with GCC; see the file COPYING3. If not see
#include "ira-int.h"
-/* The structure represents an allocno move. The both allocnos have
- the same origional regno but different allocation. */
+/* The structure represents an allocno move. Both allocnos have the
+ same original regno but different allocation. */
struct move
{
/* The allocnos involved in the move. */
- allocno_t from, to;
+ ira_allocno_t from, to;
/* The next move in the move sequence. */
struct move *next;
/* Used for finding dependencies. */
@@ -78,7 +78,7 @@ static int max_regno_before_changing;
/* Return new move of allocnos TO and FROM. */
static struct move *
-create_move (allocno_t to, allocno_t from)
+create_move (ira_allocno_t to, ira_allocno_t from)
{
struct move *move;
@@ -208,10 +208,10 @@ create_new_reg (rtx original_reg)
return new_reg;
}
-/* Eeturn TRUE if loop given by SUBNODE inside the loop given by
+/* Return TRUE if loop given by SUBNODE is inside the loop given by
NODE. */
static bool
-subloop_tree_node_p (loop_tree_node_t subnode, loop_tree_node_t node)
+subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
{
for (; subnode != NULL; subnode = subnode->parent)
if (subnode == node)
@@ -222,14 +222,14 @@ subloop_tree_node_p (loop_tree_node_t subnode, loop_tree_node_t node)
/* Set up member `reg' to REG for allocnos which have the same regno as
ALLOCNO and which are inside the loop corresponding to ALLOCNO. */
static void
-set_allocno_reg (allocno_t allocno, rtx reg)
+set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
int regno;
- allocno_t a;
- loop_tree_node_t node;
+ ira_allocno_t a;
+ ira_loop_tree_node_t node;
node = ALLOCNO_LOOP_TREE_NODE (allocno);
- for (a = regno_allocno_map[ALLOCNO_REGNO (allocno)];
+ for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
@@ -259,11 +259,11 @@ set_allocno_reg (allocno_t allocno, rtx reg)
situation when SRC_ALLOCNO is not modified in the corresponding
loop. */
static bool
-not_modified_p (allocno_t src_allocno, allocno_t dest_allocno)
+not_modified_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
int regno, orig_regno;
- allocno_t a;
- loop_tree_node_t node;
+ ira_allocno_t a;
+ ira_loop_tree_node_t node;
orig_regno = ALLOCNO_REGNO (src_allocno);
regno = REGNO (ALLOCNO_REG (dest_allocno));
@@ -285,10 +285,10 @@ not_modified_p (allocno_t src_allocno, allocno_t dest_allocno)
static void
generate_edge_moves (edge e)
{
- loop_tree_node_t src_loop_node, dest_loop_node;
+ ira_loop_tree_node_t src_loop_node, dest_loop_node;
unsigned int regno;
bitmap_iterator bi;
- allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
+ ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
struct move *move;
src_loop_node = IRA_BB_NODE (e->src)->parent;
@@ -343,16 +343,16 @@ static bitmap renamed_regno_bitmap;
/* Change (if necessary) pseudo-registers inside loop given by loop
tree node NODE. */
static void
-change_loop (loop_tree_node_t node)
+change_loop (ira_loop_tree_node_t node)
{
bitmap_iterator bi;
unsigned int i;
int regno;
bool used_p;
- allocno_t allocno, parent_allocno, *map;
+ ira_allocno_t allocno, parent_allocno, *map;
rtx insn, original_reg;
enum reg_class cover_class;
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
if (node != ira_loop_tree_root)
{
@@ -378,11 +378,11 @@ change_loop (loop_tree_node_t node)
EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
0, i, bi)
{
- allocno = allocnos[i];
+ allocno = ira_allocnos[i];
regno = ALLOCNO_REGNO (allocno);
cover_class = ALLOCNO_COVER_CLASS (allocno);
parent_allocno = map[regno];
- ira_assert (regno < reg_equiv_len);
+ ira_assert (regno < ira_reg_equiv_len);
/* We generate the same hard register move because the
	     reload pass can put an allocno into memory; in this case
we will have live range splitting. If it does not happen
@@ -394,15 +394,15 @@ change_loop (loop_tree_node_t node)
== ALLOCNO_HARD_REGNO (parent_allocno))
&& (ALLOCNO_HARD_REGNO (allocno) < 0
|| (parent->reg_pressure[cover_class] + 1
- <= available_class_regs[cover_class])
- || TEST_HARD_REG_BIT (prohibited_mode_move_regs
+ <= ira_available_class_regs[cover_class])
+ || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
[ALLOCNO_MODE (allocno)],
ALLOCNO_HARD_REGNO (allocno))
	      /* Don't create copies because reload can spill an
		 allocno set by copy although the allocno will not
		 get a memory slot.  */
- || reg_equiv_invariant_p[regno]
- || reg_equiv_const[regno] != NULL_RTX))
+ || ira_reg_equiv_invariant_p[regno]
+ || ira_reg_equiv_const[regno] != NULL_RTX))
continue;
original_reg = ALLOCNO_REG (allocno);
if (parent_allocno == NULL
@@ -424,7 +424,7 @@ change_loop (loop_tree_node_t node)
ira_curr_loop_tree_node->border_allocnos);
EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
{
- allocno = allocnos[i];
+ allocno = ira_allocnos[i];
regno = ALLOCNO_REGNO (allocno);
if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
continue;
@@ -443,8 +443,8 @@ static void
set_allocno_somewhere_renamed_p (void)
{
unsigned int regno;
- allocno_t allocno;
- allocno_iterator ai;
+ ira_allocno_t allocno;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (allocno, ai)
{
@@ -555,7 +555,7 @@ static move_t
modify_move_list (move_t list)
{
int i, n, nregs, hard_regno;
- allocno_t to, from, new_allocno;
+ ira_allocno_t to, from, new_allocno;
move_t move, new_move, set_move, first, last;
if (list == NULL)
@@ -630,11 +630,11 @@ modify_move_list (move_t list)
subsequent IRA internal representation
flattening. */
new_allocno
- = create_allocno (ALLOCNO_REGNO (set_move->to), false,
- ALLOCNO_LOOP_TREE_NODE (set_move->to));
+ = ira_create_allocno (ALLOCNO_REGNO (set_move->to), false,
+ ALLOCNO_LOOP_TREE_NODE (set_move->to));
ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
- set_allocno_cover_class (new_allocno,
- ALLOCNO_COVER_CLASS (set_move->to));
+ ira_set_allocno_cover_class
+ (new_allocno, ALLOCNO_COVER_CLASS (set_move->to));
ALLOCNO_ASSIGNED_P (new_allocno) = true;
ALLOCNO_HARD_REGNO (new_allocno) = -1;
ALLOCNO_REG (new_allocno)
@@ -644,11 +644,11 @@ modify_move_list (move_t list)
	     created allocnos.  Cases where temporary allocnos are
created to remove the cycles are quite rare. */
ALLOCNO_MIN (new_allocno) = 0;
- ALLOCNO_MAX (new_allocno) = allocnos_num - 1;
+ ALLOCNO_MAX (new_allocno) = ira_allocnos_num - 1;
new_move = create_move (set_move->to, new_allocno);
set_move->to = new_allocno;
VEC_safe_push (move_t, heap, move_vec, new_move);
- move_loops_num++;
+ ira_move_loops_num++;
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file,
" Creating temporary allocno a%dr%d\n",
@@ -706,24 +706,24 @@ emit_move_list (move_t list, int freq)
{
if (ALLOCNO_HARD_REGNO (list->from) >= 0)
{
- cost = memory_move_cost[mode][cover_class][0] * freq;
- store_cost += cost;
+ cost = ira_memory_move_cost[mode][cover_class][0] * freq;
+ ira_store_cost += cost;
}
}
else if (ALLOCNO_HARD_REGNO (list->from) < 0)
{
if (ALLOCNO_HARD_REGNO (list->to) >= 0)
{
- cost = memory_move_cost[mode][cover_class][0] * freq;
- load_cost += cost;
+ cost = ira_memory_move_cost[mode][cover_class][0] * freq;
+ ira_load_cost += cost;
}
}
else
{
- cost = register_move_cost[mode][cover_class][cover_class] * freq;
- shuffle_cost += cost;
+ cost = ira_register_move_cost[mode][cover_class][cover_class] * freq;
+ ira_shuffle_cost += cost;
}
- overall_cost += cost;
+ ira_overall_cost += cost;
}
result = get_insns ();
end_sequence ();
@@ -780,7 +780,7 @@ emit_moves (void)
REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
e);
if (e->src->next_bb != e->dest)
- additional_jumps_num++;
+ ira_additional_jumps_num++;
}
}
}
@@ -789,16 +789,16 @@ emit_moves (void)
loop tree from reading (if READ_P) or writing A on an execution
path with FREQ. */
static void
-update_costs (allocno_t a, bool read_p, int freq)
+update_costs (ira_allocno_t a, bool read_p, int freq)
{
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
for (;;)
{
ALLOCNO_NREFS (a)++;
ALLOCNO_FREQ (a) += freq;
ALLOCNO_MEMORY_COST (a)
- += (memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)]
+ += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)]
[read_p ? 1 : 0] * freq);
if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
|| (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
@@ -811,14 +811,14 @@ update_costs (allocno_t a, bool read_p, int freq)
living through the list is in LIVE_THROUGH, and the loop tree node
used to find corresponding allocnos is NODE. */
static void
-add_range_and_copies_from_move_list (move_t list, loop_tree_node_t node,
+add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
bitmap live_through, int freq)
{
int start, n;
unsigned int regno;
move_t move;
- allocno_t to, from, a;
- copy_t cp;
+ ira_allocno_t to, from, a;
+ ira_copy_t cp;
allocno_live_range_t r;
bitmap_iterator bi;
HARD_REG_SET hard_regs_live;
@@ -831,8 +831,8 @@ add_range_and_copies_from_move_list (move_t list, loop_tree_node_t node,
REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges are not merged with
the old ones. */
- max_point++;
- start = max_point;
+ ira_max_point++;
+ start = ira_max_point;
for (move = list; move != NULL; move = move->next)
{
from = move->from;
@@ -842,18 +842,19 @@ add_range_and_copies_from_move_list (move_t list, loop_tree_node_t node,
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Allocate conflicts for a%dr%d\n",
ALLOCNO_NUM (to), REGNO (ALLOCNO_REG (to)));
- allocate_allocno_conflicts (to, n);
+ ira_allocate_allocno_conflicts (to, n);
}
bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (from), hard_regs_live);
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (to), hard_regs_live);
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (from),
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (from),
+ hard_regs_live);
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (to),
hard_regs_live);
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (to), hard_regs_live);
update_costs (from, true, freq);
update_costs (to, false, freq);
- cp = add_allocno_copy (from, to, freq, move->insn, NULL);
+ cp = ira_add_allocno_copy (from, to, freq, move->insn, NULL);
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Adding cp%d:a%dr%d-a%dr%d\n",
cp->num, ALLOCNO_NUM (cp->first),
@@ -863,27 +864,27 @@ add_range_and_copies_from_move_list (move_t list, loop_tree_node_t node,
if (r == NULL || r->finish >= 0)
{
ALLOCNO_LIVE_RANGES (from)
- = create_allocno_live_range (from, start, max_point, r);
+ = ira_create_allocno_live_range (from, start, ira_max_point, r);
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file,
" Adding range [%d..%d] to allocno a%dr%d\n",
- start, max_point, ALLOCNO_NUM (from),
+ start, ira_max_point, ALLOCNO_NUM (from),
REGNO (ALLOCNO_REG (from)));
}
else
- r->finish = max_point;
- max_point++;
+ r->finish = ira_max_point;
+ ira_max_point++;
ALLOCNO_LIVE_RANGES (to)
- = create_allocno_live_range (to, max_point, -1,
- ALLOCNO_LIVE_RANGES (to));
- max_point++;
+ = ira_create_allocno_live_range (to, ira_max_point, -1,
+ ALLOCNO_LIVE_RANGES (to));
+ ira_max_point++;
}
for (move = list; move != NULL; move = move->next)
{
r = ALLOCNO_LIVE_RANGES (move->to);
if (r->finish < 0)
{
- r->finish = max_point - 1;
+ r->finish = ira_max_point - 1;
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file,
" Adding range [%d..%d] to allocno a%dr%d\n",
@@ -897,13 +898,14 @@ add_range_and_copies_from_move_list (move_t list, loop_tree_node_t node,
if (ALLOCNO_MEM_OPTIMIZED_DEST (a) == NULL)
{
ALLOCNO_LIVE_RANGES (a)
- = create_allocno_live_range (a, start, max_point - 1,
- ALLOCNO_LIVE_RANGES (a));
+ = ira_create_allocno_live_range (a, start, ira_max_point - 1,
+ ALLOCNO_LIVE_RANGES (a));
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf
(ira_dump_file,
" Adding range [%d..%d] to live through allocno a%dr%d\n",
- start, max_point - 1, ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
+ start, ira_max_point - 1, ALLOCNO_NUM (a),
+ REGNO (ALLOCNO_REG (a)));
}
}
}
@@ -916,7 +918,7 @@ add_ranges_and_copies (void)
basic_block bb;
edge_iterator ei;
edge e;
- loop_tree_node_t node;
+ ira_loop_tree_node_t node;
bitmap live_through;
live_through = ira_allocate_bitmap ();
@@ -952,8 +954,8 @@ ira_emit (bool loops_p)
basic_block bb;
edge_iterator ei;
edge e;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_REG (a) = regno_reg_rtx[ALLOCNO_REGNO (a)];
@@ -967,7 +969,7 @@ ira_emit (bool loops_p)
used_regno_bitmap = ira_allocate_bitmap ();
renamed_regno_bitmap = ira_allocate_bitmap ();
max_regno_before_changing = max_reg_num ();
- traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
+ ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
set_allocno_somewhere_renamed_p ();
ira_free_bitmap (used_regno_bitmap);
ira_free_bitmap (renamed_regno_bitmap);
@@ -989,7 +991,7 @@ ira_emit (bool loops_p)
unify_moves (bb, true);
FOR_EACH_BB (bb)
unify_moves (bb, false);
- move_vec = VEC_alloc (move_t, heap, allocnos_num);
+ move_vec = VEC_alloc (move_t, heap, ira_allocnos_num);
emit_moves ();
add_ranges_and_copies ();
/* Clean up: */
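/* A sketch of the cost bookkeeping in emit_move_list above: each
   generated move is classified by where its two allocnos ended up,
   and the corresponding ira_store_cost, ira_load_cost or
   ira_shuffle_cost counter is bumped.  classify_move is a
   hypothetical helper capturing that classification.  */

enum move_kind { MOVE_STORE, MOVE_LOAD, MOVE_SHUFFLE, MOVE_NONE };

static enum move_kind
classify_move (int from_hard_regno, int to_hard_regno)
{
  if (to_hard_regno < 0)
    /* Destination is in memory: a register source means a store.  */
    return from_hard_regno >= 0 ? MOVE_STORE : MOVE_NONE;
  if (from_hard_regno < 0)
    /* Source is in memory, destination in a register: a load.  */
    return MOVE_LOAD;
  /* Both allocnos got hard registers: a register shuffle.  */
  return MOVE_SHUFFLE;
}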
diff --git a/gcc/ira-int.h b/gcc/ira-int.h
index d1f5587fe02..6383a8d61e5 100644
--- a/gcc/ira-int.h
+++ b/gcc/ira-int.h
@@ -23,6 +23,9 @@ along with GCC; see the file COPYING3. If not see
#include "ira.h"
#include "alloc-pool.h"
+/* To provide consistency in naming, all IRA external variables,
+   functions, and common typedefs start with the prefix ira_. */
+
#ifdef ENABLE_CHECKING
#define ENABLE_IRA_CHECKING
#endif
@@ -54,18 +57,18 @@ extern FILE *ira_dump_file;
/* Typedefs for pointers to allocno live range, allocno, and copy of
allocnos. */
-typedef struct allocno_live_range *allocno_live_range_t;
-typedef struct allocno *allocno_t;
-typedef struct allocno_copy *copy_t;
+typedef struct ira_allocno_live_range *allocno_live_range_t;
+typedef struct ira_allocno *ira_allocno_t;
+typedef struct ira_allocno_copy *ira_copy_t;
/* Definition of vector of allocnos and copies. */
-DEF_VEC_P(allocno_t);
-DEF_VEC_ALLOC_P(allocno_t, heap);
-DEF_VEC_P(copy_t);
-DEF_VEC_ALLOC_P(copy_t, heap);
+DEF_VEC_P(ira_allocno_t);
+DEF_VEC_ALLOC_P(ira_allocno_t, heap);
+DEF_VEC_P(ira_copy_t);
+DEF_VEC_ALLOC_P(ira_copy_t, heap);
/* Typedef for pointer to the subsequent structure. */
-typedef struct loop_tree_node *loop_tree_node_t;
+typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
/* In general case, IRA is a regional allocator. The regions are
nested and form a tree. Currently regions are natural loops. The
@@ -75,19 +78,19 @@ typedef struct loop_tree_node *loop_tree_node_t;
not a part of the tree from cfgloop.h. We also use the nodes for
storing additional information about basic blocks/loops for the
register allocation purposes. */
-struct loop_tree_node
+struct ira_loop_tree_node
{
/* The node represents basic block if children == NULL. */
basic_block bb; /* NULL for loop. */
struct loop *loop; /* NULL for BB. */
  /* The next (loop) node with the same parent. SUBLOOP_NEXT is
always NULL for BBs. */
- loop_tree_node_t subloop_next, next;
+ ira_loop_tree_node_t subloop_next, next;
/* The first (loop) node immediately inside the node. SUBLOOPS is
always NULL for BBs. */
- loop_tree_node_t subloops, children;
+ ira_loop_tree_node_t subloops, children;
/* The node immediately containing given node. */
- loop_tree_node_t parent;
+ ira_loop_tree_node_t parent;
/* Loop level in range [0, ira_loop_tree_height). */
int level;
@@ -102,7 +105,7 @@ struct loop_tree_node
allocation is used for a pseudo-register on different sides of
the edges). Caps are not in the map (remember we can have more
one cap with the same regno in a region). */
- allocno_t *regno_allocno_map;
+ ira_allocno_t *regno_allocno_map;
/* Maximal register pressure inside loop for given register class
(defined only for the cover classes). */
@@ -123,7 +126,7 @@ struct loop_tree_node
};
/* The root of the loop tree corresponding to the all function. */
-extern loop_tree_node_t ira_loop_tree_root;
+extern ira_loop_tree_node_t ira_loop_tree_root;
/* Height of the loop tree. */
extern int ira_loop_tree_height;
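/* A sketch of walking this tree with ira_traverse_loop_tree (declared
   further down in this header).  print_node is a hypothetical visitor
   and assumes the first callback runs before a node's children are
   visited; it relies only on the bb and level fields shown above.  */

static void
print_node (ira_loop_tree_node_t node)
{
  if (node->bb != NULL)
    fprintf (stderr, "%*sBB %d\n", 2 * node->level, "", node->bb->index);
  else
    fprintf (stderr, "%*sloop, level %d\n", 2 * node->level, "", node->level);
}

/* Usage: ira_traverse_loop_tree (true, ira_loop_tree_root, print_node, NULL); */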
@@ -131,12 +134,12 @@ extern int ira_loop_tree_height;
/* All nodes representing basic blocks are referred through the
following array. We can not use basic block member `aux' for this
because it is used for insertion of insns on edges. */
-extern loop_tree_node_t ira_bb_nodes;
+extern ira_loop_tree_node_t ira_bb_nodes;
/* Two access macros to the nodes representing basic blocks. */
#if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
#define IRA_BB_NODE_BY_INDEX(index) __extension__ \
-(({ loop_tree_node_t _node = (&ira_bb_nodes[index]); \
+(({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]); \
if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
{ \
fprintf (stderr, \
@@ -153,12 +156,12 @@ extern loop_tree_node_t ira_bb_nodes;
/* All nodes representing loops are referred through the following
array. */
-extern loop_tree_node_t ira_loop_nodes;
+extern ira_loop_tree_node_t ira_loop_nodes;
/* Two access macros to the nodes representing loops. */
#if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
#define IRA_LOOP_NODE_BY_INDEX(index) __extension__ \
-(({ loop_tree_node_t const _node = (&ira_loop_nodes[index]);\
+(({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]);\
if (_node->children == NULL || _node->bb != NULL || _node->loop == NULL)\
{ \
fprintf (stderr, \
@@ -181,10 +184,10 @@ extern loop_tree_node_t ira_loop_nodes;
conflicts for other allocnos (e.g. to assign stack memory slot) we
use the live ranges. If the live ranges of two allocnos are
intersected, the allocnos are in conflict. */
-struct allocno_live_range
+struct ira_allocno_live_range
{
/* Allocno whose live range is described by given structure. */
- allocno_t allocno;
+ ira_allocno_t allocno;
/* Program point range. */
int start, finish;
/* Next structure describing program points where the allocno
@@ -194,16 +197,16 @@ struct allocno_live_range
allocno_live_range_t start_next, finish_next;
};
-/* Program points are enumerated by number from range 0..MAX_POINT-1.
- There are approximately two times more program points than insns.
- One program points correspond points between subsequent insns and
- other ones correspond to points after usage of input operands but
- before setting the output operands in insns. */
-extern int max_point;
+/* Program points are enumerated by number from range
+ 0..IRA_MAX_POINT-1. There are approximately two times more program
+   points than insns. Some program points correspond to points between
+ subsequent insns and other ones correspond to points after usage of
+ input operands but before setting the output operands in insns. */
+extern int ira_max_point;
-/* Arrays of size MAX_POINT mapping a program point to the allocno
+/* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
live ranges with given start/finish point. */
-extern allocno_live_range_t *start_point_ranges, *finish_point_ranges;
+extern allocno_live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
/* A structure representing an allocno (allocation entity). Allocno
represents a pseudo-register in an allocation region. If
@@ -212,7 +215,7 @@ extern allocno_live_range_t *start_point_ranges, *finish_point_ranges;
   called *cap*. There may be more than one cap representing the same
   pseudo-register in a region. It means that the corresponding
   pseudo-register lives in more than one non-intersected subregion. */
-struct allocno
+struct ira_allocno
{
/* The allocno order number starting with 0. Each allocno has an
unique number and the number is never changed for the
@@ -235,12 +238,12 @@ struct allocno
/* Allocnos with the same regno are linked by the following member.
Allocnos corresponding to inner loops are first in the list (it
corresponds to depth-first traverse of the loops). */
- allocno_t next_regno_allocno;
+ ira_allocno_t next_regno_allocno;
/* There may be different allocnos with the same regno in different
regions. Allocnos are bound to the corresponding loop tree node.
Pseudo-register may have only one regular allocno with given loop
tree node but more than one cap (see comments above). */
- loop_tree_node_t loop_tree_node;
+ ira_loop_tree_node_t loop_tree_node;
/* Accumulated usage references of the allocno. Here and below,
word 'accumulated' means info for given region and all nested
subregions. In this case, 'accumulated' means sum of references
@@ -272,18 +275,18 @@ struct allocno
/* Copies to other non-conflicting allocnos. The copies can
represent move insn or potential move insn usually because of two
operand insn constraints. */
- copy_t allocno_copies;
+ ira_copy_t allocno_copies;
  /* It is an allocno (cap) representing given allocno on upper loop tree
level. */
- allocno_t cap;
+ ira_allocno_t cap;
/* It is a link to allocno (cap) on lower loop level represented by
given cap. Null if given allocno is not a cap. */
- allocno_t cap_member;
+ ira_allocno_t cap_member;
/* Coalesced allocnos form a cyclic list. One allocno given by
FIRST_COALESCED_ALLOCNO represents all coalesced allocnos. The
list is chained by NEXT_COALESCED_ALLOCNO. */
- allocno_t first_coalesced_allocno;
- allocno_t next_coalesced_allocno;
+ ira_allocno_t first_coalesced_allocno;
+ ira_allocno_t next_coalesced_allocno;
/* Pointer to structures describing at what program point the
     allocno lives. We always maintain the list in such a way that *the
ranges in the list are not intersected and ordered by decreasing
@@ -317,14 +320,14 @@ struct allocno
intersects. */
int call_freq;
/* Start index of calls intersected by the allocno in array
- regno_calls[regno]. */
+ ira_regno_calls[regno]. */
int calls_crossed_start;
/* Length of the previous array (number of the intersected calls). */
int calls_crossed_num;
/* Non NULL if we remove restoring value from given allocno to
MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
allocno value is not changed inside the loop. */
- allocno_t mem_optimized_dest;
+ ira_allocno_t mem_optimized_dest;
/* TRUE if the allocno assigned to memory was a destination of
removed move (see ira-emit.c) at loop exit because the value of
the corresponding pseudo-register is not changed inside the
@@ -396,8 +399,8 @@ struct allocno
int available_regs_num;
/* Allocnos in a bucket (used in coloring) chained by the following
two members. */
- allocno_t next_bucket_allocno;
- allocno_t prev_bucket_allocno;
+ ira_allocno_t next_bucket_allocno;
+ ira_allocno_t prev_bucket_allocno;
/* Used for temporary purposes. */
int temp;
};
@@ -417,7 +420,7 @@ struct allocno
#define ALLOCNO_CONFLICT_ALLOCNOS_NUM(A) \
((A)->conflict_allocnos_num)
#define ALLOCNO_CONFLICT_HARD_REGS(A) ((A)->conflict_hard_regs)
-#define ALLOCNO_TOTAL_CONFLICT_HARD_REGS(A) ((A)->total_conflict_hard_regs)
+#define IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS(A) ((A)->total_conflict_hard_regs)
#define ALLOCNO_NREFS(A) ((A)->nrefs)
#define ALLOCNO_FREQ(A) ((A)->freq)
#define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
@@ -431,7 +434,7 @@ struct allocno
#define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
#ifdef STACK_REGS
#define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
-#define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
+#define IRA_ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
#endif
#define ALLOCNO_IN_GRAPH_P(A) ((A)->in_graph_p)
#define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
@@ -455,7 +458,7 @@ struct allocno
#define ALLOCNO_AVAILABLE_REGS_NUM(A) ((A)->available_regs_num)
#define ALLOCNO_NEXT_BUCKET_ALLOCNO(A) ((A)->next_bucket_allocno)
#define ALLOCNO_PREV_BUCKET_ALLOCNO(A) ((A)->prev_bucket_allocno)
-#define ALLOCNO_TEMP(A) ((A)->temp)
+#define IRA_ALLOCNO_TEMP(A) ((A)->temp)
#define ALLOCNO_FIRST_COALESCED_ALLOCNO(A) ((A)->first_coalesced_allocno)
#define ALLOCNO_NEXT_COALESCED_ALLOCNO(A) ((A)->next_coalesced_allocno)
#define ALLOCNO_LIVE_RANGES(A) ((A)->live_ranges)
@@ -465,32 +468,32 @@ struct allocno
/* Map regno -> allocnos with given regno (see comments for
allocno member `next_regno_allocno'). */
-extern allocno_t *regno_allocno_map;
+extern ira_allocno_t *ira_regno_allocno_map;
/* Array of references to all allocnos. The order number of the
allocno corresponds to the index in the array. Removed allocnos
have NULL element value. */
-extern allocno_t *allocnos;
+extern ira_allocno_t *ira_allocnos;
/* Sizes of the previous array. */
-extern int allocnos_num;
+extern int ira_allocnos_num;
/* Map conflict id -> allocno with given conflict id (see comments for
allocno member `conflict_id'). */
-extern allocno_t *conflict_id_allocno_map;
+extern ira_allocno_t *ira_conflict_id_allocno_map;
/* The following structure represents a copy of two allocnos. The
copies represent move insns or potential move insns usually because
of two operand insn constraints. To remove register shuffle, we
   also create copies between an allocno which is the output of an insn
   and an allocno becoming dead in the insn. */
-struct allocno_copy
+struct ira_allocno_copy
{
/* The unique order number of the copy node starting with 0. */
int num;
/* Allocnos connected by the copy. The first allocno should have
smaller order number than the second one. */
- allocno_t first, second;
+ ira_allocno_t first, second;
/* Execution frequency of the copy. */
int freq;
/* It is a move insn which is an origin of the copy. The member
@@ -501,25 +504,25 @@ struct allocno_copy
rtx insn;
/* All copies with the same allocno as FIRST are linked by the two
following members. */
- copy_t prev_first_allocno_copy, next_first_allocno_copy;
+ ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
/* All copies with the same allocno as SECOND are linked by the two
following members. */
- copy_t prev_second_allocno_copy, next_second_allocno_copy;
+ ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
/* Region from which given copy is originated. */
- loop_tree_node_t loop_tree_node;
+ ira_loop_tree_node_t loop_tree_node;
};
/* Array of references to all copies. The order number of the copy
corresponds to the index in the array. Removed copies have NULL
element value. */
-extern copy_t *copies;
+extern ira_copy_t *ira_copies;
/* Size of the previous array. */
-extern int copies_num;
+extern int ira_copies_num;
/* The following structure describes a stack slot used for spilled
pseudo-registers. */
-struct spilled_reg_stack_slot
+struct ira_spilled_reg_stack_slot
{
/* pseudo-registers assigned to the stack slot. */
regset_head spilled_regs;
@@ -530,35 +533,35 @@ struct spilled_reg_stack_slot
};
/* The number of elements in the following array. */
-extern int spilled_reg_stack_slots_num;
+extern int ira_spilled_reg_stack_slots_num;
/* The following array contains info about spilled pseudo-registers
stack slots used in current function so far. */
-extern struct spilled_reg_stack_slot *spilled_reg_stack_slots;
+extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, cost of the
allocnos assigned to hard-registers, cost of the allocnos assigned
to memory, cost of loads, stores and register move insns generated
for pseudo-register live range splitting (see ira-emit.c). */
-extern int overall_cost;
-extern int reg_cost, mem_cost;
-extern int load_cost, store_cost, shuffle_cost;
-extern int move_loops_num, additional_jumps_num;
+extern int ira_overall_cost;
+extern int ira_reg_cost, ira_mem_cost;
+extern int ira_load_cost, ira_store_cost, ira_shuffle_cost;
+extern int ira_move_loops_num, ira_additional_jumps_num;
/* Map: register class x machine mode -> number of hard registers of
given class needed to store value of given mode. If the number for
some hard-registers of the register class is different, the size
will be negative. */
-extern int reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
+extern int ira_reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
/* Maximal value of the previous array elements. */
-extern int max_nregs;
+extern int ira_max_nregs;
/* The number of bits in each element of array used to implement a bit
vector of allocnos and what type that element has. We use the
largest integer format on the host machine. */
-#define INT_BITS HOST_BITS_PER_WIDE_INT
-#define INT_TYPE HOST_WIDE_INT
+#define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
+#define IRA_INT_TYPE HOST_WIDE_INT
/* Set, clear or test bit number I in R, a bit vector of elements with
minimal index and maximal index equal correspondingly to MIN and
@@ -574,8 +577,8 @@ extern int max_nregs;
__FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
gcc_unreachable (); \
} \
- ((R)[(unsigned) (_i - _min) / INT_BITS] \
- |= ((INT_TYPE) 1 << ((unsigned) (_i - _min) % INT_BITS))); }))
+ ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
+ |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
#define CLEAR_ALLOCNO_SET_BIT(R, I, MIN, MAX) __extension__ \
@@ -587,8 +590,8 @@ extern int max_nregs;
__FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
gcc_unreachable (); \
} \
- ((R)[(unsigned) (_i - _min) / INT_BITS] \
- &= ~((INT_TYPE) 1 << ((unsigned) (_i - _min) % INT_BITS))); }))
+ ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
+ &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
#define TEST_ALLOCNO_SET_BIT(R, I, MIN, MAX) __extension__ \
(({ int _min = (MIN), _max = (MAX), _i = (I); \
@@ -599,22 +602,22 @@ extern int max_nregs;
__FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
gcc_unreachable (); \
} \
- ((R)[(unsigned) (_i - _min) / INT_BITS] \
- & ((INT_TYPE) 1 << ((unsigned) (_i - _min) % INT_BITS))); }))
+ ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
+ & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
#else
#define SET_ALLOCNO_SET_BIT(R, I, MIN, MAX) \
- ((R)[(unsigned) ((I) - (MIN)) / INT_BITS] \
- |= ((INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % INT_BITS)))
+ ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
+ |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
#define CLEAR_ALLOCNO_SET_BIT(R, I, MIN, MAX) \
- ((R)[(unsigned) ((I) - (MIN)) / INT_BITS] \
- &= ~((INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % INT_BITS)))
+ ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
+ &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
#define TEST_ALLOCNO_SET_BIT(R, I, MIN, MAX) \
- ((R)[(unsigned) ((I) - (MIN)) / INT_BITS] \
- & ((INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % INT_BITS)))
+ ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
+ & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
#endif
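/* A standalone sketch of the indexing scheme these macros share: an
   id I is biased by MIN, then split into a word index
   (I - MIN) / IRA_INT_BITS and a bit position (I - MIN) % IRA_INT_BITS.
   The word type below is a stand-in for IRA_INT_TYPE.  */

#include <stdio.h>

typedef unsigned long word_t;
#define WORD_BITS ((int) (sizeof (word_t) * 8))

int
main (void)
{
  word_t vec[4] = { 0, 0, 0, 0 };  /* covers ids MIN .. MIN + 4 * WORD_BITS - 1 */
  int min = 100, i = 170;

  /* The SET_ALLOCNO_SET_BIT computation.  */
  vec[(i - min) / WORD_BITS] |= (word_t) 1 << ((i - min) % WORD_BITS);

  /* The TEST_ALLOCNO_SET_BIT computation.  */
  printf ("id %d: word %d, bit %d, set = %d\n", i,
          (i - min) / WORD_BITS, (i - min) % WORD_BITS,
          (vec[(i - min) / WORD_BITS]
           & ((word_t) 1 << ((i - min) % WORD_BITS))) != 0);
  return 0;
}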
@@ -623,7 +626,7 @@ extern int max_nregs;
typedef struct {
/* Array containing the allocno bit vector. */
- INT_TYPE *vec;
+ IRA_INT_TYPE *vec;
/* The number of the current element in the vector. */
unsigned int word_num;
@@ -638,13 +641,14 @@ typedef struct {
int start_val;
/* The word of the bit vector currently visited. */
- unsigned INT_TYPE word;
-} allocno_set_iterator;
+ unsigned IRA_INT_TYPE word;
+} ira_allocno_set_iterator;
/* Initialize the iterator I for allocnos bit vector VEC containing
minimal and maximal values MIN and MAX. */
static inline void
-allocno_set_iter_init (allocno_set_iterator *i, INT_TYPE *vec, int min, int max)
+ira_allocno_set_iter_init (ira_allocno_set_iterator *i,
+ IRA_INT_TYPE *vec, int min, int max)
{
i->vec = vec;
i->word_num = 0;
@@ -658,13 +662,13 @@ allocno_set_iter_init (allocno_set_iterator *i, INT_TYPE *vec, int min, int max)
set to the allocno number to be visited. Otherwise, return
FALSE. */
static inline bool
-allocno_set_iter_cond (allocno_set_iterator *i, int *n)
+ira_allocno_set_iter_cond (ira_allocno_set_iterator *i, int *n)
{
/* Skip words that are zeros. */
for (; i->word == 0; i->word = i->vec[i->word_num])
{
i->word_num++;
- i->bit_num = i->word_num * INT_BITS;
+ i->bit_num = i->word_num * IRA_INT_BITS;
/* If we have reached the end, break. */
if (i->bit_num >= i->nel)
@@ -682,7 +686,7 @@ allocno_set_iter_cond (allocno_set_iterator *i, int *n)
/* Advance to the next allocno in the set. */
static inline void
-allocno_set_iter_next (allocno_set_iterator *i)
+ira_allocno_set_iter_next (ira_allocno_set_iterator *i)
{
i->word >>= 1;
i->bit_num++;
@@ -691,67 +695,67 @@ allocno_set_iter_next (allocno_set_iterator *i)
/* Loop over all elements of allocno set given by bit vector VEC and
their minimal and maximal values MIN and MAX. In each iteration, N
is set to the number of next allocno. ITER is an instance of
- allocno_set_iterator used to iterate the allocnos in the set. */
+ ira_allocno_set_iterator used to iterate the allocnos in the set. */
#define FOR_EACH_ALLOCNO_IN_SET(VEC, MIN, MAX, N, ITER) \
- for (allocno_set_iter_init (&(ITER), (VEC), (MIN), (MAX)); \
- allocno_set_iter_cond (&(ITER), &(N)); \
- allocno_set_iter_next (&(ITER)))
+ for (ira_allocno_set_iter_init (&(ITER), (VEC), (MIN), (MAX)); \
+ ira_allocno_set_iter_cond (&(ITER), &(N)); \
+ ira_allocno_set_iter_next (&(ITER)))
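/* A sketch of the intended use of the iterator above: visit every id
   recorded in a bit vector VEC covering ids MIN..MAX.  scan_set is a
   hypothetical helper.  */

static void
scan_set (IRA_INT_TYPE *vec, int min, int max)
{
  int n;
  ira_allocno_set_iterator asi;

  FOR_EACH_ALLOCNO_IN_SET (vec, min, max, n, asi)
    fprintf (stderr, "id %d is in the set\n", n);
}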
/* ira.c: */
/* Hard regsets whose all bits are correspondingly zero or one. */
-extern HARD_REG_SET zero_hard_reg_set;
-extern HARD_REG_SET one_hard_reg_set;
+extern HARD_REG_SET ira_zero_hard_reg_set;
+extern HARD_REG_SET ira_one_hard_reg_set;
/* Map: hard regs X modes -> set of hard registers for storing value
of given mode starting with given hard register. */
-extern HARD_REG_SET reg_mode_hard_regset
+extern HARD_REG_SET ira_reg_mode_hard_regset
[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES];
/* Arrays analogous to macros MEMORY_MOVE_COST and
REGISTER_MOVE_COST. */
-extern short memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
-extern move_table *register_move_cost[MAX_MACHINE_MODE];
+extern short ira_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
+extern move_table *ira_register_move_cost[MAX_MACHINE_MODE];
/* Similar to may_move_in_cost but it is calculated in IRA instead of
   regclass. Another difference is that we take only available hard
   registers into account to figure out that one register class is a
   subset of another one. */
-extern move_table *register_may_move_in_cost[MAX_MACHINE_MODE];
+extern move_table *ira_may_move_in_cost[MAX_MACHINE_MODE];
/* Similar to may_move_out_cost but it is calculated in IRA instead of
   regclass. Another difference is that we take only available hard
   registers into account to figure out that one register class is a
   subset of another one. */
-extern move_table *register_may_move_out_cost[MAX_MACHINE_MODE];
+extern move_table *ira_may_move_out_cost[MAX_MACHINE_MODE];
/* Register class subset relation: TRUE if the first class is a subset
of the second one considering only hard registers available for the
allocation. */
-extern int class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
+extern int ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
/* Array of number of hard registers of given class which are
available for the allocation. The order is defined by the
allocation order. */
-extern short class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+extern short ira_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
/* The number of elements of the above array for given register
class. */
-extern int class_hard_regs_num[N_REG_CLASSES];
+extern int ira_class_hard_regs_num[N_REG_CLASSES];
-/* Index (in class_hard_regs) for given register class and hard
+/* Index (in ira_class_hard_regs) for given register class and hard
register (in general case a hard register can belong to several
register classes). The index is negative for hard registers
unavailable for the allocation. */
-extern short class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+extern short ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
/* Function specific hard registers can not be used for the register
allocation. */
-extern HARD_REG_SET no_alloc_regs;
+extern HARD_REG_SET ira_no_alloc_regs;
/* Number of given class hard registers available for the register
allocation for given classes. */
-extern int available_class_regs[N_REG_CLASSES];
+extern int ira_available_class_regs[N_REG_CLASSES];
/* Array whose values are hard regset of hard registers available for
the allocation of given register class whose HARD_REGNO_MODE_OK
@@ -762,42 +766,42 @@ extern HARD_REG_SET prohibited_class_mode_regs
/* Array whose values are hard regset of hard registers for which
move of the hard register in given mode into itself is
prohibited. */
-extern HARD_REG_SET prohibited_mode_move_regs[NUM_MACHINE_MODES];
+extern HARD_REG_SET ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
/* Number of cover classes. Cover classes are non-intersected register
classes containing all hard-registers available for the
allocation. */
-extern int reg_class_cover_size;
+extern int ira_reg_class_cover_size;
/* The array containing cover classes (see also comments for macro
- IRA_COVER_CLASSES). Only first REG_CLASS_COVER_SIZE elements are
+   IRA_COVER_CLASSES). Only the first IRA_REG_CLASS_COVER_SIZE elements are
used for this. */
-extern enum reg_class reg_class_cover[N_REG_CLASSES];
+extern enum reg_class ira_reg_class_cover[N_REG_CLASSES];
/* The value is number of elements in the subsequent array. */
-extern int important_classes_num;
+extern int ira_important_classes_num;
/* The array containing non-empty classes (including non-empty cover
   classes) which are subclasses of cover classes. Such classes are
important for calculation of the hard register usage costs. */
-extern enum reg_class important_classes[N_REG_CLASSES];
+extern enum reg_class ira_important_classes[N_REG_CLASSES];
/* The array containing indexes of important classes in the previous
array. The array elements are defined only for important
classes. */
-extern int important_class_nums[N_REG_CLASSES];
+extern int ira_important_class_nums[N_REG_CLASSES];
/* Map of all register classes to corresponding cover class containing
the given class. If given class is not a subset of a cover class,
we translate it into the cheapest cover class. */
-extern enum reg_class class_translate[N_REG_CLASSES];
+extern enum reg_class ira_class_translate[N_REG_CLASSES];
/* The biggest important class inside of intersection of the two
classes (that is calculated taking only hard registers available
   for allocation into account). If both classes contain no hard
   registers available for allocation, the value is calculated
   taking all hard-registers including fixed ones into account. */
-extern enum reg_class reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
+extern enum reg_class ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
/* The biggest important class inside of union of the two classes
(that is calculated taking only hard registers available for
@@ -806,100 +810,100 @@ extern enum reg_class reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
taking all hard-registers including fixed ones into account. In
other words, the value is the corresponding reg_class_subunion
value. */
-extern enum reg_class reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
+extern enum reg_class ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
-extern void set_non_alloc_regs (int);
extern void *ira_allocate (size_t);
extern void *ira_reallocate (void *, size_t);
extern void ira_free (void *addr);
extern bitmap ira_allocate_bitmap (void);
extern void ira_free_bitmap (bitmap);
-extern void print_disposition (FILE *);
-extern void debug_disposition (void);
-extern void debug_class_cover (void);
-extern void init_register_move_cost (enum machine_mode);
+extern void ira_print_disposition (FILE *);
+extern void ira_debug_disposition (void);
+extern void ira_debug_class_cover (void);
+extern void ira_init_register_move_cost (enum machine_mode);
/* The length of the two following arrays. */
-extern int reg_equiv_len;
+extern int ira_reg_equiv_len;
/* The element value is TRUE if the corresponding regno value is
invariant. */
-extern bool *reg_equiv_invariant_p;
+extern bool *ira_reg_equiv_invariant_p;
/* The element value is equiv constant of given pseudo-register or
NULL_RTX. */
-extern rtx *reg_equiv_const;
+extern rtx *ira_reg_equiv_const;
/* ira-build.c */
/* The current loop tree node and its regno allocno map. */
-extern loop_tree_node_t ira_curr_loop_tree_node;
-extern allocno_t *ira_curr_regno_allocno_map;
+extern ira_loop_tree_node_t ira_curr_loop_tree_node;
+extern ira_allocno_t *ira_curr_regno_allocno_map;
/* Array of vectors containing calls a given pseudo-register lives
through. */
-extern VEC(rtx, heap) **regno_calls;
-
-extern int add_regno_call (int, rtx);
-
-extern void debug_allocno_copies (allocno_t);
-
-extern void traverse_loop_tree (bool, loop_tree_node_t,
- void (*) (loop_tree_node_t),
- void (*) (loop_tree_node_t));
-extern allocno_t create_allocno (int, bool, loop_tree_node_t);
-extern void set_allocno_cover_class (allocno_t, enum reg_class);
-extern bool conflict_vector_profitable_p (allocno_t, int);
-extern void allocate_allocno_conflict_vec (allocno_t, int);
-extern void allocate_allocno_conflicts (allocno_t, int);
-extern void add_allocno_conflict (allocno_t, allocno_t);
-extern void print_expanded_allocno (allocno_t);
-extern allocno_live_range_t create_allocno_live_range (allocno_t, int, int,
- allocno_live_range_t);
-extern void finish_allocno_live_range (allocno_live_range_t);
-extern void free_allocno_updated_costs (allocno_t);
-extern copy_t create_copy (allocno_t, allocno_t, int, rtx, loop_tree_node_t);
-extern void add_allocno_copy_to_list (copy_t);
-extern void swap_allocno_copy_ends_if_necessary (copy_t);
-extern void remove_allocno_copy_from_list (copy_t);
-extern copy_t add_allocno_copy (allocno_t, allocno_t, int, rtx,
- loop_tree_node_t);
-
-extern int *allocate_cost_vector (enum reg_class);
-extern void free_cost_vector (int *, enum reg_class);
+extern VEC(rtx, heap) **ira_regno_calls;
+
+extern int ira_add_regno_call (int, rtx);
+
+extern void ira_debug_allocno_copies (ira_allocno_t);
+
+extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
+ void (*) (ira_loop_tree_node_t),
+ void (*) (ira_loop_tree_node_t));
+extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
+extern void ira_set_allocno_cover_class (ira_allocno_t, enum reg_class);
+extern bool ira_conflict_vector_profitable_p (ira_allocno_t, int);
+extern void ira_allocate_allocno_conflict_vec (ira_allocno_t, int);
+extern void ira_allocate_allocno_conflicts (ira_allocno_t, int);
+extern void ira_add_allocno_conflict (ira_allocno_t, ira_allocno_t);
+extern void ira_print_expanded_allocno (ira_allocno_t);
+extern allocno_live_range_t ira_create_allocno_live_range
+ (ira_allocno_t, int, int, allocno_live_range_t);
+extern void ira_finish_allocno_live_range (allocno_live_range_t);
+extern void ira_free_allocno_updated_costs (ira_allocno_t);
+extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
+ int, rtx, ira_loop_tree_node_t);
+extern void ira_add_allocno_copy_to_list (ira_copy_t);
+extern void ira_swap_allocno_copy_ends_if_necessary (ira_copy_t);
+extern void ira_remove_allocno_copy_from_list (ira_copy_t);
+extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int, rtx,
+ ira_loop_tree_node_t);
+
+extern int *ira_allocate_cost_vector (enum reg_class);
+extern void ira_free_cost_vector (int *, enum reg_class);
extern void ira_flattening (int, int);
extern bool ira_build (bool);
extern void ira_destroy (void);
/* ira-costs.c */
-extern void init_ira_costs_once (void);
-extern void init_ira_costs (void);
-extern void finish_ira_costs_once (void);
+extern void ira_init_costs_once (void);
+extern void ira_init_costs (void);
+extern void ira_finish_costs_once (void);
extern void ira_costs (void);
-extern void tune_allocno_costs_and_cover_classes (void);
+extern void ira_tune_allocno_costs_and_cover_classes (void);
/* ira-lives.c */
-extern void rebuild_start_finish_chains (void);
-extern void print_live_range_list (FILE *, allocno_live_range_t);
-extern void debug_live_range_list (allocno_live_range_t);
-extern void debug_allocno_live_ranges (allocno_t);
-extern void debug_live_ranges (void);
-extern void create_allocno_live_ranges (void);
-extern void finish_allocno_live_ranges (void);
+extern void ira_rebuild_start_finish_chains (void);
+extern void ira_print_live_range_list (FILE *, allocno_live_range_t);
+extern void ira_debug_live_range_list (allocno_live_range_t);
+extern void ira_debug_allocno_live_ranges (ira_allocno_t);
+extern void ira_debug_live_ranges (void);
+extern void ira_create_allocno_live_ranges (void);
+extern void ira_finish_allocno_live_ranges (void);
/* ira-conflicts.c */
-extern bool allocno_live_ranges_intersect_p (allocno_t, allocno_t);
-extern bool pseudo_live_ranges_intersect_p (int, int);
-extern void debug_conflicts (bool);
+extern bool ira_allocno_live_ranges_intersect_p (ira_allocno_t, ira_allocno_t);
+extern bool ira_pseudo_live_ranges_intersect_p (int, int);
+extern void ira_debug_conflicts (bool);
extern void ira_build_conflicts (void);
/* ira-color.c */
-extern int loop_edge_freq (loop_tree_node_t, int, bool);
-extern void reassign_conflict_allocnos (int);
-extern void initiate_ira_assign (void);
-extern void finish_ira_assign (void);
+extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
+extern void ira_reassign_conflict_allocnos (int);
+extern void ira_initiate_assign (void);
+extern void ira_finish_assign (void);
extern void ira_color (void);
extern void ira_fast_allocation (void);
@@ -910,13 +914,13 @@ extern void ira_emit (bool);
/* The iterator for all allocnos. */
typedef struct {
- /* The number of the current element in ALLOCNOS. */
+ /* The number of the current element in IRA_ALLOCNOS. */
int n;
-} allocno_iterator;
+} ira_allocno_iterator;
/* Initialize the iterator I. */
static inline void
-allocno_iter_init (allocno_iterator *i)
+ira_allocno_iter_init (ira_allocno_iterator *i)
{
i->n = 0;
}
@@ -924,14 +928,14 @@ allocno_iter_init (allocno_iterator *i)
/* Return TRUE if we have more allocnos to visit, in which case *A is
set to the allocno to be visited. Otherwise, return FALSE. */
static inline bool
-allocno_iter_cond (allocno_iterator *i, allocno_t *a)
+ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
{
int n;
- for (n = i->n; n < allocnos_num; n++)
- if (allocnos[n] != NULL)
+ for (n = i->n; n < ira_allocnos_num; n++)
+ if (ira_allocnos[n] != NULL)
{
- *a = allocnos[n];
+ *a = ira_allocnos[n];
i->n = n + 1;
return true;
}
@@ -939,24 +943,24 @@ allocno_iter_cond (allocno_iterator *i, allocno_t *a)
}
/* Loop over all allocnos. In each iteration, A is set to the next
- allocno. ITER is an instance of allocno_iterator used to iterate
+ allocno. ITER is an instance of ira_allocno_iterator used to iterate
the allocnos. */
#define FOR_EACH_ALLOCNO(A, ITER) \
- for (allocno_iter_init (&(ITER)); \
- allocno_iter_cond (&(ITER), &(A));)
+ for (ira_allocno_iter_init (&(ITER)); \
+ ira_allocno_iter_cond (&(ITER), &(A));)
/* The iterator for copies. */
typedef struct {
- /* The number of the current element in COPIES. */
+ /* The number of the current element in IRA_COPIES. */
int n;
-} copy_iterator;
+} ira_copy_iterator;
/* Initialize the iterator I. */
static inline void
-copy_iter_init (copy_iterator *i)
+ira_copy_iter_init (ira_copy_iterator *i)
{
i->n = 0;
}
@@ -964,14 +968,14 @@ copy_iter_init (copy_iterator *i)
/* Return TRUE if we have more copies to visit, in which case *CP is
set to the copy to be visited. Otherwise, return FALSE. */
static inline bool
-copy_iter_cond (copy_iterator *i, copy_t *cp)
+ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
{
int n;
- for (n = i->n; n < copies_num; n++)
- if (copies[n] != NULL)
+ for (n = i->n; n < ira_copies_num; n++)
+ if (ira_copies[n] != NULL)
{
- *cp = copies[n];
+ *cp = ira_copies[n];
i->n = n + 1;
return true;
}
@@ -979,11 +983,11 @@ copy_iter_cond (copy_iterator *i, copy_t *cp)
}
/* Loop over all copies. In each iteration, C is set to the next
- copy. ITER is an instance of copy_iterator used to iterate
+ copy. ITER is an instance of ira_copy_iterator used to iterate
the copies. */
#define FOR_EACH_COPY(C, ITER) \
- for (copy_iter_init (&(ITER)); \
- copy_iter_cond (&(ITER), &(C));)
+ for (ira_copy_iter_init (&(ITER)); \
+ ira_copy_iter_cond (&(ITER), &(C));)
@@ -998,7 +1002,7 @@ typedef struct {
void *vec;
/* The number of the current element in the vector (of type
- allocno_t or INT_TYPE). */
+ ira_allocno_t or IRA_INT_TYPE). */
unsigned int word_num;
/* The bit vector size. It is defined only if
@@ -1016,12 +1020,13 @@ typedef struct {
/* The word of bit vector currently visited. It is defined only if
ALLOCNO_CONFLICT_VEC_P is FALSE. */
- unsigned INT_TYPE word;
-} allocno_conflict_iterator;
+ unsigned IRA_INT_TYPE word;
+} ira_allocno_conflict_iterator;
/* Initialize the iterator I with ALLOCNO conflicts. */
static inline void
-allocno_conflict_iter_init (allocno_conflict_iterator *i, allocno_t allocno)
+ira_allocno_conflict_iter_init (ira_allocno_conflict_iterator *i,
+ ira_allocno_t allocno)
{
i->allocno_conflict_vec_p = ALLOCNO_CONFLICT_VEC_P (allocno);
i->vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (allocno);
@@ -1033,11 +1038,12 @@ allocno_conflict_iter_init (allocno_conflict_iterator *i, allocno_t allocno)
if (ALLOCNO_MIN (allocno) > ALLOCNO_MAX (allocno))
i->size = 0;
else
- i->size = ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno) + INT_BITS)
- / INT_BITS) * sizeof (INT_TYPE);
+ i->size = ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno)
+ + IRA_INT_BITS)
+ / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
i->bit_num = 0;
i->base_conflict_id = ALLOCNO_MIN (allocno);
- i->word = (i->size == 0 ? 0 : ((INT_TYPE *) i->vec)[0]);
+ i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
}
}
@@ -1045,13 +1051,14 @@ allocno_conflict_iter_init (allocno_conflict_iterator *i, allocno_t allocno)
case *A is set to the allocno to be visited. Otherwise, return
FALSE. */
static inline bool
-allocno_conflict_iter_cond (allocno_conflict_iterator *i, allocno_t *a)
+ira_allocno_conflict_iter_cond (ira_allocno_conflict_iterator *i,
+ ira_allocno_t *a)
{
- allocno_t conflict_allocno;
+ ira_allocno_t conflict_allocno;
if (i->allocno_conflict_vec_p)
{
- conflict_allocno = ((allocno_t *) i->vec)[i->word_num];
+ conflict_allocno = ((ira_allocno_t *) i->vec)[i->word_num];
if (conflict_allocno == NULL)
return false;
*a = conflict_allocno;
@@ -1060,22 +1067,22 @@ allocno_conflict_iter_cond (allocno_conflict_iterator *i, allocno_t *a)
else
{
/* Skip words that are zeros. */
- for (; i->word == 0; i->word = ((INT_TYPE *) i->vec)[i->word_num])
+ for (; i->word == 0; i->word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
{
i->word_num++;
/* If we have reached the end, break. */
- if (i->word_num * sizeof (INT_TYPE) >= i->size)
+ if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
return false;
- i->bit_num = i->word_num * INT_BITS;
+ i->bit_num = i->word_num * IRA_INT_BITS;
}
/* Skip bits that are zero. */
for (; (i->word & 1) == 0; i->word >>= 1)
i->bit_num++;
- *a = conflict_id_allocno_map[i->bit_num + i->base_conflict_id];
+ *a = ira_conflict_id_allocno_map[i->bit_num + i->base_conflict_id];
return true;
}
@@ -1083,7 +1090,7 @@ allocno_conflict_iter_cond (allocno_conflict_iterator *i, allocno_t *a)
/* Advance to the next conflicting allocno. */
static inline void
-allocno_conflict_iter_next (allocno_conflict_iterator *i)
+ira_allocno_conflict_iter_next (ira_allocno_conflict_iterator *i)
{
if (i->allocno_conflict_vec_p)
i->word_num++;
@@ -1096,12 +1103,12 @@ allocno_conflict_iter_next (allocno_conflict_iterator *i)
/* Loop over all allocnos conflicting with ALLOCNO. In each
iteration, A is set to the next conflicting allocno. ITER is an
- instance of allocno_conflict_iterator used to iterate the
+ instance of ira_allocno_conflict_iterator used to iterate the
conflicts. */
#define FOR_EACH_ALLOCNO_CONFLICT(ALLOCNO, A, ITER) \
- for (allocno_conflict_iter_init (&(ITER), (ALLOCNO)); \
- allocno_conflict_iter_cond (&(ITER), &(A)); \
- allocno_conflict_iter_next (&(ITER)))
+ for (ira_allocno_conflict_iter_init (&(ITER), (ALLOCNO)); \
+ ira_allocno_conflict_iter_cond (&(ITER), &(A)); \
+ ira_allocno_conflict_iter_next (&(ITER)))
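/* A sketch of the intended use of the conflict iterator above: count
   the allocnos conflicting with A, whichever representation (vector
   or bit set) the allocno uses.  count_conflicts is a hypothetical
   helper; the same looping pattern appears in ira-color.c.  */

static int
count_conflicts (ira_allocno_t a)
{
  ira_allocno_t conflict_a;
  ira_allocno_conflict_iterator ci;
  int n = 0;

  FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, ci)
    n++;
  return n;
}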
@@ -1109,8 +1116,8 @@ allocno_conflict_iter_next (allocno_conflict_iterator *i)
HARD_REGNO and containing value of MODE are not in set
HARD_REGSET. */
static inline bool
-hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
- HARD_REG_SET hard_regset)
+ira_hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
+ HARD_REG_SET hard_regset)
{
int i;
@@ -1130,15 +1137,15 @@ hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
/* Allocate cost vector *VEC for hard registers of COVER_CLASS and
   initialize the elements to VAL if it is necessary. */
static inline void
-allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
+ira_allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
{
int i, *reg_costs;
int len;
if (*vec != NULL)
return;
- *vec = reg_costs = allocate_cost_vector (cover_class);
- len = class_hard_regs_num[cover_class];
+ *vec = reg_costs = ira_allocate_cost_vector (cover_class);
+ len = ira_class_hard_regs_num[cover_class];
for (i = 0; i < len; i++)
reg_costs[i] = val;
}
@@ -1146,14 +1153,14 @@ allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
/* Allocate cost vector *VEC for hard registers of COVER_CLASS and
   copy values of vector SRC into the vector if it is necessary. */
static inline void
-allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
+ira_allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
{
int len;
if (*vec != NULL || src == NULL)
return;
- *vec = allocate_cost_vector (cover_class);
- len = class_hard_regs_num[cover_class];
+ *vec = ira_allocate_cost_vector (cover_class);
+ len = ira_class_hard_regs_num[cover_class];
memcpy (*vec, src, sizeof (int) * len);
}
@@ -1161,16 +1168,16 @@ allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
   copy values of vector SRC into the vector or initialize it to VAL
(if SRC is null). */
static inline void
-allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
- int val, int *src)
+ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
+ int val, int *src)
{
int i, *reg_costs;
int len;
if (*vec != NULL)
return;
- *vec = reg_costs = allocate_cost_vector (cover_class);
- len = class_hard_regs_num[cover_class];
+ *vec = reg_costs = ira_allocate_cost_vector (cover_class);
+ len = ira_class_hard_regs_num[cover_class];
if (src != NULL)
memcpy (reg_costs, src, sizeof (int) * len);
else
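/* A sketch of the lazy-allocation idiom the three helpers above share:
   a cost vector stays NULL until first needed, and a repeated call is
   a no-op, so callers may invoke the helpers unconditionally.
   touch_costs is a hypothetical helper; COVER_CLASS is assumed to be
   a valid cover class.  */

static void
touch_costs (enum reg_class cover_class)
{
  int *costs = NULL;

  ira_allocate_and_set_costs (&costs, cover_class, 0); /* allocates and zeroes */
  ira_allocate_and_set_costs (&costs, cover_class, 7); /* no-op: already allocated */
  ira_free_cost_vector (costs, cover_class);
}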
diff --git a/gcc/ira-lives.c b/gcc/ira-lives.c
index b4e82ca90c4..448deadae43 100644
--- a/gcc/ira-lives.c
+++ b/gcc/ira-lives.c
@@ -42,16 +42,18 @@ along with GCC; see the file COPYING3. If not see
works on the allocno basis and creates live ranges instead of
pseudo-register conflicts. */
-/* Program points are enumerated by number from range 0..MAX_POINT-1.
- There are approximately two times more program points than insns.
- One program points correspond points between subsequent insns and
- other ones correspond to points after usage of input operands but
- before setting the output operands in insns. */
-int max_point;
-
-/* Arrays of size MAX_POINT mapping a program point to the allocno
+/* Program points are enumerated by numbers from range
+ 0..IRA_MAX_POINT-1. There are approximately two times more program
+ points than insns. Program points are places in the program where
+   liveness info can be changed. In the most general case (there are
+   more complicated cases too) some program points correspond to places
+   where an input operand dies and others correspond to places where output
+ operands are born. */
+int ira_max_point;
+
+/* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
live ranges with given start/finish point. */
-allocno_live_range_t *start_point_ranges, *finish_point_ranges;
+allocno_live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
/* Number of the current program point. */
static int curr_point;
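/* A sketch of how live ranges accumulate as curr_point advances
   (hypothetical points; make_regno_born and make_regno_dead below add
   one endpoint at a time).  New ranges are pushed at the head of the
   list, so the list stays ordered by decreasing start point and its
   ranges never intersect.  */

static void
sketch_ranges (ira_allocno_t a)
{
  /* The allocno lives from point 10 to 20, and again from 30 to 42.  */
  ALLOCNO_LIVE_RANGES (a)
    = ira_create_allocno_live_range (a, 10, 20, ALLOCNO_LIVE_RANGES (a));
  ALLOCNO_LIVE_RANGES (a)
    = ira_create_allocno_live_range (a, 30, 42, ALLOCNO_LIVE_RANGES (a));
}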
@@ -71,7 +73,7 @@ static sparseset allocnos_live;
static HARD_REG_SET hard_regs_live;
/* The loop tree node corresponding to the current basic block. */
-static loop_tree_node_t curr_bb_node;
+static ira_loop_tree_node_t curr_bb_node;
/* The function processing birth of register REGNO. It updates living
hard regs and conflict hard regs for living allocnos or starts a
@@ -81,7 +83,7 @@ static void
make_regno_born (int regno)
{
unsigned int i;
- allocno_t a;
+ ira_allocno_t a;
allocno_live_range_t p;
if (regno < FIRST_PSEUDO_REGISTER)
@@ -89,8 +91,8 @@ make_regno_born (int regno)
SET_HARD_REG_BIT (hard_regs_live, regno);
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
{
- SET_HARD_REG_BIT (ALLOCNO_CONFLICT_HARD_REGS (allocnos[i]), regno);
- SET_HARD_REG_BIT (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (allocnos[i]),
+ SET_HARD_REG_BIT (ALLOCNO_CONFLICT_HARD_REGS (ira_allocnos[i]), regno);
+ SET_HARD_REG_BIT (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (ira_allocnos[i]),
regno);
}
return;
@@ -101,12 +103,13 @@ make_regno_born (int regno)
if ((p = ALLOCNO_LIVE_RANGES (a)) == NULL
|| (p->finish != curr_point && p->finish + 1 != curr_point))
ALLOCNO_LIVE_RANGES (a)
- = create_allocno_live_range (a, curr_point, -1, ALLOCNO_LIVE_RANGES (a));
+ = ira_create_allocno_live_range (a, curr_point, -1,
+ ALLOCNO_LIVE_RANGES (a));
}
/* Update ALLOCNO_EXCESS_PRESSURE_POINTS_NUM for allocno A. */
static void
-update_allocno_pressure_excess_length (allocno_t a)
+update_allocno_pressure_excess_length (ira_allocno_t a)
{
int start;
enum reg_class cover_class;
@@ -128,7 +131,7 @@ update_allocno_pressure_excess_length (allocno_t a)
static void
make_regno_dead (int regno)
{
- allocno_t a;
+ ira_allocno_t a;
allocno_live_range_t p;
if (regno < FIRST_PSEUDO_REGISTER)
@@ -163,7 +166,7 @@ static int curr_reg_pressure[N_REG_CLASSES];
of the register pressure excess, and conflicting hard registers of
A. */
static void
-set_allocno_live (allocno_t a)
+set_allocno_live (ira_allocno_t a)
{
int nregs;
enum reg_class cover_class;
@@ -172,12 +175,13 @@ set_allocno_live (allocno_t a)
return;
sparseset_set_bit (allocnos_live, ALLOCNO_NUM (a));
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a), hard_regs_live);
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), hard_regs_live);
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), hard_regs_live);
cover_class = ALLOCNO_COVER_CLASS (a);
- nregs = reg_class_nregs[cover_class][ALLOCNO_MODE (a)];
+ nregs = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (a)];
curr_reg_pressure[cover_class] += nregs;
if (high_pressure_start_point[cover_class] < 0
- && curr_reg_pressure[cover_class] > available_class_regs[cover_class])
+ && (curr_reg_pressure[cover_class]
+ > ira_available_class_regs[cover_class]))
high_pressure_start_point[cover_class] = curr_point;
if (curr_bb_node->reg_pressure[cover_class]
< curr_reg_pressure[cover_class])
@@ -188,7 +192,7 @@ set_allocno_live (allocno_t a)
pressure, start point of the register pressure excess, and register
pressure excess length for living allocnos. */
static void
-clear_allocno_live (allocno_t a)
+clear_allocno_live (ira_allocno_t a)
{
unsigned int i;
enum reg_class cover_class;
@@ -197,15 +201,15 @@ clear_allocno_live (allocno_t a)
{
cover_class = ALLOCNO_COVER_CLASS (a);
curr_reg_pressure[cover_class]
- -= reg_class_nregs[cover_class][ALLOCNO_MODE (a)];
+ -= ira_reg_class_nregs[cover_class][ALLOCNO_MODE (a)];
ira_assert (curr_reg_pressure[cover_class] >= 0);
if (high_pressure_start_point[cover_class] >= 0
&& (curr_reg_pressure[cover_class]
- <= available_class_regs[cover_class]))
+ <= ira_available_class_regs[cover_class]))
{
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
{
- update_allocno_pressure_excess_length (allocnos[i]);
+ update_allocno_pressure_excess_length (ira_allocnos[i]);
}
high_pressure_start_point[cover_class] = -1;
}
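
A sketch of the pressure-excess bookkeeping done by set_allocno_live
and clear_allocno_live, assuming a single class with two available
registers; the real code keeps one counter per cover class and charges
the excess length to each live allocno rather than to one global:

#include <stdio.h>

#define AVAIL 2

static int pressure, excess_start = -1, excess_len;

static void
change_pressure (int delta, int point)
{
  pressure += delta;
  if (excess_start < 0 && pressure > AVAIL)
    excess_start = point;                  /* excess region begins */
  else if (excess_start >= 0 && pressure <= AVAIL)
    {
      excess_len += point - excess_start;  /* excess region ends */
      excess_start = -1;
    }
}

int
main (void)
{
  change_pressure (+1, 0);   /* pressure 1 */
  change_pressure (+1, 1);   /* pressure 2 */
  change_pressure (+1, 2);   /* pressure 3 > AVAIL: excess starts */
  change_pressure (-1, 3);   /* pressure 2: excess of length 1 ends */
  printf ("excess length: %d\n", excess_len);
  return 0;
}
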
@@ -251,7 +255,7 @@ mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
if (regno >= FIRST_PSEUDO_REGISTER)
{
- allocno_t a = ira_curr_regno_allocno_map[regno];
+ ira_allocno_t a = ira_curr_regno_allocno_map[regno];
if (a != NULL)
{
@@ -261,7 +265,7 @@ mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
}
make_regno_born (regno);
}
- else if (! TEST_HARD_REG_BIT (no_alloc_regs, regno))
+ else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
enum reg_class cover_class;
@@ -271,13 +275,13 @@ mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
if (! TEST_HARD_REG_BIT (hard_regs_live, regno)
&& ! TEST_HARD_REG_BIT (eliminable_regset, regno))
{
- cover_class = class_translate[REGNO_REG_CLASS (regno)];
+ cover_class = ira_class_translate[REGNO_REG_CLASS (regno)];
if (cover_class != NO_REGS)
{
curr_reg_pressure[cover_class]++;
if (high_pressure_start_point[cover_class] < 0
&& (curr_reg_pressure[cover_class]
- > available_class_regs[cover_class]))
+ > ira_available_class_regs[cover_class]))
high_pressure_start_point[cover_class] = curr_point;
}
make_regno_born (regno);
@@ -319,7 +323,7 @@ mark_reg_conflicts (rtx reg)
if (regno >= FIRST_PSEUDO_REGISTER)
make_regno_born_and_dead (regno);
- else if (! TEST_HARD_REG_BIT (no_alloc_regs, regno))
+ else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
@@ -342,7 +346,7 @@ mark_reg_death (rtx reg)
if (regno >= FIRST_PSEUDO_REGISTER)
{
- allocno_t a = ira_curr_regno_allocno_map[regno];
+ ira_allocno_t a = ira_curr_regno_allocno_map[regno];
if (a != NULL)
{
@@ -352,7 +356,7 @@ mark_reg_death (rtx reg)
}
make_regno_dead (regno);
}
- else if (! TEST_HARD_REG_BIT (no_alloc_regs, regno))
+ else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
enum reg_class cover_class;
@@ -361,17 +365,17 @@ mark_reg_death (rtx reg)
{
if (TEST_HARD_REG_BIT (hard_regs_live, regno))
{
- cover_class = class_translate[REGNO_REG_CLASS (regno)];
+ cover_class = ira_class_translate[REGNO_REG_CLASS (regno)];
if (cover_class != NO_REGS)
{
curr_reg_pressure[cover_class]--;
if (high_pressure_start_point[cover_class] >= 0
&& (curr_reg_pressure[cover_class]
- <= available_class_regs[cover_class]))
+ <= ira_available_class_regs[cover_class]))
{
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
{
- update_allocno_pressure_excess_length (allocnos[i]);
+ update_allocno_pressure_excess_length (ira_allocnos[i]);
}
high_pressure_start_point[cover_class] = -1;
}
@@ -494,7 +498,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
? GENERAL_REGS
: REG_CLASS_FROM_CONSTRAINT (c, constraints));
if ((cl != NO_REGS && next_cl != cl)
- || available_class_regs[next_cl] > 1)
+ || ira_available_class_regs[next_cl] > 1)
return NO_REGS;
cl = next_cl;
break;
@@ -505,7 +509,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
= single_reg_class (recog_data.constraints[c - '0'],
recog_data.operand[c - '0'], NULL_RTX);
if ((cl != NO_REGS && next_cl != cl) || next_cl == NO_REGS
- || available_class_regs[next_cl] > 1)
+ || ira_available_class_regs[next_cl] > 1)
return NO_REGS;
cl = next_cl;
break;
@@ -539,7 +543,7 @@ process_single_reg_class_operands (bool in_p, int freq)
unsigned int px;
enum reg_class cl, cover_class;
rtx operand;
- allocno_t operand_a, a;
+ ira_allocno_t operand_a, a;
for (i = 0; i < recog_data.n_operands; i++)
{
@@ -568,26 +572,28 @@ process_single_reg_class_operands (bool in_p, int freq)
operand_a = ira_curr_regno_allocno_map[regno];
mode = ALLOCNO_MODE (operand_a);
cover_class = ALLOCNO_COVER_CLASS (operand_a);
- if (class_subset_p[cl][cover_class]
- && class_hard_regs_num[cl] != 0
- && class_hard_reg_index[cover_class][class_hard_regs[cl][0]] >= 0
+ if (ira_class_subset_p[cl][cover_class]
+ && ira_class_hard_regs_num[cl] != 0
+ && (ira_class_hard_reg_index[cover_class]
+ [ira_class_hard_regs[cl][0]]) >= 0
&& reg_class_size[cl] <= (unsigned) CLASS_MAX_NREGS (cl, mode))
{
/* ??? FREQ */
cost = freq * (in_p
- ? register_move_cost[mode][cover_class][cl]
- : register_move_cost[mode][cl][cover_class]);
- allocate_and_set_costs
+ ? ira_register_move_cost[mode][cover_class][cl]
+ : ira_register_move_cost[mode][cl][cover_class]);
+ ira_allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a), cover_class, 0);
ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
- [class_hard_reg_index[cover_class][class_hard_regs[cl][0]]]
+ [ira_class_hard_reg_index
+ [cover_class][ira_class_hard_regs[cl][0]]]
-= cost;
}
}
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, px)
{
- a = allocnos[px];
+ a = ira_allocnos[px];
cover_class = ALLOCNO_COVER_CLASS (a);
if (a != operand_a)
{
@@ -596,7 +602,7 @@ process_single_reg_class_operands (bool in_p, int freq)
because it will be spilled in reload anyway. */
IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
reg_class_contents[cl]);
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
reg_class_contents[cl]);
}
}
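
A sketch of the conflict widening above: when an operand admits only
one small register class, every other allocno live at that point is
made to conflict with all of that class's registers, so the
constrained operand is not starved. Register sets are modeled here as
invented 8-bit masks:

#include <stdio.h>

#define N_LIVE 3

int
main (void)
{
  unsigned conflict_regs[N_LIVE] = {0, 0, 0};
  unsigned single_class = 0x03;   /* the operand must use regs 0-1 */
  int operand_allocno = 1;
  for (int a = 0; a < N_LIVE; a++)
    if (a != operand_allocno)
      conflict_regs[a] |= single_class;   /* keep regs 0-1 free */
  printf ("allocno 0 conflicts with mask 0x%02X\n", conflict_regs[0]);
  return 0;
}
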
@@ -608,7 +614,7 @@ process_single_reg_class_operands (bool in_p, int freq)
intersected calls, and register pressure info for allocnos for the
basic block and regions containing the basic block. */
static void
-process_bb_node_lives (loop_tree_node_t loop_tree_node)
+process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
{
int i, index;
unsigned int j;
@@ -623,17 +629,17 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
bb = loop_tree_node->bb;
if (bb != NULL)
{
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- curr_reg_pressure[reg_class_cover[i]] = 0;
- high_pressure_start_point[reg_class_cover[i]] = -1;
+ curr_reg_pressure[ira_reg_class_cover[i]] = 0;
+ high_pressure_start_point[ira_reg_class_cover[i]] = -1;
}
curr_bb_node = loop_tree_node;
reg_live_in = DF_LR_IN (bb);
sparseset_clear (allocnos_live);
REG_SET_TO_HARD_REG_SET (hard_regs_live, reg_live_in);
AND_COMPL_HARD_REG_SET (hard_regs_live, eliminable_regset);
- AND_COMPL_HARD_REG_SET (hard_regs_live, no_alloc_regs);
+ AND_COMPL_HARD_REG_SET (hard_regs_live, ira_no_alloc_regs);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (hard_regs_live, i))
{
@@ -642,18 +648,18 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
cover_class = REGNO_REG_CLASS (i);
if (cover_class == NO_REGS)
continue;
- cover_class = class_translate[cover_class];
+ cover_class = ira_class_translate[cover_class];
curr_reg_pressure[cover_class]++;
if (curr_bb_node->reg_pressure[cover_class]
< curr_reg_pressure[cover_class])
curr_bb_node->reg_pressure[cover_class]
= curr_reg_pressure[cover_class];
ira_assert (curr_reg_pressure[cover_class]
- <= available_class_regs[cover_class]);
+ <= ira_available_class_regs[cover_class]);
}
EXECUTE_IF_SET_IN_BITMAP (reg_live_in, FIRST_PSEUDO_REGISTER, j, bi)
{
- allocno_t a = ira_curr_regno_allocno_map[j];
+ ira_allocno_t a = ira_curr_regno_allocno_map[j];
if (a == NULL)
continue;
@@ -690,8 +696,8 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
#ifdef STACK_REGS
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, px)
{
- ALLOCNO_NO_STACK_REG_P (allocnos[px]) = true;
- ALLOCNO_TOTAL_NO_STACK_REG_P (allocnos[px]) = true;
+ ALLOCNO_NO_STACK_REG_P (ira_allocnos[px]) = true;
+ IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (ira_allocnos[px]) = true;
}
for (px = FIRST_STACK_REG; px <= LAST_STACK_REG; px++)
make_regno_born_and_dead (px);
@@ -749,10 +755,10 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
IOR_HARD_REG_SET (crtl->emit.call_used_regs, clobbered_regs);
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
{
- allocno_t a = allocnos[i];
+ ira_allocno_t a = ira_allocnos[i];
ALLOCNO_CALL_FREQ (a) += freq;
- index = add_regno_call (ALLOCNO_REGNO (a), insn);
+ index = ira_add_regno_call (ALLOCNO_REGNO (a), insn);
if (ALLOCNO_CALLS_CROSSED_START (a) < 0)
ALLOCNO_CALLS_CROSSED_START (a) = index;
ALLOCNO_CALLS_CROSSED_NUM (a)++;
@@ -761,7 +767,7 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
if (cfun->has_nonlocal_label)
{
SET_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a));
- SET_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ SET_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
}
}
}
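
The call bookkeeping in this hunk can be sketched as follows, with
invented arrays standing in for ALLOCNO_CALLS_CROSSED_NUM and
ALLOCNO_CALLS_CROSSED_START:

#include <stdio.h>

#define N_ALLOCNOS 3

static int calls_crossed[N_ALLOCNOS];
static int calls_crossed_start[N_ALLOCNOS] = {-1, -1, -1};

/* Every allocno still live at a call site crosses that call; it also
   remembers the index of the first such call.  */
static void
process_call (int call_index, const int *live, int n_live)
{
  for (int i = 0; i < n_live; i++)
    {
      int a = live[i];
      if (calls_crossed_start[a] < 0)
        calls_crossed_start[a] = call_index;
      calls_crossed[a]++;
    }
}

int
main (void)
{
  int live_at_call0[] = {0, 1};
  int live_at_call1[] = {1, 2};
  process_call (0, live_at_call0, 2);
  process_call (1, live_at_call1, 2);
  printf ("allocno 1 crosses %d calls, first at index %d\n",
          calls_crossed[1], calls_crossed_start[1]);
  return 0;
}
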
@@ -825,7 +831,7 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
}
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
{
- make_regno_dead (ALLOCNO_REGNO (allocnos[i]));
+ make_regno_dead (ALLOCNO_REGNO (ira_allocnos[i]));
}
curr_point++;
@@ -833,11 +839,11 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
}
/* Propagate register pressure to upper loop tree nodes: */
if (loop_tree_node != ira_loop_tree_root)
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
enum reg_class cover_class;
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
if (loop_tree_node->reg_pressure[cover_class]
> loop_tree_node->parent->reg_pressure[cover_class])
loop_tree_node->parent->reg_pressure[cover_class]
@@ -845,46 +851,49 @@ process_bb_node_lives (loop_tree_node_t loop_tree_node)
}
}
-/* Create and set up START_POINT_RANGES and FINISH_POINT_RANGES. */
+/* Create and set up IRA_START_POINT_RANGES and
+ IRA_FINISH_POINT_RANGES. */
static void
create_start_finish_chains (void)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
allocno_live_range_t r;
- start_point_ranges
- = ira_allocate (max_point * sizeof (allocno_live_range_t));
- memset (start_point_ranges, 0, max_point * sizeof (allocno_live_range_t));
- finish_point_ranges
- = ira_allocate (max_point * sizeof (allocno_live_range_t));
- memset (finish_point_ranges, 0, max_point * sizeof (allocno_live_range_t));
+ ira_start_point_ranges
+ = ira_allocate (ira_max_point * sizeof (allocno_live_range_t));
+ memset (ira_start_point_ranges, 0,
+ ira_max_point * sizeof (allocno_live_range_t));
+ ira_finish_point_ranges
+ = ira_allocate (ira_max_point * sizeof (allocno_live_range_t));
+ memset (ira_finish_point_ranges, 0,
+ ira_max_point * sizeof (allocno_live_range_t));
FOR_EACH_ALLOCNO (a, ai)
{
for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
{
- r->start_next = start_point_ranges[r->start];
- start_point_ranges[r->start] = r;
- r->finish_next = finish_point_ranges[r->finish];
- finish_point_ranges[r->finish] = r;
+ r->start_next = ira_start_point_ranges[r->start];
+ ira_start_point_ranges[r->start] = r;
+ r->finish_next = ira_finish_point_ranges[r->finish];
+ ira_finish_point_ranges[r->finish] = r;
}
}
}
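
A standalone sketch of the chains built by create_start_finish_chains,
with toy types in place of allocno_live_range_t: bucketing every range
by its start and finish points lets later passes walk all ranges that
begin or end at a given program point without scanning every allocno.

#include <stdio.h>

#define MAX_POINT 8

typedef struct range
{
  int start, finish;
  struct range *start_next, *finish_next;
} range_t;

static range_t *starts[MAX_POINT], *finishes[MAX_POINT];

static void
chain_range (range_t *r)
{
  r->start_next = starts[r->start];      /* push onto start bucket */
  starts[r->start] = r;
  r->finish_next = finishes[r->finish];  /* push onto finish bucket */
  finishes[r->finish] = r;
}

int
main (void)
{
  range_t a = {0, 2}, b = {2, 5}, c = {0, 5};
  chain_range (&a);
  chain_range (&b);
  chain_range (&c);
  for (range_t *r = starts[0]; r != NULL; r = r->start_next)
    printf ("starts at 0: [%d..%d]\n", r->start, r->finish);
  return 0;
}
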
-/* Rebuild START_POINT_RANGES and FINISH_POINT_RANGES after new live
- ranges and program points were added as a result if new insn
- generation. */
+/* Rebuild IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES after
+ new live ranges and program points were added as a result of new
+ insn generation. */
void
-rebuild_start_finish_chains (void)
+ira_rebuild_start_finish_chains (void)
{
- ira_free (finish_point_ranges);
- ira_free (start_point_ranges);
+ ira_free (ira_finish_point_ranges);
+ ira_free (ira_start_point_ranges);
create_start_finish_chains ();
}
/* Print live ranges R to file F. */
void
-print_live_range_list (FILE *f, allocno_live_range_t r)
+ira_print_live_range_list (FILE *f, allocno_live_range_t r)
{
for (; r != NULL; r = r->next)
fprintf (f, " [%d..%d]", r->start, r->finish);
@@ -893,22 +902,22 @@ print_live_range_list (FILE *f, allocno_live_range_t r)
/* Print live ranges R to stderr. */
void
-debug_live_range_list (allocno_live_range_t r)
+ira_debug_live_range_list (allocno_live_range_t r)
{
- print_live_range_list (stderr, r);
+ ira_print_live_range_list (stderr, r);
}
/* Print live ranges of allocno A to file F. */
static void
-print_allocno_live_ranges (FILE *f, allocno_t a)
+print_allocno_live_ranges (FILE *f, ira_allocno_t a)
{
fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
- print_live_range_list (f, ALLOCNO_LIVE_RANGES (a));
+ ira_print_live_range_list (f, ALLOCNO_LIVE_RANGES (a));
}
/* Print live ranges of allocno A to stderr. */
void
-debug_allocno_live_ranges (allocno_t a)
+ira_debug_allocno_live_ranges (ira_allocno_t a)
{
print_allocno_live_ranges (stderr, a);
}
@@ -917,8 +926,8 @@ debug_allocno_live_ranges (allocno_t a)
static void
print_live_ranges (FILE *f)
{
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
print_allocno_live_ranges (f, a);
@@ -926,7 +935,7 @@ print_live_ranges (FILE *f)
/* Print live ranges of all allocnos to stderr. */
void
-debug_live_ranges (void)
+ira_debug_live_ranges (void)
{
print_live_ranges (stderr);
}
@@ -938,11 +947,11 @@ debug_live_ranges (void)
The new info means the allocno info finally calculated in this
file. */
static void
-propagate_new_allocno_info (allocno_t a)
+propagate_new_allocno_info (ira_allocno_t a)
{
int regno;
- allocno_t parent_a;
- loop_tree_node_t parent;
+ ira_allocno_t parent_a;
+ ira_loop_tree_node_t parent;
regno = ALLOCNO_REGNO (a);
if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
@@ -950,11 +959,11 @@ propagate_new_allocno_info (allocno_t a)
{
ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
#ifdef STACK_REGS
- if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
- ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a) = true;
+ if (IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (a))
+ IRA_ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a) = true;
#endif
- IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
- ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
+ IOR_HARD_REG_SET (IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (parent_a),
+ IRA_ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
if (ALLOCNO_CALLS_CROSSED_START (parent_a) < 0
|| (ALLOCNO_CALLS_CROSSED_START (a) >= 0
&& (ALLOCNO_CALLS_CROSSED_START (parent_a)
@@ -973,10 +982,10 @@ static void
propagate_new_info (void)
{
int i;
- allocno_t a;
+ ira_allocno_t a;
for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
propagate_new_allocno_info (a);
@@ -986,15 +995,16 @@ propagate_new_info (void)
CONFLICT_HARD_REGS and TOTAL_CONFLICT_HARD_REGS for allocnos, and
calculate register pressure info. */
void
-create_allocno_live_ranges (void)
+ira_create_allocno_live_ranges (void)
{
- allocnos_live = sparseset_alloc (allocnos_num);
+ allocnos_live = sparseset_alloc (ira_allocnos_num);
/* Make a vector that mark_reg_{store,clobber} will store in. */
if (!regs_set)
regs_set = VEC_alloc (rtx, heap, 10);
curr_point = 0;
- traverse_loop_tree (true, ira_loop_tree_root, NULL, process_bb_node_lives);
- max_point = curr_point;
+ ira_traverse_loop_tree (true, ira_loop_tree_root, NULL,
+ process_bb_node_lives);
+ ira_max_point = curr_point;
create_start_finish_chains ();
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
print_live_ranges (ira_dump_file);
@@ -1003,10 +1013,10 @@ create_allocno_live_ranges (void)
sparseset_free (allocnos_live);
}
-/* Free arrays START_POINT_RANGES and FINISH_POINT_RANGES. */
+/* Free arrays IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES. */
void
-finish_allocno_live_ranges (void)
+ira_finish_allocno_live_ranges (void)
{
- ira_free (finish_point_ranges);
- ira_free (start_point_ranges);
+ ira_free (ira_finish_point_ranges);
+ ira_free (ira_start_point_ranges);
}
diff --git a/gcc/ira.c b/gcc/ira.c
index 61ac669886d..6f0b98d7fb6 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -245,7 +245,7 @@ along with GCC; see the file COPYING3. If not see
o After IR flattening, IRA tries to assign hard registers to all
spilled allocnos. This is implemented by a simple and fast
priority coloring algorithm (see function
- reassign_conflict_allocnos::ira-color.c). Here new allocnos
+ ira_reassign_conflict_allocnos::ira-color.c). Here new allocnos
created during the code change pass can be assigned to hard
registers.
@@ -333,53 +333,53 @@ FILE *ira_dump_file;
alloc_pool allocno_pool, copy_pool, allocno_live_range_pool;
/* The number of elements in the following array. */
-int spilled_reg_stack_slots_num;
+int ira_spilled_reg_stack_slots_num;
/* The following array contains info about spilled pseudo-register
 stack slots used in the current function so far. */
-struct spilled_reg_stack_slot *spilled_reg_stack_slots;
+struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, cost of the
allocnos assigned to hard-registers, cost of the allocnos assigned
to memory, cost of loads, stores and register move insns generated
for pseudo-register live range splitting (see ira-emit.c). */
-int overall_cost;
-int reg_cost, mem_cost;
-int load_cost, store_cost, shuffle_cost;
-int move_loops_num, additional_jumps_num;
+int ira_overall_cost;
+int ira_reg_cost, ira_mem_cost;
+int ira_load_cost, ira_store_cost, ira_shuffle_cost;
+int ira_move_loops_num, ira_additional_jumps_num;
/* Map: hard regs X modes -> set of hard registers for storing value
of given mode starting with given hard register. */
-HARD_REG_SET reg_mode_hard_regset[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES];
+HARD_REG_SET ira_reg_mode_hard_regset[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES];
/* The following two variables are array analogs of the macros
MEMORY_MOVE_COST and REGISTER_MOVE_COST. */
-short int memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
-move_table *register_move_cost[MAX_MACHINE_MODE];
+short int ira_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
+move_table *ira_register_move_cost[MAX_MACHINE_MODE];
/* Similar to may_move_in_cost but it is calculated in IRA instead of
regclass. Another difference is that we take only available hard
registers into account to figure out whether one register class is a
subset of another one. */
-move_table *register_may_move_in_cost[MAX_MACHINE_MODE];
+move_table *ira_may_move_in_cost[MAX_MACHINE_MODE];
/* Similar to may_move_out_cost but it is calculated in IRA instead of
regclass. Another difference is that we take only available hard
registers into account to figure out whether one register class is a
subset of another one. */
-move_table *register_may_move_out_cost[MAX_MACHINE_MODE];
+move_table *ira_may_move_out_cost[MAX_MACHINE_MODE];
/* Register class subset relation: TRUE if the first class is a subset
of the second one considering only hard registers available for the
allocation. */
-int class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
+int ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
/* Temporary hard reg set used for a different calculation. */
static HARD_REG_SET temp_hard_regset;
-/* The function sets up the map REG_MODE_HARD_REGSET. */
+/* The function sets up the map IRA_REG_MODE_HARD_REGSET. */
static void
setup_reg_mode_hard_regset (void)
{
@@ -388,10 +388,10 @@ setup_reg_mode_hard_regset (void)
for (m = 0; m < NUM_MACHINE_MODES; m++)
for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
{
- CLEAR_HARD_REG_SET (reg_mode_hard_regset[hard_regno][m]);
+ CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
for (i = hard_regno_nregs[hard_regno][m] - 1; i >= 0; i--)
if (hard_regno + i < FIRST_PSEUDO_REGISTER)
- SET_HARD_REG_BIT (reg_mode_hard_regset[hard_regno][m],
+ SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
hard_regno + i);
}
}
@@ -405,17 +405,17 @@ static HARD_REG_SET no_unit_alloc_regs;
/* Array of the hard registers of given class which are available for
 allocation. The order is defined by the allocation order. */
-short class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+short ira_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
/* The number of elements of the above array for given register
class. */
-int class_hard_regs_num[N_REG_CLASSES];
+int ira_class_hard_regs_num[N_REG_CLASSES];
-/* Index (in class_hard_regs) for given register class and hard
+/* Index (in ira_class_hard_regs) for given register class and hard
register (in the general case a hard register can belong to several
register classes). The index is negative for hard registers
unavailable for the allocation. */
-short class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+short ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
/* The function sets up the three arrays declared above. */
static void
@@ -444,35 +444,35 @@ setup_class_hard_regs (void)
continue;
SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
- class_hard_reg_index[cl][hard_regno] = -1;
+ ira_class_hard_reg_index[cl][hard_regno] = -1;
else
{
- class_hard_reg_index[cl][hard_regno] = n;
- class_hard_regs[cl][n++] = hard_regno;
+ ira_class_hard_reg_index[cl][hard_regno] = n;
+ ira_class_hard_regs[cl][n++] = hard_regno;
}
}
- class_hard_regs_num[cl] = n;
+ ira_class_hard_regs_num[cl] = n;
}
}
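
The pair of tables set up above can be sketched with invented register
numbers and an invented set of unavailable registers: a compacted list
of allocatable registers per class, plus a reverse index from hard
register number to its slot in that list (-1 if unallocatable).

#include <stdio.h>

#define NREGS 6

int
main (void)
{
  int fixed[NREGS] = {0, 1, 0, 0, 1, 0};   /* regs 1 and 4 unavailable */
  short regs[NREGS], index[NREGS];
  int n = 0;
  for (int hard_regno = 0; hard_regno < NREGS; hard_regno++)
    if (fixed[hard_regno])
      index[hard_regno] = -1;
    else
      {
        index[hard_regno] = n;     /* reverse map: regno -> slot */
        regs[n++] = hard_regno;    /* compacted allocation order */
      }
  printf ("%d allocatable; first is reg %d; reg 5 is entry %d\n",
          n, regs[0], index[5]);
  return 0;
}
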
/* Number of given class hard registers available for the register
allocation for given classes. */
-int available_class_regs[N_REG_CLASSES];
+int ira_available_class_regs[N_REG_CLASSES];
-/* Set up AVAILABLE_CLASS_REGS. */
+/* Set up IRA_AVAILABLE_CLASS_REGS. */
static void
setup_available_class_regs (void)
{
int i, j;
- memset (available_class_regs, 0, sizeof (available_class_regs));
+ memset (ira_available_class_regs, 0, sizeof (ira_available_class_regs));
for (i = 0; i < N_REG_CLASSES; i++)
{
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
if (TEST_HARD_REG_BIT (temp_hard_regset, j))
- available_class_regs[i]++;
+ ira_available_class_regs[i]++;
}
}
@@ -491,7 +491,7 @@ setup_alloc_regs (bool use_hard_frame_p)
-/* Set up MEMORY_MOVE_COST, REGISTER_MOVE_COST. */
+/* Set up IRA_MEMORY_MOVE_COST, IRA_REGISTER_MOVE_COST. */
static void
setup_class_subset_and_memory_move_costs (void)
{
@@ -500,26 +500,26 @@ setup_class_subset_and_memory_move_costs (void)
HARD_REG_SET temp_hard_regset2;
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
- memory_move_cost[mode][NO_REGS][0]
- = memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
+ ira_memory_move_cost[mode][NO_REGS][0]
+ = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
{
if (cl != (int) NO_REGS)
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
- memory_move_cost[mode][cl][0] = MEMORY_MOVE_COST (mode, cl, 0);
- memory_move_cost[mode][cl][1] = MEMORY_MOVE_COST (mode, cl, 1);
+ ira_memory_move_cost[mode][cl][0] = MEMORY_MOVE_COST (mode, cl, 0);
+ ira_memory_move_cost[mode][cl][1] = MEMORY_MOVE_COST (mode, cl, 1);
/* Costs for NO_REGS are used in cost calculation on the
1st pass when the preferred register classes are not
known yet. In this case we take the best scenario. */
- if (memory_move_cost[mode][NO_REGS][0]
- > memory_move_cost[mode][cl][0])
- memory_move_cost[mode][NO_REGS][0]
- = memory_move_cost[mode][cl][0];
- if (memory_move_cost[mode][NO_REGS][1]
- > memory_move_cost[mode][cl][1])
- memory_move_cost[mode][NO_REGS][1]
- = memory_move_cost[mode][cl][1];
+ if (ira_memory_move_cost[mode][NO_REGS][0]
+ > ira_memory_move_cost[mode][cl][0])
+ ira_memory_move_cost[mode][NO_REGS][0]
+ = ira_memory_move_cost[mode][cl][0];
+ if (ira_memory_move_cost[mode][NO_REGS][1]
+ > ira_memory_move_cost[mode][cl][1])
+ ira_memory_move_cost[mode][NO_REGS][1]
+ = ira_memory_move_cost[mode][cl][1];
}
for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
{
@@ -527,7 +527,7 @@ setup_class_subset_and_memory_move_costs (void)
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
- class_subset_p[cl][cl2]
+ ira_class_subset_p[cl][cl2]
= hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
}
}
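
The best-scenario fallback for NO_REGS described above is a running
minimum over the real classes; a sketch with invented costs:

#include <stdio.h>
#include <limits.h>

int
main (void)
{
  int class_mem_cost[] = {6, 4, 9};   /* invented per-class costs */
  int no_regs_cost = INT_MAX;
  for (int cl = 0; cl < 3; cl++)
    if (no_regs_cost > class_mem_cost[cl])
      no_regs_cost = class_mem_cost[cl];   /* keep the cheapest */
  printf ("NO_REGS fallback cost: %d\n", no_regs_cost);   /* 4 */
  return 0;
}
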
@@ -607,16 +607,16 @@ ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
/* Output information about allocation of all allocnos
into file F. */
void
-print_disposition (FILE *f)
+ira_print_disposition (FILE *f)
{
int i, n, max_regno;
- allocno_t a;
+ ira_allocno_t a;
basic_block bb;
fprintf (f, "Disposition:");
max_regno = max_reg_num ();
for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
- for (a = regno_allocno_map[i];
+ for (a = ira_regno_allocno_map[i];
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
@@ -639,9 +639,9 @@ print_disposition (FILE *f)
/* Outputs information about allocation of all allocnos into
stderr. */
void
-debug_disposition (void)
+ira_debug_disposition (void)
{
- print_disposition (stderr);
+ ira_print_disposition (stderr);
}
@@ -669,7 +669,7 @@ setup_reg_subclasses (void)
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- if (hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
+ if (hard_reg_set_equal_p (temp_hard_regset, ira_zero_hard_reg_set))
continue;
for (j = 0; j < N_REG_CLASSES; j++)
if (i != j)
@@ -693,25 +693,25 @@ setup_reg_subclasses (void)
/* Number of cover classes. Cover classes are non-intersecting
 register classes containing all hard-registers available for the
 allocation. */
-int reg_class_cover_size;
+int ira_reg_class_cover_size;
/* The array containing cover classes (see also comments for macro
- IRA_COVER_CLASSES). Only first REG_CLASS_COVER_SIZE elements are
+ IRA_COVER_CLASSES). Only the first IRA_REG_CLASS_COVER_SIZE elements are
used for this. */
-enum reg_class reg_class_cover[N_REG_CLASSES];
+enum reg_class ira_reg_class_cover[N_REG_CLASSES];
/* The number of elements in the subsequent array. */
-int important_classes_num;
+int ira_important_classes_num;
/* The array containing non-empty classes (including non-empty cover
classes) which are subclasses of cover classes. Such classes are
 important for calculation of the hard register usage costs. */
-enum reg_class important_classes[N_REG_CLASSES];
+enum reg_class ira_important_classes[N_REG_CLASSES];
/* The array containing indexes of important classes in the previous
array. The array elements are defined only for important
classes. */
-int important_class_nums[N_REG_CLASSES];
+int ira_important_class_nums[N_REG_CLASSES];
#ifdef IRA_COVER_CLASSES
@@ -725,7 +725,7 @@ setup_cover_and_important_classes (void)
static enum reg_class classes[] = IRA_COVER_CLASSES;
HARD_REG_SET temp_hard_regset2;
- reg_class_cover_size = 0;
+ ira_reg_class_cover_size = 0;
for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
{
for (j = 0; j < i; j++)
@@ -733,29 +733,29 @@ setup_cover_and_important_classes (void)
gcc_unreachable ();
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- if (! hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
- reg_class_cover[reg_class_cover_size++] = cl;
+ if (! hard_reg_set_equal_p (temp_hard_regset, ira_zero_hard_reg_set))
+ ira_reg_class_cover[ira_reg_class_cover_size++] = cl;
}
- important_classes_num = 0;
+ ira_important_classes_num = 0;
for (cl = 0; cl < N_REG_CLASSES; cl++)
{
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- if (! hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
- for (j = 0; j < reg_class_cover_size; j++)
+ if (! hard_reg_set_equal_p (temp_hard_regset, ira_zero_hard_reg_set))
+ for (j = 0; j < ira_reg_class_cover_size; j++)
{
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
COPY_HARD_REG_SET (temp_hard_regset2,
- reg_class_contents[reg_class_cover[j]]);
+ reg_class_contents[ira_reg_class_cover[j]]);
AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
- if (cl == reg_class_cover[j]
+ if (cl == ira_reg_class_cover[j]
|| (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
&& ! hard_reg_set_equal_p (temp_hard_regset,
temp_hard_regset2)))
{
- important_class_nums[cl] = important_classes_num;
- important_classes[important_classes_num++] = cl;
+ ira_important_class_nums[cl] = ira_important_classes_num;
+ ira_important_classes[ira_important_classes_num++] = cl;
}
}
}
@@ -765,11 +765,11 @@ setup_cover_and_important_classes (void)
/* Map of all register classes to corresponding cover class containing
the given class. If given class is not a subset of a cover class,
we translate it into the cheapest cover class. */
-enum reg_class class_translate[N_REG_CLASSES];
+enum reg_class ira_class_translate[N_REG_CLASSES];
#ifdef IRA_COVER_CLASSES
-/* Set up array CLASS_TRANSLATE. */
+/* Set up array IRA_CLASS_TRANSLATE. */
static void
setup_class_translate (void)
{
@@ -778,52 +778,52 @@ setup_class_translate (void)
int i, cost, min_cost, best_cost;
for (cl = 0; cl < N_REG_CLASSES; cl++)
- class_translate[cl] = NO_REGS;
- for (i = 0; i < reg_class_cover_size; i++)
+ ira_class_translate[cl] = NO_REGS;
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
for (cl_ptr = &alloc_reg_class_subclasses[cover_class][0];
(cl = *cl_ptr) != LIM_REG_CLASSES;
cl_ptr++)
{
- if (class_translate[cl] == NO_REGS)
- class_translate[cl] = cover_class;
+ if (ira_class_translate[cl] == NO_REGS)
+ ira_class_translate[cl] = cover_class;
#ifdef ENABLE_IRA_CHECKING
else
{
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (! hard_reg_set_subset_p (temp_hard_regset,
- zero_hard_reg_set))
+ ira_zero_hard_reg_set))
gcc_unreachable ();
}
#endif
}
- class_translate[cover_class] = cover_class;
+ ira_class_translate[cover_class] = cover_class;
}
/* For classes which are not fully covered by a cover class (in
other words covered by more than one cover class), use the cheapest
cover class. */
for (cl = 0; cl < N_REG_CLASSES; cl++)
{
- if (cl == NO_REGS || class_translate[cl] != NO_REGS)
+ if (cl == NO_REGS || ira_class_translate[cl] != NO_REGS)
continue;
best_class = NO_REGS;
best_cost = INT_MAX;
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cover_class = reg_class_cover[i];
+ cover_class = ira_reg_class_cover[i];
COPY_HARD_REG_SET (temp_hard_regset,
reg_class_contents[cover_class]);
AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
- if (! hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
+ if (! hard_reg_set_equal_p (temp_hard_regset, ira_zero_hard_reg_set))
{
min_cost = INT_MAX;
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
- cost = (memory_move_cost[mode][cl][0]
- + memory_move_cost[mode][cl][1]);
+ cost = (ira_memory_move_cost[mode][cl][0]
+ + ira_memory_move_cost[mode][cl][1]);
if (min_cost > cost)
min_cost = cost;
}
@@ -834,7 +834,7 @@ setup_class_translate (void)
}
}
}
- class_translate[cl] = best_class;
+ ira_class_translate[cl] = best_class;
}
}
#endif
@@ -845,7 +845,7 @@ setup_class_translate (void)
contain no hard registers available for allocation, the value is
calculated by taking all hard-registers including fixed ones into
account. */
-enum reg_class reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
+enum reg_class ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
/* The biggest important reg_class inside the union of the two
reg_classes (that is calculated taking only hard registers
@@ -854,11 +854,11 @@ enum reg_class reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
calculated by taking all hard-registers including fixed ones into
account. In other words, the value is the corresponding
reg_class_subunion value. */
-enum reg_class reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
+enum reg_class ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
#ifdef IRA_COVER_CLASSES
-/* Set up REG_CLASS_INTERSECT and REG_CLASS_UNION. */
+/* Set up IRA_REG_CLASS_INTERSECT and IRA_REG_CLASS_UNION. */
static void
setup_reg_class_intersect_union (void)
{
@@ -869,54 +869,56 @@ setup_reg_class_intersect_union (void)
{
for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
{
- reg_class_intersect[cl1][cl2] = NO_REGS;
+ ira_reg_class_intersect[cl1][cl2] = NO_REGS;
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
- if (hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set)
- && hard_reg_set_equal_p (temp_set2, zero_hard_reg_set))
+ if (hard_reg_set_equal_p (temp_hard_regset, ira_zero_hard_reg_set)
+ && hard_reg_set_equal_p (temp_set2, ira_zero_hard_reg_set))
{
for (i = 0;; i++)
{
cl3 = reg_class_subclasses[cl1][i];
if (cl3 == LIM_REG_CLASSES)
break;
- if (reg_class_subset_p (reg_class_intersect[cl1][cl2], cl3))
- reg_class_intersect[cl1][cl2] = cl3;
+ if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
+ cl3))
+ ira_reg_class_intersect[cl1][cl2] = cl3;
}
- reg_class_union[cl1][cl2] = reg_class_subunion[cl1][cl2];
+ ira_reg_class_union[cl1][cl2] = reg_class_subunion[cl1][cl2];
continue;
}
- reg_class_union[cl1][cl2] = NO_REGS;
+ ira_reg_class_union[cl1][cl2] = NO_REGS;
COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
- for (i = 0; i < important_classes_num; i++)
+ for (i = 0; i < ira_important_classes_num; i++)
{
- cl3 = important_classes[i];
+ cl3 = ira_important_classes[i];
COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
{
COPY_HARD_REG_SET
(temp_set2,
- reg_class_contents[(int) reg_class_intersect[cl1][cl2]]);
+ reg_class_contents[(int)
+ ira_reg_class_intersect[cl1][cl2]]);
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2))
- reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
+ ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
}
if (hard_reg_set_subset_p (temp_hard_regset, union_set))
{
COPY_HARD_REG_SET
(temp_set2,
- reg_class_contents[(int) reg_class_union[cl1][cl2]]);
+ reg_class_contents[(int) ira_reg_class_union[cl1][cl2]]);
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (hard_reg_set_subset_p (temp_set2, temp_hard_regset))
- reg_class_union[cl1][cl2] = (enum reg_class) cl3;
+ ira_reg_class_union[cl1][cl2] = (enum reg_class) cl3;
}
}
}
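
A sketch of the intersection computation, with register classes
modeled as invented bitmasks: among candidate classes that fit inside
both operands, keep one that contains the current best, mirroring the
reg_class_subset_p test above.

#include <stdio.h>

int
main (void)
{
  unsigned cl1 = 0x0F, cl2 = 0x3C;            /* regs 0-3 and regs 2-5 */
  unsigned candidates[] = {0x0C, 0x04, 0x03}; /* "important" classes */
  unsigned both = cl1 & cl2;                  /* regs 2-3 */
  unsigned best = 0;
  for (int i = 0; i < 3; i++)
    if ((candidates[i] & ~both) == 0      /* fits inside the intersection */
        && (best & ~candidates[i]) == 0)  /* contains the current best */
      best = candidates[i];
  printf ("intersection class mask: 0x%02X\n", best);  /* 0x0C */
  return 0;
}
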
@@ -933,18 +935,18 @@ print_class_cover (FILE *f)
int i;
fprintf (f, "Class cover:\n");
- for (i = 0; i < reg_class_cover_size; i++)
- fprintf (f, " %s", reg_class_names[reg_class_cover[i]]);
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ fprintf (f, " %s", reg_class_names[ira_reg_class_cover[i]]);
fprintf (f, "\nClass translation:\n");
for (i = 0; i < N_REG_CLASSES; i++)
fprintf (f, " %s -> %s\n", reg_class_names[i],
- reg_class_names[class_translate[i]]);
+ reg_class_names[ira_class_translate[i]]);
}
/* Output all cover classes and the translation map into
stderr. */
void
-debug_class_cover (void)
+ira_debug_class_cover (void)
{
print_class_cover (stderr);
}
@@ -967,25 +969,25 @@ find_reg_class_closure (void)
/* Map: register class x machine mode -> number of hard registers of
given class needed to store value of given mode. If the number is
different, the size will be negative. */
-int reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
+int ira_reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
/* Maximal value of the previous array elements. */
-int max_nregs;
+int ira_max_nregs;
-/* Form REG_CLASS_NREGS map. */
+/* Form IRA_REG_CLASS_NREGS map. */
static void
setup_reg_class_nregs (void)
{
int m;
enum reg_class cl;
- max_nregs = -1;
+ ira_max_nregs = -1;
for (cl = 0; cl < N_REG_CLASSES; cl++)
for (m = 0; m < MAX_MACHINE_MODE; m++)
{
- reg_class_nregs[cl][m] = CLASS_MAX_NREGS (cl, m);
- if (max_nregs < reg_class_nregs[cl][m])
- max_nregs = reg_class_nregs[cl][m];
+ ira_reg_class_nregs[cl][m] = CLASS_MAX_NREGS (cl, m);
+ if (ira_max_nregs < ira_reg_class_nregs[cl][m])
+ ira_max_nregs = ira_reg_class_nregs[cl][m];
}
}
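
CLASS_MAX_NREGS is a target-defined macro; on typical word-register
targets it reduces to rounding the mode size up to whole words, as in
this sketch (UNITS_PER_WORD assumed to be 4 bytes):

#include <stdio.h>

#define UNITS_PER_WORD 4

static int
class_max_nregs (int mode_size)
{
  /* Ceiling division: how many word-sized registers hold the value.  */
  return (mode_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}

int
main (void)
{
  printf ("4-byte value needs %d reg(s)\n", class_max_nregs (4));  /* 1 */
  printf ("8-byte value needs %d reg(s)\n", class_max_nregs (8));  /* 2 */
  return 0;
}
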
@@ -1003,15 +1005,15 @@ setup_prohibited_class_mode_regs (void)
int i, j, k, hard_regno;
enum reg_class cl;
- for (i = 0; i < reg_class_cover_size; i++)
+ for (i = 0; i < ira_reg_class_cover_size; i++)
{
- cl = reg_class_cover[i];
+ cl = ira_reg_class_cover[i];
for (j = 0; j < NUM_MACHINE_MODES; j++)
{
CLEAR_HARD_REG_SET (prohibited_class_mode_regs[cl][j]);
- for (k = class_hard_regs_num[cl] - 1; k >= 0; k--)
+ for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
{
- hard_regno = class_hard_regs[cl][k];
+ hard_regno = ira_class_hard_regs[cl][k];
if (! HARD_REGNO_MODE_OK (hard_regno, j))
SET_HARD_REG_BIT (prohibited_class_mode_regs[cl][j],
hard_regno);
@@ -1022,38 +1024,38 @@ setup_prohibited_class_mode_regs (void)
-/* Allocate and initialize REGISTER_MOVE_COST,
- REGISTER_MAY_MOVE_IN_COST, and REGISTER_MAY_MOVE_OUT_COST for MODE
- if it is not done yet. */
+/* Allocate and initialize IRA_REGISTER_MOVE_COST,
+ IRA_MAY_MOVE_IN_COST, and IRA_MAY_MOVE_OUT_COST for MODE if it is
+ not done yet. */
void
-init_register_move_cost (enum machine_mode mode)
+ira_init_register_move_cost (enum machine_mode mode)
{
int cl1, cl2;
- ira_assert (register_move_cost[mode] == NULL
- && register_may_move_in_cost[mode] == NULL
- && register_may_move_out_cost[mode] == NULL);
+ ira_assert (ira_register_move_cost[mode] == NULL
+ && ira_may_move_in_cost[mode] == NULL
+ && ira_may_move_out_cost[mode] == NULL);
if (move_cost[mode] == NULL)
init_move_cost (mode);
- register_move_cost[mode] = move_cost[mode];
+ ira_register_move_cost[mode] = move_cost[mode];
/* Don't use ira_allocate because the tables exist out of scope of an
IRA call. */
- register_may_move_in_cost[mode]
+ ira_may_move_in_cost[mode]
= (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
- memcpy (register_may_move_in_cost[mode], may_move_in_cost[mode],
+ memcpy (ira_may_move_in_cost[mode], may_move_in_cost[mode],
sizeof (move_table) * N_REG_CLASSES);
- register_may_move_out_cost[mode]
+ ira_may_move_out_cost[mode]
= (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
- memcpy (register_may_move_out_cost[mode], may_move_out_cost[mode],
+ memcpy (ira_may_move_out_cost[mode], may_move_out_cost[mode],
sizeof (move_table) * N_REG_CLASSES);
for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
{
for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
{
- if (class_subset_p[cl1][cl2])
- register_may_move_in_cost[mode][cl1][cl2] = 0;
- if (class_subset_p[cl2][cl1])
- register_may_move_out_cost[mode][cl1][cl2] = 0;
+ if (ira_class_subset_p[cl1][cl2])
+ ira_may_move_in_cost[mode][cl1][cl2] = 0;
+ if (ira_class_subset_p[cl2][cl1])
+ ira_may_move_out_cost[mode][cl1][cl2] = 0;
}
}
}
@@ -1061,30 +1063,30 @@ init_register_move_cost (enum machine_mode mode)
/* Hard regsets whose all bits are correspondingly zero or one. */
-HARD_REG_SET zero_hard_reg_set;
-HARD_REG_SET one_hard_reg_set;
+HARD_REG_SET ira_zero_hard_reg_set;
+HARD_REG_SET ira_one_hard_reg_set;
/* This is called once during compiler work. It sets up
different arrays whose values don't depend on the compiled
function. */
void
-init_ira_once (void)
+ira_init_once (void)
{
enum machine_mode mode;
- CLEAR_HARD_REG_SET (zero_hard_reg_set);
- SET_HARD_REG_SET (one_hard_reg_set);
+ CLEAR_HARD_REG_SET (ira_zero_hard_reg_set);
+ SET_HARD_REG_SET (ira_one_hard_reg_set);
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
- register_move_cost[mode] = NULL;
- register_may_move_in_cost[mode] = NULL;
- register_may_move_out_cost[mode] = NULL;
+ ira_register_move_cost[mode] = NULL;
+ ira_may_move_in_cost[mode] = NULL;
+ ira_may_move_out_cost[mode] = NULL;
}
- init_ira_costs_once ();
+ ira_init_costs_once ();
}
-/* Free register_move_cost, register_may_move_in_cost, and
- register_may_move_out_cost for each mode. */
+/* Free ira_register_move_cost, ira_may_move_in_cost, and
+ ira_may_move_out_cost for each mode. */
static void
free_register_move_costs (void)
{
@@ -1092,20 +1094,20 @@ free_register_move_costs (void)
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
{
- if (register_may_move_in_cost[mode] != NULL)
- free (register_may_move_in_cost[mode]);
- if (register_may_move_out_cost[mode] != NULL)
- free (register_may_move_out_cost[mode]);
- register_move_cost[mode] = NULL;
- register_may_move_in_cost[mode] = NULL;
- register_may_move_out_cost[mode] = NULL;
+ if (ira_may_move_in_cost[mode] != NULL)
+ free (ira_may_move_in_cost[mode]);
+ if (ira_may_move_out_cost[mode] != NULL)
+ free (ira_may_move_out_cost[mode]);
+ ira_register_move_cost[mode] = NULL;
+ ira_may_move_in_cost[mode] = NULL;
+ ira_may_move_out_cost[mode] = NULL;
}
}
/* This is called every time when register related information is
changed. */
void
-init_ira (void)
+ira_init (void)
{
free_register_move_costs ();
setup_reg_mode_hard_regset ();
@@ -1114,14 +1116,14 @@ init_ira (void)
find_reg_class_closure ();
setup_reg_class_nregs ();
setup_prohibited_class_mode_regs ();
- init_ira_costs ();
+ ira_init_costs ();
}
/* Function called once at the end of compiler work. */
void
-finish_ira_once (void)
+ira_finish_once (void)
{
- finish_ira_costs_once ();
+ ira_finish_costs_once ();
free_register_move_costs ();
}
@@ -1130,28 +1132,28 @@ finish_ira_once (void)
/* Array whose values are hard regsets of hard registers for which
 a move of the hard register in the given mode into itself is
 prohibited. */
-HARD_REG_SET prohibited_mode_move_regs[NUM_MACHINE_MODES];
+HARD_REG_SET ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
/* Flag that the above array has been initialized. */
-static bool prohibited_mode_move_regs_initialized_p = false;
+static bool ira_prohibited_mode_move_regs_initialized_p = false;
-/* Set up PROHIBITED_MODE_MOVE_REGS. */
+/* Set up IRA_PROHIBITED_MODE_MOVE_REGS. */
static void
setup_prohibited_mode_move_regs (void)
{
int i, j;
rtx test_reg1, test_reg2, move_pat, move_insn;
- if (prohibited_mode_move_regs_initialized_p)
+ if (ira_prohibited_mode_move_regs_initialized_p)
return;
- prohibited_mode_move_regs_initialized_p = true;
+ ira_prohibited_mode_move_regs_initialized_p = true;
test_reg1 = gen_rtx_REG (VOIDmode, 0);
test_reg2 = gen_rtx_REG (VOIDmode, 0);
move_pat = gen_rtx_SET (VOIDmode, test_reg1, test_reg2);
move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, 0, 0, move_pat, -1, 0);
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
- SET_HARD_REG_SET (prohibited_mode_move_regs[i]);
+ SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
{
if (! HARD_REGNO_MODE_OK (j, i))
@@ -1167,7 +1169,7 @@ setup_prohibited_mode_move_regs (void)
extract_insn (move_insn);
if (! constrain_operands (1))
continue;
- CLEAR_HARD_REG_BIT (prohibited_mode_move_regs[i], j);
+ CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
}
}
}
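
Conceptually, the loop above asks, for every (mode, register) pair,
whether a register-to-itself move can be matched by the target; a
sketch with the recog machinery faked as a simple predicate (the
restriction in self_move_ok is invented):

#include <stdio.h>
#include <stdbool.h>

#define N_MODES 2
#define N_REGS 4

/* Faked target answer: can register REGNO hold mode MODE and be
   moved into itself?  The real code builds a move insn and runs
   recog and constrain_operands instead.  */
static bool
self_move_ok (int mode, int regno)
{
  return !(mode == 1 && regno >= 2);
}

int
main (void)
{
  unsigned prohibited[N_MODES] = {0, 0};
  for (int m = 0; m < N_MODES; m++)
    for (int r = 0; r < N_REGS; r++)
      if (!self_move_ok (m, r))
        prohibited[m] |= 1u << r;   /* start all-clear, set bad pairs */
  printf ("mode 1 prohibited mask: 0x%X\n", prohibited[1]);   /* 0xC */
  return 0;
}
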
@@ -1176,7 +1178,7 @@ setup_prohibited_mode_move_regs (void)
/* Function specific hard registers that cannot be used for the
register allocation. */
-HARD_REG_SET no_alloc_regs;
+HARD_REG_SET ira_no_alloc_regs;
/* Return TRUE if *LOC contains an asm. */
static int
@@ -1233,7 +1235,7 @@ compute_regs_asm_clobbered (char *regs_asm_clobbered)
}
-/* Set up ELIMINABLE_REGSET, NO_ALLOC_REGS, and REGS_EVER_LIVE. */
+/* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and REGS_EVER_LIVE. */
static void
setup_eliminable_regset (void)
{
@@ -1251,7 +1253,7 @@ setup_eliminable_regset (void)
|| (cfun->calls_alloca && EXIT_IGNORE_STACK)
|| FRAME_POINTER_REQUIRED);
- COPY_HARD_REG_SET (no_alloc_regs, no_unit_alloc_regs);
+ COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
CLEAR_HARD_REG_SET (eliminable_regset);
compute_regs_asm_clobbered (regs_asm_clobbered);
@@ -1269,7 +1271,7 @@ setup_eliminable_regset (void)
SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
if (cannot_elim)
- SET_HARD_REG_BIT (no_alloc_regs, eliminables[i].from);
+ SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
}
else if (cannot_elim)
error ("%s cannot be used in asm here",
@@ -1282,7 +1284,7 @@ setup_eliminable_regset (void)
{
SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
if (need_fp)
- SET_HARD_REG_BIT (no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
+ SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
}
else if (need_fp)
error ("%s cannot be used in asm here",
@@ -1296,7 +1298,7 @@ setup_eliminable_regset (void)
{
SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
if (need_fp)
- SET_HARD_REG_BIT (no_alloc_regs, FRAME_POINTER_REGNUM);
+ SET_HARD_REG_BIT (ira_no_alloc_regs, FRAME_POINTER_REGNUM);
}
else if (need_fp)
error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]);
@@ -1308,15 +1310,15 @@ setup_eliminable_regset (void)
/* The length of the following two arrays. */
-int reg_equiv_len;
+int ira_reg_equiv_len;
/* The element value is TRUE if the corresponding regno value is
invariant. */
-bool *reg_equiv_invariant_p;
+bool *ira_reg_equiv_invariant_p;
/* The element value is equiv constant of given pseudo-register or
NULL_RTX. */
-rtx *reg_equiv_const;
+rtx *ira_reg_equiv_const;
/* Set up the two arrays declared above. */
static void
@@ -1364,8 +1366,8 @@ find_reg_equiv_invariant_const (void)
}
}
}
- reg_equiv_invariant_p[i] = invariant_p;
- reg_equiv_const[i] = constant;
+ ira_reg_equiv_invariant_p[i] = invariant_p;
+ ira_reg_equiv_const[i] = constant;
}
}
@@ -1377,8 +1379,8 @@ static void
setup_reg_renumber (void)
{
int regno, hard_regno;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
caller_save_needed = 0;
FOR_EACH_ALLOCNO (a, ai)
@@ -1389,17 +1391,17 @@ setup_reg_renumber (void)
/* It can happen if A is not referenced but partially anticipated
somewhere in a region. */
ALLOCNO_ASSIGNED_P (a) = true;
- free_allocno_updated_costs (a);
+ ira_free_allocno_updated_costs (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
regno = (int) REGNO (ALLOCNO_REG (a));
reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
if (hard_regno >= 0 && ALLOCNO_CALLS_CROSSED_NUM (a) != 0
- && ! hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
- call_used_reg_set))
+ && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
+ call_used_reg_set))
{
- ira_assert (!optimize || flag_caller_saves || regno >= reg_equiv_len
- || reg_equiv_const[regno]
- || reg_equiv_invariant_p[regno]);
+ ira_assert (!optimize || flag_caller_saves || regno >= ira_reg_equiv_len
+ || ira_reg_equiv_const[regno]
+ || ira_reg_equiv_invariant_p[regno]);
caller_save_needed = 1;
}
}
@@ -1411,15 +1413,15 @@ static void
setup_allocno_assignment_flags (void)
{
int hard_regno;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
{
if (! ALLOCNO_ASSIGNED_P (a))
/* It can happen if A is not referenced but partially anticipated
somewhere in a region. */
- free_allocno_updated_costs (a);
+ ira_free_allocno_updated_costs (a);
hard_regno = ALLOCNO_HARD_REGNO (a);
/* Don't assign hard registers to allocnos which are the destination
 of a removed store at the end of a loop. It makes no sense to keep
@@ -1432,9 +1434,9 @@ setup_allocno_assignment_flags (void)
|| (ALLOCNO_MEMORY_COST (a)
- ALLOCNO_COVER_CLASS_COST (a)) < 0);
ira_assert (hard_regno < 0
- || ! hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
- reg_class_contents
- [ALLOCNO_COVER_CLASS (a)]));
+ || ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
+ reg_class_contents
+ [ALLOCNO_COVER_CLASS (a)]));
}
}
@@ -1444,44 +1446,45 @@ static void
calculate_allocation_cost (void)
{
int hard_regno, cost;
- allocno_t a;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
- overall_cost = reg_cost = mem_cost = 0;
+ ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
FOR_EACH_ALLOCNO (a, ai)
{
hard_regno = ALLOCNO_HARD_REGNO (a);
ira_assert (hard_regno < 0
- || ! hard_reg_not_in_set_p
+ || ! ira_hard_reg_not_in_set_p
(hard_regno, ALLOCNO_MODE (a),
reg_class_contents[ALLOCNO_COVER_CLASS (a)]));
if (hard_regno < 0)
{
cost = ALLOCNO_MEMORY_COST (a);
- mem_cost += cost;
+ ira_mem_cost += cost;
}
else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
{
cost = (ALLOCNO_HARD_REG_COSTS (a)
- [class_hard_reg_index[ALLOCNO_COVER_CLASS (a)][hard_regno]]);
- reg_cost += cost;
+ [ira_class_hard_reg_index
+ [ALLOCNO_COVER_CLASS (a)][hard_regno]]);
+ ira_reg_cost += cost;
}
else
{
cost = ALLOCNO_COVER_CLASS_COST (a);
- reg_cost += cost;
+ ira_reg_cost += cost;
}
- overall_cost += cost;
+ ira_overall_cost += cost;
}
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
{
fprintf (ira_dump_file,
"+++Costs: overall %d, reg %d, mem %d, ld %d, st %d, move %d\n",
- overall_cost, reg_cost, mem_cost,
- load_cost, store_cost, shuffle_cost);
+ ira_overall_cost, ira_reg_cost, ira_mem_cost,
+ ira_load_cost, ira_store_cost, ira_shuffle_cost);
fprintf (ira_dump_file, "+++ move loops %d, new jumps %d\n",
- move_loops_num, additional_jumps_num);
+ ira_move_loops_num, ira_additional_jumps_num);
}
}
@@ -1493,10 +1496,10 @@ calculate_allocation_cost (void)
static void
check_allocation (void)
{
- allocno_t a, conflict_a;
+ ira_allocno_t a, conflict_a;
int hard_regno, conflict_hard_regno, nregs, conflict_nregs;
- allocno_conflict_iterator aci;
- allocno_iterator ai;
+ ira_allocno_conflict_iterator aci;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
{
@@ -1577,9 +1580,9 @@ static void
print_redundant_copies (void)
{
int hard_regno;
- allocno_t a;
- copy_t cp, next_cp;
- allocno_iterator ai;
+ ira_allocno_t a;
+ ira_copy_t cp, next_cp;
+ ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
{
@@ -1691,7 +1694,7 @@ chain_bb_compare (const void *v1p, const void *v2p)
/* Sort the insn chain according to insn frequencies if
FREQ_P or according to insn original order otherwise. */
void
-sort_insn_chain (bool freq_p)
+ira_sort_insn_chain (bool freq_p)
{
struct insn_chain *chain, **chain_arr;
basic_block bb;
@@ -1737,7 +1740,7 @@ ira (FILE *f)
{
int overall_cost_before, allocated_reg_info_size;
bool loops_p;
- int max_regno_before_ira, max_point_before_emit;
+ int max_regno_before_ira, ira_max_point_before_emit;
int rebuild_p;
int saved_flag_ira_algorithm;
basic_block bb;
@@ -1787,11 +1790,11 @@ ira (FILE *f)
if (optimize)
{
max_regno = max_reg_num ();
- reg_equiv_len = max_regno;
- reg_equiv_invariant_p = ira_allocate (max_regno * sizeof (bool));
- memset (reg_equiv_invariant_p, 0, max_regno * sizeof (bool));
- reg_equiv_const = ira_allocate (max_regno * sizeof (rtx));
- memset (reg_equiv_const, 0, max_regno * sizeof (rtx));
+ ira_reg_equiv_len = max_regno;
+ ira_reg_equiv_invariant_p = ira_allocate (max_regno * sizeof (bool));
+ memset (ira_reg_equiv_invariant_p, 0, max_regno * sizeof (bool));
+ ira_reg_equiv_const = ira_allocate (max_regno * sizeof (rtx));
+ memset (ira_reg_equiv_const, 0, max_regno * sizeof (rtx));
find_reg_equiv_invariant_const ();
if (rebuild_p)
{
@@ -1806,9 +1809,9 @@ ira (FILE *f)
allocate_reg_info ();
setup_eliminable_regset ();
- overall_cost = reg_cost = mem_cost = 0;
- load_cost = store_cost = shuffle_cost = 0;
- move_loops_num = additional_jumps_num = 0;
+ ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
+ ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
+ ira_move_loops_num = ira_additional_jumps_num = 0;
ira_assert (current_loops == NULL);
flow_loops_find (&ira_loops);
@@ -1827,7 +1830,7 @@ ira (FILE *f)
else
ira_fast_allocation ();
- max_point_before_emit = max_point;
+ ira_max_point_before_emit = ira_max_point;
ira_emit (loops_p);
@@ -1836,7 +1839,7 @@ ira (FILE *f)
max_regno = max_reg_num ();
if (! loops_p)
- initiate_ira_assign ();
+ ira_initiate_assign ();
else
{
expand_reg_info (allocated_reg_info_size);
@@ -1846,7 +1849,7 @@ ira (FILE *f)
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Flattening IR\n");
- ira_flattening (max_regno_before_ira, max_point_before_emit);
+ ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
/* New insns were generated: add notes and recalculate live
info. */
df_analyze ();
@@ -1855,8 +1858,8 @@ ira (FILE *f)
current_loops = &ira_loops;
setup_allocno_assignment_flags ();
- initiate_ira_assign ();
- reassign_conflict_allocnos (max_regno);
+ ira_initiate_assign ();
+ ira_reassign_conflict_allocnos (max_regno);
}
}
@@ -1893,7 +1896,7 @@ ira (FILE *f)
allocate_initial_values (reg_equiv_memory_loc);
- overall_cost_before = overall_cost;
+ overall_cost_before = ira_overall_cost;
if (optimize)
{
fix_reg_equiv_init ();
@@ -1902,11 +1905,11 @@ ira (FILE *f)
print_redundant_copies ();
#endif
- spilled_reg_stack_slots_num = 0;
- spilled_reg_stack_slots
- = ira_allocate (max_regno * sizeof (struct spilled_reg_stack_slot));
- memset (spilled_reg_stack_slots, 0,
- max_regno * sizeof (struct spilled_reg_stack_slot));
+ ira_spilled_reg_stack_slots_num = 0;
+ ira_spilled_reg_stack_slots
+ = ira_allocate (max_regno * sizeof (struct ira_spilled_reg_stack_slot));
+ memset (ira_spilled_reg_stack_slots, 0,
+ max_regno * sizeof (struct ira_spilled_reg_stack_slot));
}
timevar_pop (TV_IRA);
@@ -1916,7 +1919,7 @@ ira (FILE *f)
build_insn_chain ();
if (optimize)
- sort_insn_chain (true);
+ ira_sort_insn_chain (true);
reload_completed = !reload (get_insns (), optimize > 0);
@@ -1926,14 +1929,14 @@ ira (FILE *f)
if (optimize)
{
- ira_free (spilled_reg_stack_slots);
+ ira_free (ira_spilled_reg_stack_slots);
- finish_ira_assign ();
+ ira_finish_assign ();
}
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
- && overall_cost_before != overall_cost)
- fprintf (ira_dump_file, "+++Overall after reload %d\n", overall_cost);
+ && overall_cost_before != ira_overall_cost)
+ fprintf (ira_dump_file, "+++Overall after reload %d\n", ira_overall_cost);
ira_destroy ();
flow_loops_free (&ira_loops);
@@ -1951,8 +1954,8 @@ ira (FILE *f)
{
cleanup_cfg (CLEANUP_EXPENSIVE);
- ira_free (reg_equiv_invariant_p);
- ira_free (reg_equiv_const);
+ ira_free (ira_reg_equiv_invariant_p);
+ ira_free (ira_reg_equiv_const);
}
bitmap_obstack_release (&ira_bitmap_obstack);
diff --git a/gcc/ira.h b/gcc/ira.h
index 3f5865b30c3..da9a4c7d1a8 100644
--- a/gcc/ira.h
+++ b/gcc/ira.h
@@ -20,19 +20,19 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-extern void init_ira_once (void);
-extern void init_ira (void);
-extern void finish_ira_once (void);
+extern void ira_init_once (void);
+extern void ira_init (void);
+extern void ira_finish_once (void);
extern rtx ira_eliminate_regs (rtx, enum machine_mode);
-extern void sort_insn_chain (bool);
+extern void ira_sort_insn_chain (bool);
-extern void sort_regnos_for_alter_reg (int *, int, unsigned int *);
-extern void mark_allocation_change (int);
-extern void mark_memory_move_deletion (int, int);
-extern bool reassign_pseudos (int *, int, HARD_REG_SET, HARD_REG_SET *,
- HARD_REG_SET *, bitmap);
-extern rtx reuse_stack_slot (int, unsigned int, unsigned int);
-extern void mark_new_stack_slot (rtx, int, unsigned int);
-extern bool better_spill_reload_regno_p (int *, int *, rtx, rtx, rtx);
-extern void collect_pseudo_call_clobbered_regs (int, HARD_REG_SET *);
+extern void ira_sort_regnos_for_alter_reg (int *, int, unsigned int *);
+extern void ira_mark_allocation_change (int);
+extern void ira_mark_memory_move_deletion (int, int);
+extern bool ira_reassign_pseudos (int *, int, HARD_REG_SET, HARD_REG_SET *,
+ HARD_REG_SET *, bitmap);
+extern rtx ira_reuse_stack_slot (int, unsigned int, unsigned int);
+extern void ira_mark_new_stack_slot (rtx, int, unsigned int);
+extern bool ira_better_spill_reload_regno_p (int *, int *, rtx, rtx, rtx);
+extern void ira_collect_pseudo_call_clobbered_regs (int, HARD_REG_SET *);
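With these renames the header exposes a uniformly ira_-prefixed interface; a
hedged caller sketch using the exact prototypes above (the surrounding
context, in particular regno, is hypothetical):

  /* Sketch: a reload-side consumer of the renamed interface.  Assumes
     GCC's internal headers (rtl.h, hard-reg-set.h, ira.h) are included;
     regno stands for some pseudo-register number.  */
  HARD_REG_SET clobbered;
  ira_collect_pseudo_call_clobbered_regs (regno, &clobbered);
  /* Tell IRA that the pseudo's assignment has changed.  */
  ira_mark_allocation_change (regno);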
diff --git a/gcc/reload1.c b/gcc/reload1.c
index cfd596256c7..c0c891e89fa 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -905,7 +905,7 @@ reload (rtx first, int global)
if (flag_ira && optimize)
/* Ask IRA to order pseudo-registers for better stack slot
sharing. */
- sort_regnos_for_alter_reg (temp_pseudo_reg_arr, n, reg_max_ref_width);
+ ira_sort_regnos_for_alter_reg (temp_pseudo_reg_arr, n, reg_max_ref_width);
for (i = 0; i < n; i++)
alter_reg (temp_pseudo_reg_arr[i], -1, false);
@@ -1122,7 +1122,7 @@ reload (rtx first, int global)
if (flag_ira && optimize)
 /* Restore the original insn chain order so that reload
 works correctly.  */
- sort_insn_chain (false);
+ ira_sort_insn_chain (false);
/* If global-alloc was run, notify it of any register eliminations we have
done. */
@@ -1632,8 +1632,8 @@ calculate_needs_all_insns (int global)
{
if (flag_ira && optimize)
/* Inform IRA about the insn deletion. */
- mark_memory_move_deletion (REGNO (SET_DEST (set)),
- REGNO (SET_SRC (set)));
+ ira_mark_memory_move_deletion (REGNO (SET_DEST (set)),
+ REGNO (SET_SRC (set)));
delete_insn (insn);
/* Delete it from the reload chain. */
if (chain->prev)
@@ -1909,10 +1909,10 @@ find_reg (struct insn_chain *chain, int order)
}
regno_pseudo_regs[n++] = -1;
if (best_reg < 0
- || better_spill_reload_regno_p (regno_pseudo_regs,
- best_regno_pseudo_regs,
- rl->in, rl->out,
- chain->insn))
+ || ira_better_spill_reload_regno_p (regno_pseudo_regs,
+ best_regno_pseudo_regs,
+ rl->in, rl->out,
+ chain->insn))
{
best_reg = regno;
for (j = 0;; j++)
@@ -2251,7 +2251,7 @@ alter_reg (int i, int from_reg, bool dont_share_p)
/* Mark the spill for IRA. */
SET_REGNO_REG_SET (&spilled_pseudos, i);
x = (dont_share_p || ! flag_ira || ! optimize
- ? NULL_RTX : reuse_stack_slot (i, inherent_size, total_size));
+ ? NULL_RTX : ira_reuse_stack_slot (i, inherent_size, total_size));
if (x)
shared_p = true;
/* Each pseudo reg has an inherent size which comes from its own mode,
@@ -2283,7 +2283,7 @@ alter_reg (int i, int from_reg, bool dont_share_p)
if (! dont_share_p && flag_ira && optimize)
 /* Inform IRA about allocation of a new stack slot.  */
- mark_new_stack_slot (x, i, total_size);
+ ira_mark_new_stack_slot (x, i, total_size);
}
/* Reuse a stack slot if possible. */
@@ -4038,7 +4038,7 @@ finish_spills (int global)
reg_renumber[i] = -1;
if (flag_ira && optimize)
/* Inform IRA about the change. */
- mark_allocation_change (i);
+ ira_mark_allocation_change (i);
/* We will need to scan everything again. */
something_changed = 1;
}
@@ -4092,7 +4092,7 @@ finish_spills (int global)
 /* Retry allocating the pseudos spilled by IRA and by
 reload.  For each reg, merge the various reg sets that
indicate which hard regs can't be used, and call
- reassign_pseudos. */
+ ira_reassign_pseudos. */
unsigned int n;
for (n = 0, i = FIRST_PSEUDO_REGISTER; i < (unsigned) max_regno; i++)
@@ -4103,9 +4103,10 @@ finish_spills (int global)
else
CLEAR_REGNO_REG_SET (&spilled_pseudos, i);
}
- if (reassign_pseudos (temp_pseudo_reg_arr, n, bad_spill_regs_global,
- pseudo_forbidden_regs, pseudo_previous_regs,
- &spilled_pseudos))
+ if (ira_reassign_pseudos (temp_pseudo_reg_arr, n,
+ bad_spill_regs_global,
+ pseudo_forbidden_regs, pseudo_previous_regs,
+ &spilled_pseudos))
something_changed = 1;
}
@@ -7094,7 +7095,7 @@ emit_input_reload_insns (struct insn_chain *chain, struct reload *rl,
reg_renumber[REGNO (old)] = REGNO (reloadreg);
if (flag_ira && optimize)
/* Inform IRA about the change. */
- mark_allocation_change (REGNO (old));
+ ira_mark_allocation_change (REGNO (old));
alter_reg (REGNO (old), -1, false);
}
special = 1;
@@ -8633,7 +8634,7 @@ delete_output_reload (rtx insn, int j, int last_reload_reg, rtx new_reload_reg)
reg_renumber[REGNO (reg)] = REGNO (new_reload_reg);
if (flag_ira && optimize)
/* Inform IRA about the change. */
- mark_allocation_change (REGNO (reg));
+ ira_mark_allocation_change (REGNO (reg));
alter_reg (REGNO (reg), -1, false);
}
else
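Every reload1.c call site above follows the same guard; condensed, the
pattern is (reg here is a hypothetical rtx):

  /* Recurring pattern in reload1.c: the IRA hooks run only when IRA is
     enabled and the function is being optimized, so the renamed calls
     are no-ops otherwise.  */
  if (flag_ira && optimize)
    ira_mark_allocation_change (REGNO (reg));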
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 589f5fd68e4..d711d10d784 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -2014,7 +2014,7 @@ backend_init (void)
save_register_info ();
/* Initialize the target-specific back end pieces. */
- init_ira_once ();
+ ira_init_once ();
backend_init_target ();
}
@@ -2038,7 +2038,7 @@ lang_dependent_init_target (void)
/* Although the actions of these functions are language-independent,
they use optabs, so we cannot call them from backend_init. */
init_set_costs ();
- init_ira ();
+ ira_init ();
expand_dummy_function_end ();
}
@@ -2138,7 +2138,7 @@ finalize (void)
statistics_fini ();
finish_optimization_passes ();
- finish_ira_once ();
+ ira_finish_once ();
if (mem_report)
dump_memory_report (true);
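Taken together, the toplev.c hunks wire the renamed entry points into the
compiler lifecycle; in outline (a sketch of the call order implied by the
hunks, not code from the patch):

  ira_init_once ();   /* backend_init: once per compiler process   */
  ira_init ();        /* lang_dependent_init_target: needs optabs  */
  /* ... functions are compiled ...                                */
  ira_finish_once (); /* finalize: release once-only IRA data      */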