about summary refs log tree commit diff
path: root/gcc/df-scan.c
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/df-scan.c')
-rw-r--r--  gcc/df-scan.c  441
1 file changed, 199 insertions, 242 deletions
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index 40296fddc88..fd61e32016b 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -46,6 +46,19 @@ along with GCC; see the file COPYING3. If not see
#include "df.h"
#include "tree-pass.h"
+DEF_VEC_P(df_ref);
+DEF_VEC_ALLOC_P_STACK(df_ref);
+
+#define VEC_df_ref_stack_alloc(alloc) VEC_stack_alloc (df_ref, alloc)
+
+typedef struct df_mw_hardreg *df_mw_hardreg_ptr;
+
+DEF_VEC_P(df_mw_hardreg_ptr);
+DEF_VEC_ALLOC_P_STACK(df_mw_hardreg_ptr);
+
+#define VEC_df_mw_hardreg_ptr_stack_alloc(alloc) \
+ VEC_stack_alloc (df_mw_hardreg_ptr, alloc)
+
#ifndef HAVE_epilogue
#define HAVE_epilogue 0
#endif
@@ -84,74 +97,12 @@ static HARD_REG_SET elim_reg_set;
struct df_collection_rec
{
- df_ref * def_vec;
- df_ref * use_vec;
- unsigned int next_def;
- unsigned int alloc_def;
- unsigned int next_use;
- unsigned int alloc_use;
- df_ref * eq_use_vec;
- struct df_mw_hardreg **mw_vec;
- unsigned int next_eq_use;
- unsigned int alloc_eq_use;
- unsigned int next_mw;
- unsigned int alloc_mw;
+ VEC(df_ref,stack) *def_vec;
+ VEC(df_ref,stack) *use_vec;
+ VEC(df_ref,stack) *eq_use_vec;
+ VEC(df_mw_hardreg_ptr,stack) *mw_vec;
};
-/* The following macros define the default sizes for the vectors above. */
-
-#define vec_SIZE_def 256
-#define vec_SIZE_use 256
-#define vec_SIZE_eq_use 256
-#define vec_SIZE_mw 64
-
-/* Initialize the vector to a stack-allocated region. PTR is a struct
- container_rec*, and FLD is either def, use, eq_use or mw. */
-
-#define safe_alloca_vec(ptr, fld, type) \
- ((ptr)->fld##_vec = ((df_##type *) \
- alloca (sizeof (*(ptr)->fld##_vec) \
- * ((ptr)->alloc_##fld = vec_SIZE_##fld))))
-
-/* Safely grow the vector, moving to a non-alloca area if we outgrow
- the default size, and growing exponentially as needed. The result
- is an lvalue for the newly-added vector element. */
-
-#define safe_grow_vec(ptr, fld, type) \
- (*(((ptr)->next_##fld < (ptr)->alloc_##fld \
- ? 0 \
- : (ptr)->next_##fld <= vec_SIZE_##fld \
- ? ((ptr)->fld##_vec = ((df_##type *) \
- memcpy (xmalloc \
- (sizeof (*(ptr)->fld##_vec) \
- * ((ptr)->alloc_##fld \
- = 2 * vec_SIZE_##fld)), \
- (ptr)->fld##_vec, \
- sizeof (*(ptr)->fld##_vec) \
- * (ptr)->next_##fld))) \
- : ((ptr)->fld##_vec = ((df_##type *) \
- xrealloc ((ptr)->fld##_vec, \
- (sizeof (*(ptr)->fld##_vec) \
- * (((ptr)->alloc_##fld \
- *= 2))))))), \
- &(ptr)->fld##_vec[(ptr)->next_##fld++]))
-
-/* Release the vector storage, if we've used non-alloca memory. */
-
-#define safe_free_vec(ptr, fld) \
- (((ptr)->alloc_##fld > vec_SIZE_##fld \
- ? free ((ptr)->fld##_vec), 0 \
- : 0), \
- (ptr)->next_##fld = (ptr)->alloc_##fld = 0, \
- (ptr)->fld##_vec = 0)
-
-/* This alias enables us to use mwhreg for the type in the macros
- above. */
-typedef struct df_mw_hardreg *df_mwhreg;
-
-
-
-
static df_ref df_null_ref_rec[1];
static struct df_mw_hardreg * df_null_mw_rec[1];
@@ -1238,28 +1189,32 @@ df_insn_delete (basic_block bb, unsigned int uid)
static void
df_free_collection_rec (struct df_collection_rec *collection_rec)
{
+ unsigned int ix;
struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
- df_ref *ref;
- struct df_mw_hardreg **mw;
-
- if (collection_rec->def_vec)
- for (ref = collection_rec->def_vec; *ref; ref++)
- df_free_ref (*ref);
- if (collection_rec->use_vec)
- for (ref = collection_rec->use_vec; *ref; ref++)
- df_free_ref (*ref);
- if (collection_rec->eq_use_vec)
- for (ref = collection_rec->eq_use_vec; *ref; ref++)
- df_free_ref (*ref);
- if (collection_rec->mw_vec)
- for (mw = collection_rec->mw_vec; *mw; mw++)
- pool_free (problem_data->mw_reg_pool, *mw);
+ df_ref ref;
+ struct df_mw_hardreg *mw;
+
+ for (ix = 0; VEC_iterate (df_ref, collection_rec->def_vec, ix, ref); ++ix)
+ df_free_ref (ref);
+ for (ix = 0; VEC_iterate (df_ref, collection_rec->use_vec, ix, ref); ++ix)
+ df_free_ref (ref);
+ for (ix = 0; VEC_iterate (df_ref, collection_rec->eq_use_vec, ix, ref); ++ix)
+ df_free_ref (ref);
+ for (ix = 0;
+ VEC_iterate (df_mw_hardreg_ptr, collection_rec->mw_vec, ix, mw);
+ ++ix)
+ pool_free (problem_data->mw_reg_pool, mw);
+
+ VEC_free (df_ref, stack, collection_rec->def_vec);
+ VEC_free (df_ref, stack, collection_rec->use_vec);
+ VEC_free (df_ref, stack, collection_rec->eq_use_vec);
+ VEC_free (df_mw_hardreg_ptr, stack, collection_rec->mw_vec);
}
/* Rescan INSN. Return TRUE if the rescanning produced any changes. */
-bool
+bool
df_insn_rescan (rtx insn)
{
unsigned int uid = INSN_UID (insn);
@@ -1306,15 +1261,14 @@ df_insn_rescan (rtx insn)
return false;
}
+ collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
+ collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+
bitmap_clear_bit (df->insns_to_delete, uid);
bitmap_clear_bit (df->insns_to_rescan, uid);
bitmap_clear_bit (df->insns_to_notes_rescan, uid);
-
- safe_alloca_vec (&collection_rec, def, ref);
- safe_alloca_vec (&collection_rec, use, ref);
- safe_alloca_vec (&collection_rec, eq_use, ref);
- safe_alloca_vec (&collection_rec, mw, mwhreg);
-
if (insn_info)
{
bool the_same = df_insn_refs_verify (&collection_rec, bb, insn, false);
@@ -1322,13 +1276,8 @@ df_insn_rescan (rtx insn)
if (the_same)
{
df_free_collection_rec (&collection_rec);
- safe_free_vec (&collection_rec, def);
- safe_free_vec (&collection_rec, use);
- safe_free_vec (&collection_rec, eq_use);
- safe_free_vec (&collection_rec, mw);
if (dump_file)
fprintf (dump_file, "verify found no changes in insn with uid = %d.\n", uid);
-
return false;
}
if (dump_file)
@@ -1349,10 +1298,10 @@ df_insn_rescan (rtx insn)
df_refs_add_to_chains (&collection_rec, bb, insn);
df_set_bb_dirty (bb);
- safe_free_vec (&collection_rec, def);
- safe_free_vec (&collection_rec, use);
- safe_free_vec (&collection_rec, eq_use);
- safe_free_vec (&collection_rec, mw);
+ VEC_free (df_ref, stack, collection_rec.def_vec);
+ VEC_free (df_ref, stack, collection_rec.use_vec);
+ VEC_free (df_ref, stack, collection_rec.eq_use_vec);
+ VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
return true;
}
@@ -2253,11 +2202,11 @@ df_notes_rescan (rtx insn)
rtx note;
struct df_collection_rec collection_rec;
unsigned int num_deleted;
+ unsigned int mw_len;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
-
- safe_alloca_vec (&collection_rec, eq_use, ref);
- safe_alloca_vec (&collection_rec, mw, mwhreg);
+ collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
num_deleted = df_mw_hardreg_chain_delete_eq_uses (insn_info);
df_ref_chain_delete (insn_info->eq_uses);
@@ -2281,7 +2230,8 @@ df_notes_rescan (rtx insn)
/* Find some place to put any new mw_hardregs. */
df_canonize_collection_rec (&collection_rec);
- if (collection_rec.next_mw)
+ mw_len = VEC_length (df_mw_hardreg_ptr, collection_rec.mw_vec);
+ if (mw_len)
{
unsigned int count = 0;
struct df_mw_hardreg **mw_rec = insn_info->mw_hardregs;
@@ -2295,36 +2245,36 @@ df_notes_rescan (rtx insn)
{
/* Append to the end of the existing record after
expanding it if necessary. */
- if (collection_rec.next_mw > num_deleted)
+ if (mw_len > num_deleted)
{
insn_info->mw_hardregs =
XRESIZEVEC (struct df_mw_hardreg *,
- insn_info->mw_hardregs,
- count + 1 + collection_rec.next_mw);
+ insn_info->mw_hardregs,
+ count + 1 + mw_len);
}
- memcpy (&insn_info->mw_hardregs[count], collection_rec.mw_vec,
- (collection_rec.next_mw + 1) * sizeof (struct df_mw_hardreg *));
- qsort (insn_info->mw_hardregs, count + collection_rec.next_mw,
+ memcpy (&insn_info->mw_hardregs[count],
+ VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ mw_len * sizeof (struct df_mw_hardreg *));
+ insn_info->mw_hardregs[count + mw_len] = NULL;
+ qsort (insn_info->mw_hardregs, count + mw_len,
sizeof (struct df_mw_hardreg *), df_mw_compare);
}
else
{
/* No vector there. */
insn_info->mw_hardregs
- = XNEWVEC (struct df_mw_hardreg*,
- count + 1 + collection_rec.next_mw);
- memcpy (insn_info->mw_hardregs, collection_rec.mw_vec,
- (collection_rec.next_mw + 1) * sizeof (struct df_mw_hardreg *));
+ = XNEWVEC (struct df_mw_hardreg*, 1 + mw_len);
+ memcpy (insn_info->mw_hardregs,
+ VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ mw_len * sizeof (struct df_mw_hardreg *));
+ insn_info->mw_hardregs[mw_len] = NULL;
}
}
/* Get rid of the mw_rec so that df_refs_add_to_chains will
ignore it. */
- collection_rec.mw_vec = NULL;
- collection_rec.next_mw = 0;
+ VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
df_refs_add_to_chains (&collection_rec, bb, insn);
-
- safe_free_vec (&collection_rec, eq_use);
- safe_free_vec (&collection_rec, mw);
+ VEC_free (df_ref, stack, collection_rec.eq_use_vec);
}
else
df_insn_rescan (insn);
@@ -2442,35 +2392,43 @@ df_ref_compare (const void *r1, const void *r2)
}
static void
-df_swap_refs (df_ref *ref_vec, int i, int j)
+df_swap_refs (VEC(df_ref,stack) **ref_vec, int i, int j)
{
- df_ref tmp = ref_vec[i];
- ref_vec[i] = ref_vec[j];
- ref_vec[j] = tmp;
+ df_ref tmp = VEC_index (df_ref, *ref_vec, i);
+ VEC_replace (df_ref, *ref_vec, i, VEC_index (df_ref, *ref_vec, j));
+ VEC_replace (df_ref, *ref_vec, j, tmp);
}
/* Sort and compress a set of refs. */
-static unsigned int
-df_sort_and_compress_refs (df_ref *ref_vec, unsigned int count)
+static void
+df_sort_and_compress_refs (VEC(df_ref,stack) **ref_vec)
{
+ unsigned int count;
unsigned int i;
unsigned int dist = 0;
- ref_vec[count] = NULL;
+ count = VEC_length (df_ref, *ref_vec);
+
/* If there are 1 or 0 elements, there is nothing to do. */
if (count < 2)
- return count;
+ return;
else if (count == 2)
{
- if (df_ref_compare (&ref_vec[0], &ref_vec[1]) > 0)
+ df_ref r0 = VEC_index (df_ref, *ref_vec, 0);
+ df_ref r1 = VEC_index (df_ref, *ref_vec, 1);
+ if (df_ref_compare (&r0, &r1) > 0)
df_swap_refs (ref_vec, 0, 1);
}
else
{
for (i = 0; i < count - 1; i++)
- if (df_ref_compare (&ref_vec[i], &ref_vec[i+1]) >= 0)
- break;
+ {
+ df_ref r0 = VEC_index (df_ref, *ref_vec, i);
+ df_ref r1 = VEC_index (df_ref, *ref_vec, i + 1);
+ if (df_ref_compare (&r0, &r1) >= 0)
+ break;
+ }
/* If the array is already strictly ordered,
which is the most common case for large COUNT case
(which happens for CALL INSNs),
@@ -2479,26 +2437,29 @@ df_sort_and_compress_refs (df_ref *ref_vec, unsigned int count)
Make sure DF_GET_ADD_REFS adds refs in the increasing order
of DF_REF_COMPARE. */
if (i == count - 1)
- return count;
- qsort (ref_vec, count, sizeof (df_ref), df_ref_compare);
+ return;
+ qsort (VEC_address (df_ref, *ref_vec), count, sizeof (df_ref),
+ df_ref_compare);
}
for (i=0; i<count-dist; i++)
{
/* Find the next ref that is not equal to the current ref. */
- while (df_ref_equal_p (ref_vec[i], ref_vec[i + dist + 1]))
+ while (i + dist + 1 < count
+ && df_ref_equal_p (VEC_index (df_ref, *ref_vec, i),
+ VEC_index (df_ref, *ref_vec, i + dist + 1)))
{
- df_free_ref (ref_vec[i + dist + 1]);
+ df_free_ref (VEC_index (df_ref, *ref_vec, i + dist + 1));
dist++;
}
/* Copy it down to the next position. */
- if (dist)
- ref_vec[i+1] = ref_vec[i + dist + 1];
+ if (dist && i + dist + 1 < count)
+ VEC_replace (df_ref, *ref_vec, i + 1,
+ VEC_index (df_ref, *ref_vec, i + dist + 1));
}
count -= dist;
- ref_vec[count] = NULL;
- return count;
+ VEC_truncate (df_ref, *ref_vec, count);
}
@@ -2551,45 +2512,55 @@ df_mw_compare (const void *m1, const void *m2)
/* Sort and compress a set of refs. */
-static unsigned int
-df_sort_and_compress_mws (struct df_mw_hardreg **mw_vec, unsigned int count)
+static void
+df_sort_and_compress_mws (VEC(df_mw_hardreg_ptr,stack) **mw_vec)
{
+ unsigned int count;
struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
unsigned int i;
unsigned int dist = 0;
- mw_vec[count] = NULL;
+ count = VEC_length (df_mw_hardreg_ptr, *mw_vec);
if (count < 2)
- return count;
+ return;
else if (count == 2)
{
- if (df_mw_compare (&mw_vec[0], &mw_vec[1]) > 0)
+ struct df_mw_hardreg *m0 = VEC_index (df_mw_hardreg_ptr, *mw_vec, 0);
+ struct df_mw_hardreg *m1 = VEC_index (df_mw_hardreg_ptr, *mw_vec, 1);
+ if (df_mw_compare (&m0, &m1) > 0)
{
- struct df_mw_hardreg *tmp = mw_vec[0];
- mw_vec[0] = mw_vec[1];
- mw_vec[1] = tmp;
+ struct df_mw_hardreg *tmp = VEC_index (df_mw_hardreg_ptr,
+ *mw_vec, 0);
+ VEC_replace (df_mw_hardreg_ptr, *mw_vec, 0,
+ VEC_index (df_mw_hardreg_ptr, *mw_vec, 1));
+ VEC_replace (df_mw_hardreg_ptr, *mw_vec, 1, tmp);
}
}
else
- qsort (mw_vec, count, sizeof (struct df_mw_hardreg *), df_mw_compare);
+ qsort (VEC_address (df_mw_hardreg_ptr, *mw_vec), count,
+ sizeof (struct df_mw_hardreg *), df_mw_compare);
for (i=0; i<count-dist; i++)
{
/* Find the next ref that is not equal to the current ref. */
- while (df_mw_equal_p (mw_vec[i], mw_vec[i + dist + 1]))
+ while (i + dist + 1 < count
+ && df_mw_equal_p (VEC_index (df_mw_hardreg_ptr, *mw_vec, i),
+ VEC_index (df_mw_hardreg_ptr, *mw_vec,
+ i + dist + 1)))
{
- pool_free (problem_data->mw_reg_pool, mw_vec[i + dist + 1]);
+ pool_free (problem_data->mw_reg_pool,
+ VEC_index (df_mw_hardreg_ptr, *mw_vec, i + dist + 1));
dist++;
}
/* Copy it down to the next position. */
- if (dist)
- mw_vec[i+1] = mw_vec[i + dist + 1];
+ if (dist && i + dist + 1 < count)
+ VEC_replace (df_mw_hardreg_ptr, *mw_vec, i + 1,
+ VEC_index (df_mw_hardreg_ptr, *mw_vec, i + dist + 1));
}
count -= dist;
- mw_vec[count] = NULL;
- return count;
+ VEC_truncate (df_mw_hardreg_ptr, *mw_vec, count);
}
@@ -2598,22 +2569,10 @@ df_sort_and_compress_mws (struct df_mw_hardreg **mw_vec, unsigned int count)
static void
df_canonize_collection_rec (struct df_collection_rec *collection_rec)
{
- if (collection_rec->def_vec)
- collection_rec->next_def
- = df_sort_and_compress_refs (collection_rec->def_vec,
- collection_rec->next_def);
- if (collection_rec->use_vec)
- collection_rec->next_use
- = df_sort_and_compress_refs (collection_rec->use_vec,
- collection_rec->next_use);
- if (collection_rec->eq_use_vec)
- collection_rec->next_eq_use
- = df_sort_and_compress_refs (collection_rec->eq_use_vec,
- collection_rec->next_eq_use);
- if (collection_rec->mw_vec)
- collection_rec->next_mw
- = df_sort_and_compress_mws (collection_rec->mw_vec,
- collection_rec->next_mw);
+ df_sort_and_compress_refs (&collection_rec->def_vec);
+ df_sort_and_compress_refs (&collection_rec->use_vec);
+ df_sort_and_compress_refs (&collection_rec->eq_use_vec);
+ df_sort_and_compress_mws (&collection_rec->mw_vec);
}
@@ -2671,16 +2630,20 @@ df_install_ref (df_ref this_ref,
static df_ref *
df_install_refs (basic_block bb,
- df_ref *old_vec, unsigned int count,
+ VEC(df_ref,stack)* old_vec,
struct df_reg_info **reg_info,
struct df_ref_info *ref_info,
bool is_notes)
{
+ unsigned int count;
+
+ count = VEC_length (df_ref, old_vec);
if (count)
{
- unsigned int i;
df_ref *new_vec = XNEWVEC (df_ref, count + 1);
bool add_to_table;
+ df_ref this_ref;
+ unsigned int ix;
switch (ref_info->ref_order)
{
@@ -2705,10 +2668,9 @@ df_install_refs (basic_block bb,
if (add_to_table && df->analyze_subset)
add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);
- for (i = 0; i < count; i++)
+ for (ix = 0; VEC_iterate (df_ref, old_vec, ix, this_ref); ++ix)
{
- df_ref this_ref = old_vec[i];
- new_vec[i] = this_ref;
+ new_vec[ix] = this_ref;
df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
ref_info, add_to_table);
}
@@ -2725,14 +2687,18 @@ df_install_refs (basic_block bb,
insn. */
static struct df_mw_hardreg **
-df_install_mws (struct df_mw_hardreg **old_vec, unsigned int count)
+df_install_mws (VEC(df_mw_hardreg_ptr,stack) *old_vec)
{
+ unsigned int count;
+
+ count = VEC_length (df_mw_hardreg_ptr, old_vec);
if (count)
{
struct df_mw_hardreg **new_vec
= XNEWVEC (struct df_mw_hardreg*, count + 1);
- memcpy (new_vec, old_vec,
- sizeof (struct df_mw_hardreg*) * (count + 1));
+ memcpy (new_vec, VEC_address (df_mw_hardreg_ptr, old_vec),
+ sizeof (struct df_mw_hardreg*) * count);
+ new_vec[count] = NULL;
return new_vec;
}
else
@@ -2757,8 +2723,7 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
{
df_scan_free_ref_vec (insn_rec->defs);
insn_rec->defs
- = df_install_refs (bb, collection_rec->def_vec,
- collection_rec->next_def,
+ = df_install_refs (bb, collection_rec->def_vec,
df->def_regs,
&df->def_info, false);
}
@@ -2767,7 +2732,6 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
df_scan_free_ref_vec (insn_rec->uses);
insn_rec->uses
= df_install_refs (bb, collection_rec->use_vec,
- collection_rec->next_use,
df->use_regs,
&df->use_info, false);
}
@@ -2776,7 +2740,6 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
df_scan_free_ref_vec (insn_rec->eq_uses);
insn_rec->eq_uses
= df_install_refs (bb, collection_rec->eq_use_vec,
- collection_rec->next_eq_use,
df->eq_use_regs,
&df->use_info, true);
}
@@ -2784,8 +2747,7 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
{
df_scan_free_mws_vec (insn_rec->mw_hardregs);
insn_rec->mw_hardregs
- = df_install_mws (collection_rec->mw_vec,
- collection_rec->next_mw);
+ = df_install_mws (collection_rec->mw_vec);
}
}
else
@@ -2794,14 +2756,12 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
df_scan_free_ref_vec (bb_info->artificial_defs);
bb_info->artificial_defs
- = df_install_refs (bb, collection_rec->def_vec,
- collection_rec->next_def,
+ = df_install_refs (bb, collection_rec->def_vec,
df->def_regs,
&df->def_info, false);
df_scan_free_ref_vec (bb_info->artificial_uses);
bb_info->artificial_uses
= df_install_refs (bb, collection_rec->use_vec,
- collection_rec->next_use,
df->use_regs,
&df->use_info, false);
}
@@ -2893,11 +2853,11 @@ df_ref_create_structure (enum df_ref_class cl,
if (collection_rec)
{
if (DF_REF_REG_DEF_P (this_ref))
- safe_grow_vec (collection_rec, def, ref) = this_ref;
+ VEC_safe_push (df_ref, stack, collection_rec->def_vec, this_ref);
else if (DF_REF_FLAGS (this_ref) & DF_REF_IN_NOTE)
- safe_grow_vec (collection_rec, eq_use, ref) = this_ref;
+ VEC_safe_push (df_ref, stack, collection_rec->eq_use_vec, this_ref);
else
- safe_grow_vec (collection_rec, use, ref) = this_ref;
+ VEC_safe_push (df_ref, stack, collection_rec->use_vec, this_ref);
}
return this_ref;
@@ -2963,7 +2923,8 @@ df_ref_record (enum df_ref_class cl,
hardreg->start_regno = regno;
hardreg->end_regno = endregno - 1;
hardreg->mw_order = df->ref_order++;
- safe_grow_vec (collection_rec, mw, mwhreg) = hardreg;
+ VEC_safe_push (df_mw_hardreg_ptr, stack, collection_rec->mw_vec,
+ hardreg);
}
for (i = regno; i < endregno; i++)
@@ -3425,10 +3386,11 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
static void
df_get_conditional_uses (struct df_collection_rec *collection_rec)
{
- unsigned int i;
- for (i = 0; i < collection_rec->next_def; i++)
+ unsigned int ix;
+ df_ref ref;
+
+ for (ix = 0; VEC_iterate (df_ref, collection_rec->def_vec, ix, ref); ++ix)
{
- df_ref ref = collection_rec->def_vec[i];
if (DF_REF_FLAGS_IS_SET (ref, DF_REF_CONDITIONAL))
{
int width = -1;
@@ -3467,16 +3429,14 @@ df_get_call_refs (struct df_collection_rec * collection_rec,
unsigned int ui;
bool is_sibling_call;
unsigned int i;
+ df_ref def;
bitmap defs_generated = BITMAP_ALLOC (&df_bitmap_obstack);
/* Do not generate clobbers for registers that are the result of the
call. This causes ordering problems in the chain building code
depending on which def is seen first. */
- for (i=0; i<collection_rec->next_def; i++)
- {
- df_ref def = collection_rec->def_vec[i];
- bitmap_set_bit (defs_generated, DF_REF_REGNO (def));
- }
+ for (i = 0; VEC_iterate (df_ref, collection_rec->def_vec, i, def); ++i)
+ bitmap_set_bit (defs_generated, DF_REF_REGNO (def));
/* Record the registers used to pass arguments, and explicitly
noted as clobbered. */
@@ -3550,10 +3510,10 @@ df_insn_refs_collect (struct df_collection_rec* collection_rec,
bool is_cond_exec = (GET_CODE (PATTERN (insn_info->insn)) == COND_EXEC);
/* Clear out the collection record. */
- collection_rec->next_def = 0;
- collection_rec->next_use = 0;
- collection_rec->next_eq_use = 0;
- collection_rec->next_mw = 0;
+ VEC_truncate (df_ref, collection_rec->def_vec, 0);
+ VEC_truncate (df_ref, collection_rec->use_vec, 0);
+ VEC_truncate (df_ref, collection_rec->eq_use_vec, 0);
+ VEC_truncate (df_mw_hardreg_ptr, collection_rec->mw_vec, 0);
/* Record register defs. */
df_defs_record (collection_rec, PATTERN (insn_info->insn), bb, insn_info, 0);
@@ -3651,10 +3611,10 @@ df_need_static_chain_reg (struct function *fun)
static void
df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
{
- collection_rec->next_def = 0;
- collection_rec->next_use = 0;
- collection_rec->next_eq_use = 0;
- collection_rec->next_mw = 0;
+ VEC_truncate (df_ref, collection_rec->def_vec, 0);
+ VEC_truncate (df_ref, collection_rec->use_vec, 0);
+ VEC_truncate (df_ref, collection_rec->eq_use_vec, 0);
+ VEC_truncate (df_mw_hardreg_ptr, collection_rec->mw_vec, 0);
if (bb->index == ENTRY_BLOCK)
{
@@ -3723,11 +3683,6 @@ df_bb_refs_record (int bb_index, bool scan_insns)
if (!df)
return;
- safe_alloca_vec (&collection_rec, def, ref);
- safe_alloca_vec (&collection_rec, use, ref);
- safe_alloca_vec (&collection_rec, eq_use, ref);
- safe_alloca_vec (&collection_rec, mw, mwhreg);
-
bb_info = df_scan_get_bb_info (bb_index);
/* Need to make sure that there is a record in the basic block info. */
@@ -3739,6 +3694,11 @@ df_bb_refs_record (int bb_index, bool scan_insns)
bb_info->artificial_uses = NULL;
}
+ collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
+ collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+
if (scan_insns)
/* Scan the block an insn at a time from beginning to end. */
FOR_BB_INSNS (bb, insn)
@@ -3761,14 +3721,14 @@ df_bb_refs_record (int bb_index, bool scan_insns)
df_bb_refs_collect (&collection_rec, bb);
df_refs_add_to_chains (&collection_rec, bb, NULL);
+ VEC_free (df_ref, stack, collection_rec.def_vec);
+ VEC_free (df_ref, stack, collection_rec.use_vec);
+ VEC_free (df_ref, stack, collection_rec.eq_use_vec);
+ VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
+
/* Now that the block has been processed, set the block as dirty so
LR and LIVE will get it processed. */
df_set_bb_dirty (bb);
-
- safe_free_vec (&collection_rec, def);
- safe_free_vec (&collection_rec, use);
- safe_free_vec (&collection_rec, eq_use);
- safe_free_vec (&collection_rec, mw);
}
@@ -4023,14 +3983,12 @@ df_record_entry_block_defs (bitmap entry_block_defs)
{
struct df_collection_rec collection_rec;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- safe_alloca_vec (&collection_rec, def, ref);
-
+ collection_rec.def_vec = VEC_alloc (df_ref, stack, FIRST_PSEUDO_REGISTER);
df_entry_block_defs_collect (&collection_rec, entry_block_defs);
/* Process bb_refs chain */
df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (ENTRY_BLOCK), NULL);
-
- safe_free_vec (&collection_rec, def);
+ VEC_free (df_ref, stack, collection_rec.def_vec);
}
@@ -4196,14 +4154,13 @@ df_record_exit_block_uses (bitmap exit_block_uses)
{
struct df_collection_rec collection_rec;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- safe_alloca_vec (&collection_rec, use, ref);
+ collection_rec.use_vec = VEC_alloc (df_ref, stack, FIRST_PSEUDO_REGISTER);
df_exit_block_uses_collect (&collection_rec, exit_block_uses);
/* Process bb_refs chain */
df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (EXIT_BLOCK), NULL);
-
- safe_free_vec (&collection_rec, use);
+ VEC_free (df_ref, stack, collection_rec.use_vec);
}
@@ -4380,7 +4337,7 @@ df_compute_regs_ever_live (bool reset)
df_reg_chain_mark (refs, regno, is_def, is_eq_use)
df_reg_chain_verify_unmarked (refs)
- df_refs_verify (ref*, ref*, bool)
+ df_refs_verify (VEC(df_ref,stack)*, ref*, bool)
df_mws_verify (mw*, mw*, bool)
df_insn_refs_verify (collection_rec, bb, insn, bool)
df_bb_refs_verify (bb, refs, bool)
@@ -4444,12 +4401,15 @@ df_reg_chain_verify_unmarked (df_ref refs)
/* Verify that NEW_REC and OLD_REC have exactly the same members. */
static bool
-df_refs_verify (df_ref *new_rec, df_ref *old_rec,
+df_refs_verify (VEC(df_ref,stack) *new_rec, df_ref *old_rec,
bool abort_if_fail)
{
- while ((*new_rec) && (*old_rec))
+ unsigned int ix;
+ df_ref new_ref;
+
+ for (ix = 0; VEC_iterate (df_ref, new_rec, ix, new_ref); ++ix)
{
- if (!df_ref_equal_p (*new_rec, *old_rec))
+ if (*old_rec == NULL || !df_ref_equal_p (new_ref, *old_rec))
{
if (abort_if_fail)
gcc_assert (0);
@@ -4465,14 +4425,13 @@ df_refs_verify (df_ref *new_rec, df_ref *old_rec,
DF_REF_REG_UNMARK (*old_rec);
}
- new_rec++;
old_rec++;
}
if (abort_if_fail)
- gcc_assert ((*new_rec == NULL) && (*old_rec == NULL));
+ gcc_assert (*old_rec == NULL);
else
- return ((*new_rec == NULL) && (*old_rec == NULL));
+ return *old_rec == NULL;
return false;
}
@@ -4480,26 +4439,29 @@ df_refs_verify (df_ref *new_rec, df_ref *old_rec,
/* Verify that NEW_REC and OLD_REC have exactly the same members. */
static bool
-df_mws_verify (struct df_mw_hardreg **new_rec, struct df_mw_hardreg **old_rec,
+df_mws_verify (VEC(df_mw_hardreg_ptr,stack) *new_rec,
+ struct df_mw_hardreg **old_rec,
bool abort_if_fail)
{
- while ((*new_rec) && (*old_rec))
+ unsigned int ix;
+ struct df_mw_hardreg *new_reg;
+
+ for (ix = 0; VEC_iterate (df_mw_hardreg_ptr, new_rec, ix, new_reg); ++ix)
{
- if (!df_mw_equal_p (*new_rec, *old_rec))
+ if (*old_rec == NULL || !df_mw_equal_p (new_reg, *old_rec))
{
if (abort_if_fail)
gcc_assert (0);
else
return false;
}
- new_rec++;
old_rec++;
}
if (abort_if_fail)
- gcc_assert ((*new_rec == NULL) && (*old_rec == NULL));
+ gcc_assert (*old_rec == NULL);
else
- return ((*new_rec == NULL) && (*old_rec == NULL));
+ return *old_rec == NULL;
return false;
}
@@ -4562,10 +4524,10 @@ df_bb_verify (basic_block bb)
struct df_collection_rec collection_rec;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- safe_alloca_vec (&collection_rec, def, ref);
- safe_alloca_vec (&collection_rec, use, ref);
- safe_alloca_vec (&collection_rec, eq_use, ref);
- safe_alloca_vec (&collection_rec, mw, mwhreg);
+ collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
+ collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
+ collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
gcc_assert (bb_info);
@@ -4584,11 +4546,6 @@ df_bb_verify (basic_block bb)
df_refs_verify (collection_rec.use_vec, df_get_artificial_uses (bb->index), true);
df_free_collection_rec (&collection_rec);
- safe_free_vec (&collection_rec, def);
- safe_free_vec (&collection_rec, use);
- safe_free_vec (&collection_rec, eq_use);
- safe_free_vec (&collection_rec, mw);
-
return true;
}