aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeff Law <law@redhat.com>2018-03-03 14:03:22 +0000
committerJakub Jelinek <jakub@redhat.com>2018-03-03 14:03:22 +0000
commitc1e7aa1d88a3110f71b740fc8ea11f3274cae433 (patch)
tree7408658813a9f1958069aade8dcb100824086dab
parent22390eba166f3c0ff8a4875dfe25cb679d7b03de (diff)
svn merge -r257216:257217 svn+ssh://gcc.gnu.org/svn/gcc/trunk
git-svn-id: https://gcc.gnu.org/svn/gcc/branches/redhat/gcc-7-branch@258210 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--gcc/ChangeLog10
-rw-r--r--gcc/config/i386/i386.c65
-rw-r--r--gcc/testsuite/ChangeLog5
-rw-r--r--gcc/testsuite/gcc.target/i386/pr84064.c10
4 files changed, 72 insertions, 18 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3afb2ff4dd3..e77323357b0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -136,6 +136,16 @@
(find_candidates_dom_walker::before_dom_children): Ignore stmts that
can throw.
+2018-01-30 Jeff Law <law@redhat.com>
+
+ PR target/84064
+ * i386.c (ix86_adjust_stack_and_probe_stack_clash): New argument
+ INT_REGISTERS_SAVED. Check it prior to calling
+ get_scratch_register_on_entry.
+ (ix86_adjust_stack_and_probe): Similarly.
+ (ix86_emit_probe_stack_range): Similarly.
+ (ix86_expand_prologue): Corresponding changes.
+
2018-03-01 H.J. Lu <hongjiu.lu@intel.com>
Backport from mainline
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 633d88eb752..88411ca0c59 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -13661,10 +13661,14 @@ release_scratch_register_on_entry (struct scratch_reg *sr)
This differs from the next routine in that it tries hard to prevent
attacks that jump the stack guard. Thus it is never allowed to allocate
more than PROBE_INTERVAL bytes of stack space without a suitable
- probe. */
+ probe.
+
+ INT_REGISTERS_SAVED is true if integer registers have already been
+ pushed on the stack. */
static void
-ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
+ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size,
+ const bool int_registers_saved)
{
struct machine_function *m = cfun->machine;
@@ -13770,6 +13774,12 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
}
else
{
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
+
struct scratch_reg sr;
get_scratch_register_on_entry (&sr);
@@ -13828,10 +13838,14 @@ ix86_adjust_stack_and_probe_stack_clash (const HOST_WIDE_INT size)
emit_insn (gen_blockage ());
}
-/* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
+/* Emit code to adjust the stack pointer by SIZE bytes while probing it.
+
+ INT_REGISTERS_SAVED is true if integer registers have already been
+ pushed on the stack. */
static void
-ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
+ix86_adjust_stack_and_probe (const HOST_WIDE_INT size,
+ const bool int_registers_saved)
{
/* We skip the probe for the first interval + a small dope of 4 words and
probe that many bytes past the specified size to maintain a protection
@@ -13892,6 +13906,12 @@ ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
equality test for the loop condition. */
else
{
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
+
HOST_WIDE_INT rounded_size;
struct scratch_reg sr;
@@ -14019,10 +14039,14 @@ output_adjust_stack_and_probe (rtx reg)
}
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
- inclusive. These are offsets from the current stack pointer. */
+ inclusive. These are offsets from the current stack pointer.
+
+ INT_REGISTERS_SAVED is true if integer registers have already been
+ pushed on the stack. */
static void
-ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
+ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
+ const bool int_registers_saved)
{
/* See if we have a constant small number of probes to generate. If so,
that's the easy case. The run-time loop is made up of 6 insns in the
@@ -14050,6 +14074,12 @@ ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
equality test for the loop condition. */
else
{
+ /* We expect the GP registers to be saved when probes are used
+ as the probing sequences might need a scratch register and
+ the routine to allocate one assumes the integer registers
+ have already been saved. */
+ gcc_assert (int_registers_saved);
+
HOST_WIDE_INT rounded_size, last;
struct scratch_reg sr;
@@ -14582,15 +14612,10 @@ ix86_expand_prologue (void)
&& (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
|| flag_stack_clash_protection))
{
- /* We expect the GP registers to be saved when probes are used
- as the probing sequences might need a scratch register and
- the routine to allocate one assumes the integer registers
- have already been saved. */
- gcc_assert (int_registers_saved);
-
if (flag_stack_clash_protection)
{
- ix86_adjust_stack_and_probe_stack_clash (allocate);
+ ix86_adjust_stack_and_probe_stack_clash (allocate,
+ int_registers_saved);
allocate = 0;
}
else if (STACK_CHECK_MOVING_SP)
@@ -14598,7 +14623,7 @@ ix86_expand_prologue (void)
if (!(crtl->is_leaf && !cfun->calls_alloca
&& allocate <= get_probe_interval ()))
{
- ix86_adjust_stack_and_probe (allocate);
+ ix86_adjust_stack_and_probe (allocate, int_registers_saved);
allocate = 0;
}
}
@@ -14614,11 +14639,12 @@ ix86_expand_prologue (void)
if (crtl->is_leaf && !cfun->calls_alloca)
{
if (size > get_probe_interval ())
- ix86_emit_probe_stack_range (0, size);
+ ix86_emit_probe_stack_range (0, size, int_registers_saved);
}
else
ix86_emit_probe_stack_range (0,
- size + get_stack_check_protect ());
+ size + get_stack_check_protect (),
+ int_registers_saved);
}
else
{
@@ -14627,10 +14653,13 @@ ix86_expand_prologue (void)
if (size > get_probe_interval ()
&& size > get_stack_check_protect ())
ix86_emit_probe_stack_range (get_stack_check_protect (),
- size - get_stack_check_protect ());
+ (size
+ - get_stack_check_protect ()),
+ int_registers_saved);
}
else
- ix86_emit_probe_stack_range (get_stack_check_protect (), size);
+ ix86_emit_probe_stack_range (get_stack_check_protect (), size,
+ int_registers_saved);
}
}
}
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 65e94b6bfe0..69e86c21ea8 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -162,6 +162,11 @@
PR tree-optimization/83605
* gcc.dg/pr83605.c: New test.
+2018-01-30 Jeff Law <law@redhat.com>
+
+ PR target/84064
+ * gcc.target/i386/pr84064.c: New test.
+
2018-03-01 H.J. Lu <hongjiu.lu@intel.com>
Backport from mainline
diff --git a/gcc/testsuite/gcc.target/i386/pr84064.c b/gcc/testsuite/gcc.target/i386/pr84064.c
new file mode 100644
index 00000000000..01f8d9e945a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr84064.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=i686 -fstack-clash-protection" } */
+/* { dg-require-effective-target ia32 } */
+
+void
+f (void *p1, void *p2)
+{
+ __builtin_memcpy (p1, p2, 1000);
+}
+