author     Matthew Wahab <matthew.wahab@arm.com>  2015-06-01 15:21:02 +0000
committer  Matthew Wahab <matthew.wahab@arm.com>  2015-06-01 15:21:02 +0000
commit     0f63602c76d7abe750efa402b2648100da360adb (patch)
tree       f5bad7721528b5d821e13a00eb571dace099e0b0
parent     09a879602c68efe1df72dae8934273630dc50df0 (diff)
PR target/65697
	* config/aarch64/aarch64.c (aarch64_split_compare_and_swap): Check
	for __sync memory models, emit initial loads and final barriers as
	appropriate.

git-svn-id: https://gcc.gnu.org/svn/gcc/trunk@223984 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  gcc/ChangeLog                  7
-rw-r--r--  gcc/config/aarch64/aarch64.c  18
2 files changed, 23 insertions(+), 2 deletions(-)
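For context before the diff: the builtins affected here are the legacy __sync primitives, which GCC documents as full barriers. A minimal caller that would reach aarch64_split_compare_and_swap on AArch64 (without LSE atomics) might look like the sketch below; the function and variable names are illustrative, not taken from the PR testcase.

/* Illustrative only: a legacy __sync compare-and-swap.  On AArch64 this
   expands to the load-exclusive/store-exclusive loop that
   aarch64_split_compare_and_swap lays out, and the __sync semantics
   require a full barrier after the loop.  */
#include <stdio.h>

static int lock;

static int try_acquire (void)
{
  /* Returns the previous value of lock; 0 means we took it.  */
  return __sync_val_compare_and_swap (&lock, 0, 1);
}

int main (void)
{
  printf ("acquired: %s\n", try_acquire () == 0 ? "yes" : "no");
  return 0;
}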
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0c109c33c0a..b0c172c6f15 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,12 @@
2015-06-01 Matthew Wahab <matthew.wahab@arm.com>
+ PR target/65697
+ * config/aarch64/aarch64.c (aarch64_split_compare_and_swap): Check
+ for __sync memory models, emit initial loads and final barriers as
+ appropriate.
+
+2015-06-01 Matthew Wahab <matthew.wahab@arm.com>
+
PR target/65697
* config/aarch64/aarch64.c (aarch64_emit_post_barrier): New.
(aarch64_split_atomic_op): Check for __sync memory models, emit
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 93bea074d68..62c8c8fe47a 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -9436,14 +9436,18 @@ aarch64_split_compare_and_swap (rtx operands[])
bool is_weak;
rtx_code_label *label1, *label2;
rtx x, cond;
+ enum memmodel model;
+ rtx model_rtx;
rval = operands[0];
mem = operands[1];
oldval = operands[2];
newval = operands[3];
is_weak = (operands[4] != const0_rtx);
+ model_rtx = operands[5];
scratch = operands[7];
mode = GET_MODE (mem);
+ model = memmodel_from_int (INTVAL (model_rtx));
label1 = NULL;
if (!is_weak)
@@ -9453,7 +9457,13 @@ aarch64_split_compare_and_swap (rtx operands[])
}
label2 = gen_label_rtx ();
- aarch64_emit_load_exclusive (mode, rval, mem, operands[5]);
+ /* The initial load can be relaxed for a __sync operation since a final
+ barrier will be emitted to stop code hoisting. */
+ if (is_mm_sync (model))
+ aarch64_emit_load_exclusive (mode, rval, mem,
+ GEN_INT (MEMMODEL_RELAXED));
+ else
+ aarch64_emit_load_exclusive (mode, rval, mem, model_rtx);
cond = aarch64_gen_compare_reg (NE, rval, oldval);
x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
@@ -9461,7 +9471,7 @@ aarch64_split_compare_and_swap (rtx operands[])
gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
- aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]);
+ aarch64_emit_store_exclusive (mode, scratch, mem, newval, model_rtx);
if (!is_weak)
{
@@ -9478,6 +9488,10 @@ aarch64_split_compare_and_swap (rtx operands[])
}
emit_label (label2);
+
+ /* Emit any final barrier needed for a __sync operation. */
+ if (is_mm_sync (model))
+ aarch64_emit_post_barrier (model);
}
/* Split an atomic operation. */
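Taken together, the hunks change the split in two ways: the initial load-exclusive drops to a relaxed model when the request came from a __sync builtin (the trailing barrier already stops hoisting above the loop), and a final barrier is emitted after the loop only for those __sync models. A minimal sketch of that control flow, using hypothetical enum values and helper names in place of GCC's memmodel machinery:

/* Sketch only; these names are stand-ins, not GCC's internal API.  */
#include <stdbool.h>
#include <stdio.h>

typedef enum { MODEL_RELAXED, MODEL_ACQUIRE, MODEL_SEQ_CST,
               MODEL_SYNC_SEQ_CST } model_t;

/* Hypothetical counterpart of is_mm_sync: true for __sync models.  */
static bool is_sync_model (model_t m) { return m == MODEL_SYNC_SEQ_CST; }

static void split_cas (model_t model)
{
  /* A __sync operation may use a relaxed initial load: the final
     barrier below already prevents code from being hoisted past it.  */
  model_t load_model = is_sync_model (model) ? MODEL_RELAXED : model;
  printf ("load-exclusive, model %d\n", load_model);
  printf ("compare; store-exclusive; retry loop\n");

  /* Only __sync operations get the trailing full barrier.  */
  if (is_sync_model (model))
    printf ("post barrier\n");
}

int main (void)
{
  split_cas (MODEL_SYNC_SEQ_CST);  /* __sync path: relaxed load + barrier.  */
  split_cas (MODEL_ACQUIRE);       /* __atomic path: model passed through.  */
  return 0;
}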