about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMatthew Wahab <matthew.wahab@arm.com>2015-06-01 15:18:19 +0000
committerMatthew Wahab <matthew.wahab@arm.com>2015-06-01 15:18:19 +0000
commit09a879602c68efe1df72dae8934273630dc50df0 (patch)
treefe81d44ef889699582bcebedca3d4329ff80c8fd
parenta074e43d6b29e5e6253dc025a66e0684d0dd0819 (diff)
PR target/65697
* config/aarch64/aarch64.c (aarch64_emit_post_barrier):New. (aarch64_split_atomic_op): Check for __sync memory models, emit appropriate initial loads and final barriers. git-svn-id: https://gcc.gnu.org/svn/gcc/trunk@223983 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--gcc/ChangeLog7
-rw-r--r--gcc/config/aarch64/aarch64.c31
2 files changed, 37 insertions, 1 deletion
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index daf647637e4..0c109c33c0a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2015-06-01 Matthew Wahab <matthew.wahab@arm.com>
+
+ PR target/65697
+ * config/aarch64/aarch64.c (aarch64_emit_post_barrier):New.
+ (aarch64_split_atomic_op): Check for __sync memory models, emit
+ appropriate initial loads and final barriers.
+
2015-06-01 Vidya Praveen <vidya.praveen@atmel.com>
* Makefile.in: Fix gcov dependencies that should
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 648a548e0e0..93bea074d68 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -9409,6 +9409,23 @@ aarch64_expand_compare_and_swap (rtx operands[])
emit_insn (gen_rtx_SET (bval, x));
}
+/* Emit a barrier, that is appropriate for memory model MODEL, at the end of a
+ sequence implementing an atomic operation. */
+
+static void
+aarch64_emit_post_barrier (enum memmodel model)
+{
+ const enum memmodel base_model = memmodel_base (model);
+
+ if (is_mm_sync (model)
+ && (base_model == MEMMODEL_ACQUIRE
+ || base_model == MEMMODEL_ACQ_REL
+ || base_model == MEMMODEL_SEQ_CST))
+ {
+ emit_insn (gen_mem_thread_fence (GEN_INT (MEMMODEL_SEQ_CST)));
+ }
+}
+
/* Split a compare and swap pattern. */
void
@@ -9471,6 +9488,8 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
{
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
+ const enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
+ const bool is_sync = is_mm_sync (model);
rtx_code_label *label;
rtx x;
@@ -9485,7 +9504,13 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
old_out = new_out;
value = simplify_gen_subreg (wmode, value, mode, 0);
- aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
+ /* The initial load can be relaxed for a __sync operation since a final
+ barrier will be emitted to stop code hoisting. */
+ if (is_sync)
+ aarch64_emit_load_exclusive (mode, old_out, mem,
+ GEN_INT (MEMMODEL_RELAXED));
+ else
+ aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
switch (code)
{
@@ -9521,6 +9546,10 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+
+ /* Emit any final barrier needed for a __sync operation. */
+ if (is_sync)
+ aarch64_emit_post_barrier (model);
}
static void