author     Richard Henderson <rth@redhat.com>  2011-11-03 05:01:25 +0000
committer  Richard Henderson <rth@redhat.com>  2011-11-03 05:01:25 +0000
commit     d504ae3b56288b947b45b020370008e7f4beb3a5 (patch)
tree       8f892b956de5cb836c84eccb9794881a6685ba45
parent     f4ef0189f031003b3204c98dcbc3da7da8076e7a (diff)
Remove compare-and-swap fallback for __atomic_load.
This might be valid if we knew for certain that the memory is
writable.  But I don't see that we can assume that.

	* optabs.c (expand_atomic_load): Don't try compare-and-swap.

git-svn-id: https://gcc.gnu.org/svn/gcc/branches/cxx-mem-model@180815 138bc75d-0d04-0410-961f-82ee72b054a4
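For context, a minimal sketch of the failure mode the message describes (the names `limit' and `read_limit' are hypothetical; this example is not part of the commit): a compare-and-swap performs a write cycle even when it merely stores back the old value, so expanding __atomic_load as compare_and_swap (mem, 0, 0) can fault when mem refers to read-only memory.

static const __int128 limit = 1000;   /* likely placed in a read-only page */

__int128
read_limit (void)
{
  /* With the removed fallback this load could expand to roughly
     val = compare_and_swap (&limit, 0, 0) -- a locked write to
     `limit', which can trap on a .rodata mapping even though the
     stored value never changes.  */
  return __atomic_load_n (&limit, __ATOMIC_ACQUIRE);
}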
-rw-r--r--  gcc/ChangeLog.mm                                             6
-rw-r--r--  gcc/optabs.c                                                15
-rw-r--r--  gcc/testsuite/ChangeLog.mm                                   5
-rw-r--r--  gcc/testsuite/gcc.dg/atomic-load-5.c                        65
-rw-r--r--  gcc/testsuite/gcc.dg/simulate-thread/atomic-load-int128.c  132
5 files changed, 14 insertions(+), 209 deletions(-)
diff --git a/gcc/ChangeLog.mm b/gcc/ChangeLog.mm
index a22c40bfc27..4454e9319b5 100644
--- a/gcc/ChangeLog.mm
+++ b/gcc/ChangeLog.mm
@@ -1,3 +1,7 @@
+2011-11-02  Richard Henderson  <rth@redhat.com>
+
+	* optabs.c (expand_atomic_load): Don't try compare-and-swap.
+
 2011-11-02  Aldy Hernandez  <aldyh@redhat.com>
 
 	* Merge from trunk at revision 180790.
@@ -15,7 +19,7 @@
 	* builtin-types.def (BT_FN_BOOL_SIZE): Remove.
 	(BT_FN_BOOL_SIZE_CONST_VPTR): Add.
 
-2011-11-02  Andrew MacLeod
+2011-11-02  Andrew MacLeod  <amacleod@redhat.com>
 
 	* common.opt (finline-atomics): New.  Flag to disable atomic inlining.
 	* builtins.c (expand_builtin_atomic_exchange,
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 276cb8660e6..56608f89181 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -7458,19 +7458,12 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
       return ops[0].value;
     }
 
-  /* If there is no load pattern, default to a move with barriers.  If the size
-     of the object is greater than word size on this target, a default load
-     will not be atomic.  */
+  /* If the size of the object is greater than word size on this target,
+     then we assume that a load will not be atomic.  */
   if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
-    {
-      /* Issue val = compare_and_swap (mem, 0, 0).
-	 This may cause the occasional harmless store of 0 when the value is
-	 already 0, but do it anyway until its determined to be invalid.  */
-      expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
-				      const0_rtx, false, model, model);
-      return target;
-    }
+    return NULL_RTX;
 
+  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
   if (!target || target == const0_rtx)
     target = gen_reg_rtx (mode);
 
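A sketch of the user-visible effect (the library-call fallback happens in the builtins.c caller, which this hunk does not show, so that detail is an assumption):

__int128 shared;

__int128
reader (void)
{
  /* Previously this could expand inline as compare_and_swap (&shared, 0, 0),
     writing to `shared'.  With expand_atomic_load returning NULL_RTX for
     an over-wide mode, the builtin is presumably resolved as a call to an
     external __atomic_load_16 helper instead.  */
  return __atomic_load_n (&shared, __ATOMIC_SEQ_CST);
}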
diff --git a/gcc/testsuite/ChangeLog.mm b/gcc/testsuite/ChangeLog.mm
index e4e460fd658..db789f3ee6b 100644
--- a/gcc/testsuite/ChangeLog.mm
+++ b/gcc/testsuite/ChangeLog.mm
@@ -1,3 +1,8 @@
+2011-11-02  Richard Henderson  <rth@redhat.com>
+
+	* gcc.dg/atomic-load-5.c: Remove.
+	* gcc.dg/simulate-thread/atomic-load-int128.c: Remove.
+
 2011-11-02  Andrew MacLeod  <amacleod@redhat.com>
 
 	* gcc.dg/atomic-lockfree.c: Add extra lock-free parameter.
diff --git a/gcc/testsuite/gcc.dg/atomic-load-5.c b/gcc/testsuite/gcc.dg/atomic-load-5.c
deleted file mode 100644
index 2991e4d6c7a..00000000000
--- a/gcc/testsuite/gcc.dg/atomic-load-5.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Test __atomic routines for existence and proper execution on 16 byte
-   values with each valid memory model.  */
-/* { dg-do run } */
-/* { dg-require-effective-target sync_int_128 } */
-/* { dg-options "-mcx16" { target { x86_64-*-* } } } */
-
-extern void abort(void);
-
-__int128_t v, count;
-
-main ()
-{
-  v = 0;
-  count = 0;
-
-  if (__atomic_load_n (&v, __ATOMIC_RELAXED) != count++)
-    abort();
-  else
-    v++;
-
-  if (__atomic_load_n (&v, __ATOMIC_ACQUIRE) != count++)
-    abort();
-  else
-    v++;
-
-  if (__atomic_load_n (&v, __ATOMIC_CONSUME) != count++)
-    abort();
-  else
-    v++;
-
-  if (__atomic_load_n (&v, __ATOMIC_SEQ_CST) != count++)
-    abort();
-  else
-    v++;
-
-  /* Now test the generic variants.  */
-
-  __atomic_load (&v, &count, __ATOMIC_RELAXED);
-  if (count != v)
-    abort();
-  else
-    v++;
-
-  __atomic_load (&v, &count, __ATOMIC_ACQUIRE);
-  if (count != v)
-    abort();
-  else
-    v++;
-
-  __atomic_load (&v, &count, __ATOMIC_CONSUME);
-  if (count != v)
-    abort();
-  else
-    v++;
-
-  __atomic_load (&v, &count, __ATOMIC_SEQ_CST);
-  if (count != v)
-    abort();
-  else
-    v++;
-
-
-  return 0;
-}
-
diff --git a/gcc/testsuite/gcc.dg/simulate-thread/atomic-load-int128.c b/gcc/testsuite/gcc.dg/simulate-thread/atomic-load-int128.c
deleted file mode 100644
index 3ade0d6fad3..00000000000
--- a/gcc/testsuite/gcc.dg/simulate-thread/atomic-load-int128.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* { dg-do link } */
-/* { dg-require-effective-target sync_int_128 } */
-/* { dg-options "-mcx16" { target { x86_64-*-* i?86-*-* } } } */
-/* { dg-final { simulate-thread } } */
-
-#include <stdio.h>
-#include "simulate-thread.h"
-
-
-/* Testing load for atomicity is a little trickier.
-
-   Set up the atomic value so that it changes value after every instruction
-   is executed.
-
-   Simply alternating between 2 values wouldn't be sufficient since a load of
-   one part, followed by the load of the second part 2 instructions later would
-   appear to be valid.
-
-   set up a table of 16 values which change a bit in every byte of the value
-   each time, this will give us a 16 instruction cycle before repetition
-   kicks in, which should be sufficient to detect any issues.  Just to be sure,
-   we also change the table cycle size during execution.
-
-   The end result is that all loads should always get one of the values from
-   the table.  Any other pattern means the load failed.  */
-
-__int128_t ret;
-__int128_t value = 0;
-__int128_t result = 0;
-__int128_t table[16] = {
-0x0000000000000000,
-0x1111111111111111,
-0x2222222222222222,
-0x3333333333333333,
-0x4444444444444444,
-0x5555555555555555,
-0x6666666666666666,
-0x7777777777777777,
-0x8888888888888888,
-0x9999999999999999,
-0xAAAAAAAAAAAAAAAA,
-0xBBBBBBBBBBBBBBBB,
-0xCCCCCCCCCCCCCCCC,
-0xDDDDDDDDDDDDDDDD,
-0xEEEEEEEEEEEEEEEE,
-0xFFFFFFFFFFFFFFFF
-};
-
-int table_cycle_size = 16;
-
-/* Since we don't have 128 bit constants, we have to properly pad the table.  */
-void fill_table()
-{
-  int x;
-  for (x = 0; x < 16; x++)
-    {
-      ret = table[x];
-      ret = (ret << 64) | ret;
-      table[x] = ret;
-    }
-}
-
-/* Return 0 if 'result' is a valid value to have loaded.  */
-int verify_result ()
-{
-  int x;
-  int found = 0;
-
-  /* Check entire table for valid values.  */
-  for (x = 0; x < 16; x++)
-    if (result == table[x])
-      {
-	found = 1;
-	break;
-      }
-
-  if (!found)
-    printf("FAIL: Invalid result returned from fetch\n");
-
-  return !found;
-}
-
-/* Iterate VALUE through the different valid values.  */
-void simulate_thread_other_threads ()
-{
-  static int current = 0;
-
-  if (++current >= table_cycle_size)
-    current = 0;
-  value = table[current];
-}
-
-int simulate_thread_step_verify ()
-{
-  return verify_result ();
-}
-
-int simulate_thread_final_verify ()
-{
-  return verify_result ();
-}
-
-__attribute__((noinline))
-void simulate_thread_main()
-{
-  int x;
-
-  /* Make sure value starts with an atomic value now.  */
-  __atomic_store_n (&value, ret, __ATOMIC_SEQ_CST);
-
-  /* Execute loads with value changing at various cyclic values.  */
-  for (table_cycle_size = 16; table_cycle_size > 4 ; table_cycle_size--)
-    {
-      ret = __atomic_load_n (&value, __ATOMIC_SEQ_CST);
-      /* In order to verify the returned value (which is not atomic), it needs
-	 to be atomically stored into another variable and check that.  */
-      __atomic_store_n (&result, ret, __ATOMIC_SEQ_CST);
-
-      /* Execute the fetch/store a couple of times just to ensure the cycles
-	 have a chance to be interesting.  */
-      ret = __atomic_load_n (&value, __ATOMIC_SEQ_CST);
-      __atomic_store_n (&result, ret, __ATOMIC_SEQ_CST);
-    }
-}
-
-main()
-{
-  fill_table ();
-  simulate_thread_main ();
-  simulate_thread_done ();
-  return 0;
-}