about summary refs log tree commit diff
path: root/libstdc++-v3/config/cpu/arm/atomicity.h
diff options
context:
space:
mode:
Diffstat (limited to 'libstdc++-v3/config/cpu/arm/atomicity.h')
-rw-r--r--  libstdc++-v3/config/cpu/arm/atomicity.h | 187
1 file changed, 14 insertions(+), 173 deletions(-)
diff --git a/libstdc++-v3/config/cpu/arm/atomicity.h b/libstdc++-v3/config/cpu/arm/atomicity.h
index 2363edf7cdf..7b952aac365 100644
--- a/libstdc++-v3/config/cpu/arm/atomicity.h
+++ b/libstdc++-v3/config/cpu/arm/atomicity.h
@@ -32,51 +32,23 @@
typedef int _Atomic_word;
+/* It isn't possible to write an atomic add instruction using the ARM
+ SWP instruction without using either a global guard variable or a
+ guard bit somewhere in the Atomic word. However, even with a guard
+ bit we need to understand the thread model (if any) in order to
+ make co-operatively threaded applications work correctly.
+
+ The previous Thumb-based implementations were also completely
+ broken, since they failed to switch back into Thumb mode (Gas bug,
+ I think). */
+
static inline _Atomic_word
__attribute__ ((__unused__))
__exchange_and_add (volatile _Atomic_word* __mem, int __val)
{
- _Atomic_word __tmp, __tmp2, __result;
-#ifdef __thumb__
- /* Since this function is inlined, we can't be sure of the alignment. */
- __asm__ __volatile__ (
- "ldr %0, 4f \n\t"
- "bx %0 \n\t"
- ".align 0 \n"
- "4:\t"
- ".word 0f \n\t"
- ".code 32 \n"
- "0:\t"
- "ldr %0, [%3] \n\t"
- "add %1, %0, %4 \n\t"
- "swp %2, %1, [%3] \n\t"
- "cmp %0, %2 \n\t"
- "swpne %1, %2, [%3] \n\t"
- "bne 0b \n\t"
- "ldr %1, 1f \n\t"
- "bx %1 \n"
- "1:\t"
- ".word 2f \n\t"
- ".code 16 \n"
- "2:\n"
- : "=&l"(__result), "=&r"(__tmp), "=&r"(__tmp2)
- : "r" (__mem), "r"(__val)
- : "cc", "memory");
-#else
- __asm__ __volatile__ (
- "\n"
- "0:\t"
- "ldr %0, [%3] \n\t"
- "add %1, %0, %4 \n\t"
- "swp %2, %1, [%3] \n\t"
- "cmp %0, %2 \n\t"
- "swpne %1, %2, [%3] \n\t"
- "bne 0b \n\t"
- ""
- : "=&r"(__result), "=&r"(__tmp), "=&r"(__tmp2)
- : "r" (__mem), "r"(__val)
- : "cc", "memory");
-#endif
+ _Atomic_word __result = *__mem;
+
+ *__mem = __result + __val;
return __result;
}
@@ -84,138 +56,7 @@ static inline void
__attribute__ ((__unused__))
__atomic_add (volatile _Atomic_word *__mem, int __val)
{
- _Atomic_word __tmp, __tmp2, __tmp3;
-#ifdef __thumb__
- /* Since this function is inlined, we can't be sure of the alignment. */
- __asm__ __volatile__ (
- "ldr %0, 4f \n\t"
- "bx %0 \n\t"
- ".align 0\n"
- "4:\t"
- ".word 0f \n\t"
- ".code 32 \n"
- "0:\t"
- "ldr %0, [%3] \n\t"
- "add %1, %0, %4 \n\t"
- "swp %2, %1, [%3] \n\t"
- "cmp %0, %2 \n\t"
- "swpne %1, %2,[%3] \n\t"
- "bne 0b \n\t"
- "ldr %1, 1f \n\t"
- "bx %1 \n"
- "1:\t"
- ".word 2f \n\t"
- ".code 16 \n"
- "2:\n"
- : "=&l"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
- : "r" (__mem), "r"(__val)
- : "cc", "memory");
-#else
- __asm__ __volatile__ (
- "\n"
- "0:\t"
- "ldr %0, [%3] \n\t"
- "add %1, %0, %4 \n\t"
- "swp %2, %1, [%3] \n\t"
- "cmp %0, %2 \n\t"
- "swpne %1, %2, [%3] \n\t"
- "bne 0b \n\t"
- ""
- : "=&r"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
- : "r" (__mem), "r"(__val)
- : "cc", "memory");
-#endif
-}
-
-static inline long
-__attribute__ ((__unused__))
-__always_swap (volatile long *__p, long __newval)
-{
- long __result;
-#ifdef __thumb__
- long __tmp;
- /* Since this function is inlined, we can't be sure of the alignment. */
- __asm__ __volatile__ (
- "ldr %0, 4f \n\t"
- "bx %0 \n\t"
- ".align 0 \n"
- "4:\t"
- ".word 0f \n\t"
- ".code 32\n"
- "0:\t"
- "swp %0, %3, [%2] \n\t"
- "ldr %1, 1f \n\t"
- "bx %1 \n"
- "1:\t"
- ".word 2f \n\t"
- ".code 16 \n"
- "2:\n"
- : "=&l"(__result), "=&r"(__tmp)
- : "r"(__p), "r"(__newval)
- : "memory");
-#else
- __asm__ __volatile__ (
- "\n\t"
- "swp %0, %2, [%1] \n\t"
- ""
- : "=&r"(__result)
- : "r"(__p), "r"(__newval)
- : "memory");
-#endif
- return __result;
-}
-
-static inline int
-__attribute__ ((__unused__))
-__test_and_set (volatile long *__p, long __newval)
-{
- int __result;
- long __tmp;
-#ifdef __thumb__
- /* Since this function is inlined, we can't be sure of the alignment. */
- __asm__ __volatile__ (
- "ldr %0, 4f \n\t"
- "bx %0 \n\t"
- ".align 0 \n"
- "4:\t"
- ".word 0f \n\t"
- ".code 32 \n"
- "0:\t"
- "ldr %0, [%2] \n\t"
- "cmp %0, #0 \n\t"
- "bne 1f \n\t"
- "swp %1, %3, [%2] \n\t"
- "cmp %0, %1 \n\t"
- "swpne %0, %1, [%2]\n\t"
- "bne 0b \n"
- "1:\t"
- "ldr %1, 2f \n\t"
- "bx %1 \n"
- "2:\t"
- ".word 3f \n\t"
- ".code 16 \n"
- "3:"
- : "=&l"(__result), "=r" (__tmp)
- : "r"(__p), "r"(__newval)
- : "cc", "memory");
-#else
- __asm__ __volatile__ (
- "\n"
- "0:\t"
- "ldr %0, [%2] \n\t"
- "cmp %0, #0 \n\t"
- "bne 1f \n\t"
- "swp %1, %3, [%2] \n\t"
- "cmp %0, %1 \n\t"
- "swpne %0, %1, [%2] \n\t"
- "bne 0b \n"
- "1:\n\t"
- ""
- : "=&r"(__result), "=r" (__tmp)
- : "r"(__p), "r"(__newval)
- : "cc", "memory");
-#endif
- return __result;
+ __exchange_and_add (__mem, __val);
}
#endif /* atomicity.h */