 arch/arm/include/asm/compiler.h | 32 ++++++++++++++++++++++++++++++++
 arch/arm/include/asm/percpu.h   | 15 ++-------------
 2 files changed, 34 insertions(+), 13 deletions(-)
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
index 8155db2f7fa..009eafa2ea0 100644
--- a/arch/arm/include/asm/compiler.h
+++ b/arch/arm/include/asm/compiler.h
@@ -11,5 +11,37 @@
*/
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+/*
+ * Read TPIDRPRW.
+ * We want to allow caching the value, so for GCC we avoid volatile and
+ * instead use a fake stack read to hazard against barrier(); GCC needs
+ * this workaround because it does not treat a "memory" clobber on a
+ * non-volatile asm block as a side-effect.
+ */
+#if defined(__clang__)
+static inline unsigned long read_TPIDRPRW(void)
+{
+ unsigned long off;
+
+ /*
+ * Read TPIDRPRW; the "memory" clobber hazards against barrier().
+ */
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+
+ return off;
+}
+#else
+static inline unsigned long read_TPIDRPRW(void)
+{
+ unsigned long off;
+ register unsigned long *sp asm ("sp");
+
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
+ return off;
+}
+#endif
+#endif
#endif /* __ASM_ARM_COMPILER_H */
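Aside (not part of the patch): the easiest way to see what the GCC variant buys is to compare code generation. The compile-only sketch below assumes an ARM32 cross compiler (e.g. arm-linux-gnueabihf-gcc -O2 -S); the function names twice_no_barrier/twice_with_barrier are made up for the example. Without an intervening barrier() the compiler is free to fold the two reads into a single mrc, while the fake stack read forces a fresh mrc once barrier() clobbers memory.

/* Compile-only sketch: TPIDRPRW traps outside privileged mode, so this is
 * for inspecting the generated assembly, not for running in userspace.
 */
#define barrier() asm volatile("" : : : "memory")

static inline unsigned long read_TPIDRPRW(void)
{
	unsigned long off;
	register unsigned long *sp asm ("sp");

	/* GCC variant from above: non-volatile, hazards on a stack read. */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));

	return off;
}

unsigned long twice_no_barrier(void)
{
	/* The two calls may be combined into a single mrc. */
	return read_TPIDRPRW() + read_TPIDRPRW();
}

unsigned long twice_with_barrier(void)
{
	unsigned long a = read_TPIDRPRW();

	barrier();			/* clobbers memory ... */
	return a + read_TPIDRPRW();	/* ... so TPIDRPRW is re-read */
}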
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 209e6504922..208c7e5fa7d 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -27,21 +27,10 @@ static inline void set_my_cpu_offset(unsigned long off)
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
-static inline unsigned long __my_cpu_offset(void)
-{
- unsigned long off;
- register unsigned long *sp asm ("sp");
+#include <asm/compiler.h>
- /*
- * Read TPIDRPRW.
- * We want to allow caching the value, so avoid using volatile and
- * instead use a fake stack read to hazard against barrier().
- */
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+#define __my_cpu_offset read_TPIDRPRW()
- return off;
-}
-#define __my_cpu_offset __my_cpu_offset()
#else
#define set_my_cpu_offset(x) do {} while(0)
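
A note on the consumer side (again not part of the patch): the generic per-cpu layer only requires __my_cpu_offset to be an expression it can add to a per-cpu base address, which is why a bare macro expanding to read_TPIDRPRW() is enough. The compile-only sketch below is a simplification with invented names (sketch_this_cpu_ptr, sketch_counter, sketch_bump); the real kernel goes through the SHIFT_PERCPU_PTR()/RELOC_HIDE() machinery in the generic percpu headers rather than a plain cast.

/* Rough, made-up illustration of how a per-cpu access consumes the offset. */
static inline unsigned long read_TPIDRPRW(void)
{
	unsigned long off;
	register unsigned long *sp asm ("sp");

	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
	return off;
}

#define __my_cpu_offset read_TPIDRPRW()

/* Each access becomes "per-cpu base + TPIDRPRW", and the non-volatile
 * asm lets the compiler reuse one TPIDRPRW read for nearby accesses.
 */
#define sketch_this_cpu_ptr(ptr) \
	((typeof(ptr))((unsigned long)(ptr) + __my_cpu_offset))

static unsigned long sketch_counter;	/* stand-in for a per-cpu variable */

unsigned long sketch_bump(void)
{
	return ++(*sketch_this_cpu_ptr(&sketch_counter));
}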