From d835b6c4cc02507b3bf3f8ee6c86857cf0ee67ab Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 23 Apr 2015 21:44:42 +0200
Subject: blackfin: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

TODO: use inline asm or at least asm macros to collapse the lot.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
---
 arch/blackfin/include/asm/atomic.h | 28 +++++++++++++++++++++-------
 arch/blackfin/kernel/bfin_ksyms.c  |  7 ++++---
 arch/blackfin/mach-bf561/atomic.S  | 30 +++++++++++++++---------------
 3 files changed, 40 insertions(+), 25 deletions(-)

(limited to 'arch/blackfin')

diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..eafa55b81a7b 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,33 @@
 #include
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
 
 #endif
 
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
-- 
cgit v1.2.3
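For context: the macros added above are void-returning, i.e. they perform the bitwise update atomically and discard the value fetched by the __raw_*_asm helper (only __raw_atomic_add_asm's return value is used, for atomic_add_return/atomic_sub_return). The following is a minimal user-space analogue of those semantics, written with C11 stdatomic purely as an illustration; none of it is kernel code and the demo_* names are invented for this sketch.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative stand-ins: update *v atomically, discard the old value,
     * mirroring the void-returning atomic_{or,and,xor}() in the patch above. */
    static void demo_atomic_or(int i, atomic_int *v)  { (void)atomic_fetch_or(v, i); }
    static void demo_atomic_and(int i, atomic_int *v) { (void)atomic_fetch_and(v, i); }
    static void demo_atomic_xor(int i, atomic_int *v) { (void)atomic_fetch_xor(v, i); }

    int main(void)
    {
        atomic_int v = 0x0f;

        demo_atomic_or(0xf0, &v);   /* v == 0xff */
        demo_atomic_and(~0x0f, &v); /* v == 0xf0 -- the old "clear mask" idiom */
        demo_atomic_xor(0xff, &v);  /* v == 0x0f */
        printf("%#x\n", (unsigned int)atomic_load(&v));
        return 0;
    }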
From e6942b7de2dfe44ebde9bae57dadece5abca9de8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 23 Apr 2014 19:32:50 +0200
Subject: atomic: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
---
 arch/blackfin/include/asm/atomic.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/blackfin')

diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index eafa55b81a7b..2d6a7a3823c3 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -28,8 +28,6 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
 #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define CONFIG_ARCH_HAS_ATOMIC_OR
-
 #define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
-- 
cgit v1.2.3
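The CONFIG_ARCH_HAS_ATOMIC_OR marker introduced in the previous patch is dropped again here: once every architecture provides atomic_{or,xor,and} unconditionally, generic code no longer needs an opt-in symbol. Optional extras such as atomic_andnot() are instead handled with the usual #ifndef-macro fallback in the generic header; that header is outside this arch/blackfin-limited view, so the fragment below is only a sketch of the pattern, with a stand-in atomic_t and a __sync builtin in place of the real arch operation.

    /* Sketch only -- not the actual <linux/atomic.h> contents. */
    typedef struct { int counter; } atomic_t;

    static inline void atomic_and(int i, atomic_t *v)
    {
        (void)__sync_fetch_and_and(&v->counter, i); /* stand-in for the arch op */
    }

    #ifndef atomic_andnot       /* only if no arch-specific version was defined */
    static inline void atomic_andnot(int i, atomic_t *v)
    {
        atomic_and(~i, v);      /* generic fallback: AND with the complement */
    }
    #endif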
From de9e432cb5de1bf2952919dc0b22e4bec0ed8d53 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 24 Apr 2015 01:12:32 +0200
Subject: atomic: Collapse all atomic_{set,clear}_mask definitions

Move the now generic definitions of atomic_{set,clear}_mask() into
linux/atomic.h to avoid endless and pointless repetition.

Also, provide an atomic_andnot() wrapper for those few archs that can
implement that.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
---
 arch/blackfin/include/asm/atomic.h | 10 ----------
 1 file changed, 10 deletions(-)

(limited to 'arch/blackfin')

diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 2d6a7a3823c3..1c1c42330c99 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -32,16 +32,6 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
-static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	atomic_and(~mask, v);
-}
-
-static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	atomic_or(mask, v);
-}
-
 #endif
 
 #include
-- 
cgit v1.2.3

From 805de8f43c20ba8b479bb598b543fa86b20067f6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 24 Apr 2015 01:12:32 +0200
Subject: atomic: Replace atomic_{set,clear}_mask() usage

Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
---
 arch/blackfin/mach-common/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/blackfin')

diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
-- 
cgit v1.2.3
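The last patch shows the conversion rule applied across the tree: setting mask bits becomes atomic_or(), and clearing them becomes atomic_andnot() (equivalently, atomic_and() of the complement). Below is a self-contained illustration of that mapping; the types and helpers are stand-ins built on GCC/Clang __sync builtins, not the kernel's own definitions, and the IPI bit is a made-up example.

    #include <assert.h>

    /* Stand-ins for illustration only; not the kernel's definitions. */
    typedef struct { int counter; } atomic_t;

    static void atomic_or(int i, atomic_t *v)     { (void)__sync_fetch_and_or(&v->counter, i); }
    static void atomic_andnot(int i, atomic_t *v) { (void)__sync_fetch_and_and(&v->counter, ~i); }

    int main(void)
    {
        atomic_t bits = { .counter = 0 };
        int msg = 3;    /* hypothetical IPI message number */

        atomic_or(1 << msg, &bits);     /* was: atomic_set_mask(1 << msg, &bits)   */
        assert(bits.counter == (1 << msg));

        atomic_andnot(1 << msg, &bits); /* was: atomic_clear_mask(1 << msg, &bits) */
        assert(bits.counter == 0);

        return 0;
    }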