-rw-r--r--  arch/x86/Kconfig                 |  1
-rw-r--r--  arch/x86/include/asm/spinlock.h  |  5
-rw-r--r--  include/linux/lockref.h          | 11
-rw-r--r--  lib/Kconfig                      | 10
-rw-r--r--  lib/lockref.c                    | 60
5 files changed, 84 insertions(+), 3 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b32ebf92b0ce..67e00740531c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -16,6 +16,7 @@ config X86_64
def_bool y
depends on 64BIT
select X86_DEV_DMA_OPS
+ select ARCH_USE_CMPXCHG_LOCKREF
### Arch settings
config X86
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e3ddd7db723f..e0e668422c75 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -34,6 +34,11 @@
# define UNLOCK_LOCK_PREFIX
#endif
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.head == lock.tickets.tail;
+}
+
/*
* Ticket locks are conceptually two parts, one indicating the current head of
* the queue, and the other indicating the current tail. The lock is acquired
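The helper added above tests a snapshot of a ticket lock by value, without touching memory: the lock is free exactly when the "now serving" head has caught up with the "next ticket" tail. A minimal userspace sketch of the same check, using toy types in place of the kernel's arch_spinlock_t:

	#include <stdio.h>

	/* Toy model of the x86 ticket lock of this era: lockers take a
	 * ticket (tail) and wait until it is served (head). Equal head
	 * and tail means nobody holds the lock. Toy names throughout. */
	struct toy_tickets { unsigned short head, tail; };
	struct toy_spinlock { struct toy_tickets tickets; };

	static int toy_spin_value_unlocked(struct toy_spinlock lock)
	{
		return lock.tickets.head == lock.tickets.tail;
	}

	int main(void)
	{
		struct toy_spinlock free_lock = { { 3, 3 } }; /* unlocked */
		struct toy_spinlock held_lock = { { 3, 5 } }; /* held, plus a waiter */

		printf("%d %d\n", toy_spin_value_unlocked(free_lock),
		       toy_spin_value_unlocked(held_lock)); /* prints: 1 0 */
		return 0;
	}

Taking the lock by value matters: the check runs on a copy that was already snapshotted out of the lockref word, so the fast path never dirties the lock's cache line.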
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4c0af31c8d47..ca07b5028b01 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -17,8 +17,15 @@
#include <linux/spinlock.h>
struct lockref {
- spinlock_t lock;
- unsigned int count;
+ union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+ aligned_u64 lock_count;
+#endif
+ struct {
+ spinlock_t lock;
+ unsigned int count;
+ };
+ };
};
extern void lockref_get(struct lockref *);
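This union is the heart of the patch: with CONFIG_CMPXCHG_LOCKREF the spinlock and the reference count share a single aligned 64-bit word, so one cmpxchg() can bump the count while simultaneously verifying that the lock half of the word still reads as unlocked. A userspace sketch of the overlay, with GCC's __atomic_compare_exchange_n() standing in for the kernel's cmpxchg() (toy names, and a toy lock encoding where 0 means unlocked):

	#include <stdint.h>
	#include <stdio.h>

	/* Toy lockref: a 32-bit lock word and a 32-bit count overlaid
	 * with one 64-bit word. */
	struct toy_lockref {
		union {
			uint64_t lock_count;
			struct {
				uint32_t lock;
				uint32_t count;
			};
		};
	};

	int main(void)
	{
		struct toy_lockref ref = { .lock_count = 0 };
		ref.count = 41;

		/* Snapshot, modify the copy, then try to publish it
		 * atomically: the compare fails if either the lock or the
		 * count changed meanwhile, since both live in the word. */
		struct toy_lockref old = ref, new = ref;
		new.count++;

		uint64_t expected = old.lock_count;
		if (__atomic_compare_exchange_n(&ref.lock_count, &expected,
						new.lock_count, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			printf("count is now %u\n", ref.count); /* 42 */
		return 0;
	}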
diff --git a/lib/Kconfig b/lib/Kconfig
index 71d9f81f6eed..65561716c16c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -48,6 +48,16 @@ config STMP_DEVICE
config PERCPU_RWSEM
boolean
+config ARCH_USE_CMPXCHG_LOCKREF
+ bool
+
+config CMPXCHG_LOCKREF
+ def_bool y if ARCH_USE_CMPXCHG_LOCKREF
+ depends on SMP
+ depends on !GENERIC_LOCKBREAK
+ depends on !DEBUG_SPINLOCK
+ depends on !DEBUG_LOCK_ALLOC
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
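The negative dependencies are about layout rather than the algorithm itself: GENERIC_LOCKBREAK, DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC all add fields to the spinlock, so the lock/count pair would no longer fit in one cmpxchg-able 64-bit word. A compile-time sketch of the invariant this protects (the real enforcement is the BUILD_BUG_ON inside the CMPXCHG_LOOP macro below):

	#include <stdint.h>

	/* Stand-in: with lock debugging disabled, a spinlock is one
	 * 32-bit word; the debug options would grow it past that. */
	typedef struct { uint32_t raw; } toy_spinlock_t;

	struct toy_lockref {
		union {
			uint64_t lock_count;
			struct {
				toy_spinlock_t lock;
				uint32_t count;
			};
		};
	};

	/* Mirrors BUILD_BUG_ON(sizeof(old) != 8) in lib/lockref.c. */
	_Static_assert(sizeof(struct toy_lockref) == 8,
		       "lock + count must fit one 64-bit cmpxchg word");

	int main(void) { return 0; }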
diff --git a/lib/lockref.c b/lib/lockref.c
index a9a4f4e1eff5..7819c2d1d315 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,33 @@
#include <linux/export.h>
#include <linux/lockref.h>
+#ifdef CONFIG_CMPXCHG_LOCKREF
+
+/*
+ * Note that the "cmpxchg()" reloads the "old" value for the
+ * failure case.
+ */
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
+ struct lockref old; \
+ BUILD_BUG_ON(sizeof(old) != 8); \
+ old.lock_count = ACCESS_ONCE(lockref->lock_count); \
+ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
+ struct lockref new = old, prev = old; \
+ CODE \
+ old.lock_count = cmpxchg(&lockref->lock_count, \
+ old.lock_count, new.lock_count); \
+ if (likely(old.lock_count == prev.lock_count)) { \
+ SUCCESS; \
+ } \
+ } \
+} while (0)
+
+#else
+
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
+
+#endif
+
/**
* lockref_get - Increments reference count unconditionally
* @lockref: pointer to lockref structure
@@ -10,6 +37,12 @@
*/
void lockref_get(struct lockref *lockref)
{
+ CMPXCHG_LOOP(
+ new.count++;
+ ,
+ return;
+ );
+
spin_lock(&lockref->lock);
lockref->count++;
spin_unlock(&lockref->lock);
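Hand-expanding the macro makes the fast path easier to follow. Roughly, the CMPXCHG_LOOP invocation in lockref_get() above becomes the following (a sketch of the expansion; ACCESS_ONCE(), cmpxchg(), likely() and arch_spin_value_unlocked() are the kernel primitives the patch already uses):

	void lockref_get(struct lockref *lockref)
	{
		struct lockref old;

		BUILD_BUG_ON(sizeof(old) != 8);
		old.lock_count = ACCESS_ONCE(lockref->lock_count);
		while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
			struct lockref new = old, prev = old;

			new.count++;				/* CODE */
			/* cmpxchg() returns the value it found, so a
			 * failed attempt reloads "old" for the retry. */
			old.lock_count = cmpxchg(&lockref->lock_count,
						 old.lock_count,
						 new.lock_count);
			if (likely(old.lock_count == prev.lock_count))
				return;				/* SUCCESS */
		}

		/* Lock held, or we kept losing races: take the spinlock. */
		spin_lock(&lockref->lock);
		lockref->count++;
		spin_unlock(&lockref->lock);
	}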
@@ -23,9 +56,18 @@ EXPORT_SYMBOL(lockref_get);
*/
int lockref_get_not_zero(struct lockref *lockref)
{
- int retval = 0;
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count++;
+ if (!old.count)
+ return 0;
+ ,
+ return 1;
+ );
+
spin_lock(&lockref->lock);
+ retval = 0;
if (lockref->count) {
lockref->count++;
retval = 1;
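A usage sketch for the function above: a lookup path takes a new reference only while the object is still live. struct obj and obj_grab() are hypothetical, not part of this patch:

	#include <linux/lockref.h>

	/* Hypothetical object embedding a lockref. */
	struct obj {
		struct lockref ref;
		/* ... payload ... */
	};

	static struct obj *obj_grab(struct obj *obj)
	{
		/* Fails iff the count was already zero, i.e. the object
		 * is on its way out; the caller must look it up again. */
		if (!lockref_get_not_zero(&obj->ref))
			return NULL;
		return obj;
	}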
@@ -43,6 +85,14 @@ EXPORT_SYMBOL(lockref_get_not_zero);
*/
int lockref_get_or_lock(struct lockref *lockref)
{
+ CMPXCHG_LOOP(
+ new.count++;
+ if (!old.count)
+ break;
+ ,
+ return 1;
+ );
+
spin_lock(&lockref->lock);
if (!lockref->count)
return 0;
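Note the contrast with lockref_get_not_zero() above: on a zero count this CODE block says break, not return 0, so control leaves only the cmpxchg retry loop and falls through to the spin_lock() path, which can then return 0 with the lock held, as the function's contract requires. A toy illustration of that break-versus-return distinction (plain userspace C, not kernel code):

	#include <stdio.h>

	/* CODE runs inside a retry loop wrapped in do { } while (0):
	 * "break" exits the loop and falls through to the slow path,
	 * while "return" leaves the whole function from the fast path. */
	static int try_fast_then_slow(int use_break)
	{
		do {
			while (1) {
				if (use_break)
					break;	/* -> slow path below */
				return 1;	/* fast-path success */
			}
		} while (0);

		return 0;			/* slow path taken */
	}

	int main(void)
	{
		printf("%d %d\n", try_fast_then_slow(1),
		       try_fast_then_slow(0));	/* prints: 0 1 */
		return 0;
	}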
@@ -59,6 +109,14 @@ EXPORT_SYMBOL(lockref_get_or_lock);
*/
int lockref_put_or_lock(struct lockref *lockref)
{
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ break;
+ ,
+ return 1;
+ );
+
spin_lock(&lockref->lock);
if (lockref->count <= 1)
return 0;
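The same pattern in the put direction: lockref_put_or_lock() decrements locklessly while the count stays above 1, and otherwise returns 0 with the lock held so the caller can deal with the final reference. A teardown sketch, reusing the hypothetical struct obj from the lookup example:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static void obj_put(struct obj *obj)
	{
		/* Fast path: count was > 1 and one cmpxchg dropped it. */
		if (lockref_put_or_lock(&obj->ref))
			return;

		/* Returned 0: count was <= 1 and obj->ref.lock is now
		 * held, so nobody can resurrect the object under us. */
		obj->ref.count = 0;
		spin_unlock(&obj->ref.lock);
		kfree(obj);
	}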