diff options
author | Nicholas Mc Guire <der.herr@hofr.at> | 2013-12-01 23:03:52 -0500 |
---|---|---|
committer | Anders Roxell <anders.roxell@linaro.org> | 2015-06-24 21:13:00 +0200 |
commit | bf0939465b3b207b65ae887e9d4c49632d3ac9e4 (patch) | |
tree | 9e86c35d34bc094bb58291d265ffcfefa8dd6b53 | |
parent | cb512520cf5d6b97c7e300dfd9561dbda2c6a469 (diff) |
seqlock: consolidate spin_lock/unlock waiting with spin_unlock_wait
Since c2f21ce ("locking: Implement new raw_spinlock")
include/linux/spinlock.h includes spin_unlock_wait() to wait for a concurrent
holder of a lock. This patch just moves over to that API. spin_unlock_wait()
covers both raw_spinlock_t and spinlock_t, so it should be safe here as well.
The added rt-variant of read_seqbegin in include/linux/seqlock.h that is being
modified was introduced by patch:
seqlock-prevent-rt-starvation.patch
Behavior should be unchanged.
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | include/linux/seqlock.h | 11 |
1 files changed, 5 insertions, 6 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index de343cda4680..4acd0e2fb5cb 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -335,8 +335,7 @@ repeat: * Take the lock and let the writer proceed (i.e. evtl * boost it), otherwise we could loop here forever. */ - spin_lock(&sl->lock); - spin_unlock(&sl->lock); + spin_unlock_wait(&sl->lock); goto repeat; } return ret; @@ -356,7 +355,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock(seqlock_t *sl) @@ -368,7 +367,7 @@ static inline void write_sequnlock(seqlock_t *sl) static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_bh(seqlock_t *sl) @@ -380,7 +379,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl) static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_irq(seqlock_t *sl) @@ -394,7 +393,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - __write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); return flags; } |