 include/linux/spinlock_rt.h |  1 +
 kernel/rtmutex.c            | 31 +++++++++++++++++++++++++++----
 kernel/timer.c              |  2 +-
 3 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 3b555b4b52cf..28edba7a833c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -20,6 +20,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
 extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
 extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
 extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
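
The header change only declares the new unlock primitive; the implementation follows in kernel/rtmutex.c below. Its contract is narrow: it may only release a lock that was taken with a trylock from hard-IRQ context, because its slow path polls wait_lock rather than blocking on it. A minimal sketch of the intended pairing, with a hypothetical hard-IRQ caller (rt_spin_trylock() is the existing trylock declared in this header):

	if (rt_spin_trylock(lock)) {
		/* short critical section, hard-IRQ context */
		rt_spin_unlock_after_trylock_in_irq(lock);
	}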
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 7e0c4d7a0b5b..744d41f038a2 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -951,10 +951,8 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 /*
  * Slow path to release a rt_mutex spin_lock style
  */
-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
-	raw_spin_lock(&lock->wait_lock);
-
 	debug_rt_mutex_unlock(lock);
 
 	rt_mutex_deadlock_account_unlock(current);
@@ -973,6 +971,23 @@ static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 	rt_mutex_adjust_prio(current);
 }
 
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+	raw_spin_lock(&lock->wait_lock);
+	__rt_spin_lock_slowunlock(lock);
+}
+
+static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+	int ret;
+
+	do {
+		ret = raw_spin_trylock(&lock->wait_lock);
+	} while (!ret);
+
+	__rt_spin_lock_slowunlock(lock);
+}
+
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
 	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1003,6 +1018,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(rt_spin_unlock);
 
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
 {
 	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -1236,7 +1258,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
-	raw_spin_lock(&lock->wait_lock);
+	if (!raw_spin_trylock(&lock->wait_lock))
+		return ret;
 	init_lists(lock);
 
 	if (likely(rt_mutex_owner(lock) != current)) {
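
Taken together, the rtmutex.c hunks split the spin_lock-style unlock slow path: __rt_spin_lock_slowunlock() now expects wait_lock to be held on entry, and two wrappers acquire it, rt_spin_lock_slowunlock() with a plain raw_spin_lock() and rt_spin_lock_slowunlock_hirq() with a trylock loop for hard-IRQ callers. rt_mutex_slowtrylock() likewise fails instead of blocking when wait_lock is contended, so a trylock issued from hard-IRQ context can no longer spin on a wait_lock held by the task it interrupted. The trylock loop in the unlock path then terminates, apparently because the hard-IRQ caller only reaches it after a successful trylock: any remaining wait_lock holder is on another CPU and drops the lock after a short non-preemptible section. A userspace analog of the wrapper split, as an illustrative sketch only (pthreads stand in for the kernel primitives, and the names merely mirror the patch):

#include <pthread.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold wait_lock; it is released before returning. */
static void slowunlock_locked(void)
{
	/* ... wake the next waiter, undo priority boosting ... */
	pthread_mutex_unlock(&wait_lock);
}

/* Normal context: a blocking acquisition is fine. */
static void slowunlock(void)
{
	pthread_mutex_lock(&wait_lock);
	slowunlock_locked();
}

/* "Hard IRQ" context: must not block, so poll with trylock. */
static void slowunlock_hirq(void)
{
	while (pthread_mutex_trylock(&wait_lock) != 0)
		;	/* holders keep wait_lock only briefly */
	slowunlock_locked();
}

int main(void)
{
	slowunlock();
	slowunlock_hirq();
	return 0;
}

The split keeps a single copy of the release logic while letting each calling context pick an acquisition strategy it can afford.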
diff --git a/kernel/timer.c b/kernel/timer.c
index 4fe22a09578a..badd2d2066dc 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1336,7 +1336,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 		if (time_before_eq(base->next_timer, base->timer_jiffies))
 			base->next_timer = __next_timer_interrupt(base);
 		expires = base->next_timer;
-		rt_spin_unlock(&base->lock);
+		rt_spin_unlock_after_trylock_in_irq(&base->lock);
 	} else {
 		expires = now + 1;
 	}
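
get_next_timer_interrupt() can be called from hard-IRQ context on the NOHZ path, so the RT tree only trylocks base->lock there; the else branch falling back to now + 1 is the trylock-failure case. With plain rt_spin_unlock(), the release was the one step in that sequence still acquiring wait_lock unconditionally; the new helper makes the release poll as well:

	/*
	 * old release path from hard IRQ (can spin-wait indefinitely):
	 *   rt_spin_unlock()
	 *     rt_spin_lock_slowunlock()
	 *       raw_spin_lock(&lock->wait_lock)
	 *
	 * new release path (polls until the short hold on another
	 * CPU is dropped):
	 *   rt_spin_unlock_after_trylock_in_irq()
	 *     rt_spin_lock_slowunlock_hirq()
	 *       do { ret = raw_spin_trylock(&lock->wait_lock); } while (!ret)
	 */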