author    Anders Roxell <anders.roxell@linaro.org>    2014-06-30 15:44:45 +0200
committer Anders Roxell <anders.roxell@linaro.org>    2014-06-30 16:16:08 +0200
commit    9e32061a8e77bf33d1d9955ca5acb3470935fd06 (patch)
tree      1a6bc5cc8510a5233b7905ed9f13f2478a7f4109 /kernel
parent    0cbd8a2041247e23ce09e99253c312027c76073e (diff)
Changes since v3.14.2-rt2
- rwsem readers are now not allowed to nest. A patch from Steven Rostedt.

- a few bugs were fixed in the hotplug code which were introduced during
  the v3.14 port. Fixed by Mike Galbraith.

- Mike Galbraith sent a patch which might fix lazy preempt on x86_64. The
  patch was applied but my machine still explodes, therefore lazy preempt
  remains off on x86_64.

- Mike Galbraith sent a few patches to get CPU hotplug to work. This
  includes lg_global_trylock_relax().

- a few push downs of migrate_disable() (where migrate_disable() was called
  after rt_mutex_trylock()) have been reverted; hotplug is not too happy
  about that ordering. A patch by Steven Rostedt and Mike Galbraith. A
  simplified sketch of the restored ordering is shown below.

- there was a complaint about a backtrace from run_local_timers() in UP
  mode because a spin_try_lock() failed. _This_ particular case was not an
  error. The optimization is for FULL_NO_HZ, which is pointless on UP
  because there is no spare CPU. Therefore the optimization is now disabled
  in UP mode and the backtrace is gone. Reported by Stanislav Meduna.

- the block-mq notifier now uses a spinlock and runs during CPU_POST_DEAD
  instead of at CPU_DEAD time. lockdep complained about the sleeping
  ctx->lock taken inside the raw blk_mq_cpu_notify_lock, and CPU_DEAD runs
  with irqs off.

Known issues:

- bcache is disabled.

- lazy preempt on x86_64 leads to a crash with some load.

- CPU hotplug works in general. Steven's test script however deadlocks,
  usually on the second invocation.

Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
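A minimal sketch of the restored trylock ordering referred to above
(illustration only, not code from this commit; the helper name
rt_trylock_pattern() is invented and the header locations are indicative,
while migrate_disable()/migrate_enable()/rt_mutex_trylock() are the existing
RT primitives used in the kernel/locking/rt.c hunks below):

#include <linux/preempt.h>
#include <linux/rtmutex.h>

/*
 * Disable migration before attempting the lock and re-enable it only if
 * the trylock fails; on success the pin is dropped again in the matching
 * unlock path.
 */
static int rt_trylock_pattern(struct rt_mutex *lock)
{
	int ret;

	migrate_disable();		/* pin the task to this CPU first */
	ret = rt_mutex_trylock(lock);	/* then attempt the acquisition */
	if (!ret)
		migrate_enable();	/* failed: drop the pin again */

	return ret;
}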
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c          |  2
-rw-r--r--  kernel/locking/rt.c   | 60
-rw-r--r--  kernel/stop_machine.c |  2
3 files changed, 24 insertions, 40 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 041fada58f4c..ce0032983fc9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -649,7 +649,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
/* CPU didn't die: tell everyone. Can't complain. */
smpboot_unpark_threads(cpu);
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
- goto out_cancel;
+ goto out_release;
}
BUG_ON(cpu_online(cpu));
diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
index 5d177272ffb4..055a3df13c2a 100644
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -180,12 +180,14 @@ EXPORT_SYMBOL(_mutex_unlock);
*/
int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
- int ret = rt_mutex_trylock(&rwlock->lock);
+ int ret;
- if (ret) {
+ migrate_disable();
+ ret = rt_mutex_trylock(&rwlock->lock);
+ if (ret)
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- migrate_disable();
- }
+ else
+ migrate_enable();
return ret;
}
@@ -212,11 +214,13 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
* write locked.
*/
if (rt_mutex_owner(lock) != current) {
+ migrate_disable();
ret = rt_mutex_trylock(lock);
- if (ret) {
+ if (ret)
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- migrate_disable();
- }
+ else
+ migrate_enable();
+
} else if (!rwlock->read_depth) {
ret = 0;
}
@@ -240,13 +244,14 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
struct rt_mutex *lock = &rwlock->lock;
+
/*
* recursive read locks succeed when current owns the lock
*/
if (rt_mutex_owner(lock) != current) {
- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- __rt_spin_lock(lock);
migrate_disable();
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(lock);
}
rwlock->read_depth++;
}
@@ -316,10 +321,8 @@ EXPORT_SYMBOL(rt_up_write);
void rt_up_read(struct rw_semaphore *rwsem)
{
- if (--rwsem->read_depth == 0) {
- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
- rt_mutex_unlock(&rwsem->lock);
- }
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_up_read);
@@ -330,7 +333,6 @@ EXPORT_SYMBOL(rt_up_read);
void rt_downgrade_write(struct rw_semaphore *rwsem)
{
BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
- rwsem->read_depth = 1;
}
EXPORT_SYMBOL(rt_downgrade_write);
@@ -367,37 +369,20 @@ void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
int rt_down_read_trylock(struct rw_semaphore *rwsem)
{
- struct rt_mutex *lock = &rwsem->lock;
- int ret = 1;
-
- /*
- * recursive read locks succeed when current owns the rwsem,
- * but not when read_depth == 0 which means that the rwsem is
- * write locked.
- */
- if (rt_mutex_owner(lock) != current) {
- ret = rt_mutex_trylock(&rwsem->lock);
- if (ret)
- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
- } else if (!rwsem->read_depth) {
- ret = 0;
- }
+ int ret;
+ ret = rt_mutex_trylock(&rwsem->lock);
if (ret)
- rwsem->read_depth++;
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+
return ret;
}
EXPORT_SYMBOL(rt_down_read_trylock);
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
- struct rt_mutex *lock = &rwsem->lock;
-
- if (rt_mutex_owner(lock) != current) {
- rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
- rt_mutex_lock(&rwsem->lock);
- }
- rwsem->read_depth++;
+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
}
void rt_down_read(struct rw_semaphore *rwsem)
@@ -422,7 +407,6 @@ void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
lockdep_init_map(&rwsem->dep_map, name, key, 0);
#endif
- rwsem->read_depth = 0;
rwsem->lock.save_state = 0;
}
EXPORT_SYMBOL(__rt_rwsem_init);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index a933173675bf..bcbae9c962a9 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -675,7 +675,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
ret = multi_cpu_stop(&msdata);
/* Busy wait for completion. */
- while (!atomic_read(&done.nr_todo))
+ while (atomic_read(&done.nr_todo))
cpu_relax();
mutex_unlock(&stop_cpus_mutex);