author      Gary S. Robertson <gary.robertson@linaro.org>   2015-12-16 20:33:52 -0600
committer   Gary S. Robertson <gary.robertson@linaro.org>   2015-12-16 20:33:52 -0600
commit      75e2d5300fc98dccb3b99031daf1081a8952a738 (patch)
tree        df130108bbac39067b9ad1cbda2f4fd4116da59e /kernel/sched/core.c
parent      fe7e08f8621d68222f1c65599d8c8d10f0cf5164 (diff)
parent      6844488aaa6b23d45106c9c3c3ba2f2ee4a612f9 (diff)
Merge tag 'lsk-v4.1-15.11' of http://git.linaro.org/kernel/linux-linaro-stable into linux-linaro-lng-v4.1 (tag: linux-lng-4.1.13-2015.12)
LSK 15.11 v4.1
Signed-off-by: Gary S. Robertson <gary.robertson@linaro.org>
Conflicts:
linaro/configs/preempt-rt.conf
linaro/configs/vexpress64.conf
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 430f04c6b471..a8a1bab4e595 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2217,11 +2217,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
 	 * schedule one last time. The schedule call will never return, and
 	 * the scheduled task must drop that reference.
-	 * The test for TASK_DEAD must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
+	 *
+	 * We must observe prev->state before clearing prev->on_cpu (in
+	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
+	 * running on another CPU and we could rave with its RUNNING -> DEAD
+	 * transition, resulting in a double drop.
 	 */
 	prev_state = prev->state;
 	vtime_task_switch(prev);
@@ -2358,13 +2358,20 @@ unsigned long nr_running(void)
 
 /*
  * Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race. The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
  */
 bool single_task_running(void)
 {
-	if (cpu_rq(smp_processor_id())->nr_running == 1)
-		return true;
-	else
-		return false;
+	return raw_rq()->nr_running == 1;
 }
 EXPORT_SYMBOL(single_task_running);
 
@@ -4239,7 +4246,7 @@ SYSCALL_DEFINE0(sched_yield)
 
 int __sched _cond_resched(void)
 {
-	if (should_resched()) {
+	if (should_resched(0)) {
 		preempt_schedule_common();
 		return 1;
 	}
@@ -4257,7 +4264,7 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-	int resched = should_resched();
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
 	int ret = 0;
 
 	lockdep_assert_held(lock);
@@ -4279,7 +4286,7 @@ int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
-	if (should_resched()) {
+	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
 		local_bh_enable();
 		preempt_schedule_common();
 		local_bh_disable();
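Note on the first hunk: the old comment framed the TASK_DEAD check as a locking rule ("while the runqueue locks are still held"), while the new one states the real requirement, which is ordering. A minimal sketch of that ordering, using assumed names and simplified control flow rather than the kernel's verbatim code:

#include <linux/sched.h>

/* Old CPU, context-switch path: sample the outgoing task's state
 * BEFORE publishing that it is off-CPU, then do at most one drop. */
static void sketch_finish_switch(struct task_struct *prev)
{
	long prev_state = READ_ONCE(prev->state);   /* RUNNING or DEAD? */

	smp_store_release(&prev->on_cpu, 0);        /* prev may now run elsewhere */

	if (prev_state == TASK_DEAD)
		put_task_struct(prev);              /* exactly one reference drop */
}

/* Another CPU, wakeup path: the acquire pairs with the release above,
 * so prev cannot start running (and reach its RUNNING -> DEAD
 * transition) before the old CPU has already sampled prev->state. */
static void sketch_wait_for_switch_out(struct task_struct *p)
{
	while (smp_load_acquire(&p->on_cpu))
		cpu_relax();
}

If prev->state were sampled only after the release store, a concurrent wakeup could slip in between the two, prev could run to completion and set TASK_DEAD on another CPU, and both CPUs would then drop the task reference: the double drop the comment warns about.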
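The single_task_running() caution is a time-of-check-to-time-of-use warning: nothing pins the caller to the CPU whose runqueue it just sampled. Its third suggested use, a polling loop with very short iterations, could look like the following sketch; the my_* names are hypothetical, and only single_task_running(), ktime_get_ns() and cpu_relax() are real kernel APIs.

#include <linux/ktime.h>
#include <linux/sched.h>

struct my_device;                                      /* hypothetical device type */
extern bool my_event_pending(struct my_device *dev);  /* hypothetical helper */

/* Poll for a device event, but only while this CPU has nothing else
 * runnable and the timeout has not expired. */
static bool my_poll_for_event(struct my_device *dev, u64 timeout_ns)
{
	u64 start = ktime_get_ns();

	do {
		if (my_event_pending(dev))
			return true;
		cpu_relax();                   /* keep iterations short and cheap */
	} while (single_task_running() &&      /* back off once the CPU gets busy */
		 ktime_get_ns() - start < timeout_ns);

	return false;
}

Because the check is repeated on every iteration, a stale answer costs at most one extra pass through the loop; this is the same shape as KVM's halt-polling loop, the helper's best-known in-tree user.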
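The last three hunks switch should_resched() from an implicit "preempt count must be zero" test to an explicit expected-offset argument. A rough model of the resulting check, paraphrased rather than copied from the kernel's implementation:

#include <linux/preempt.h>
#include <linux/thread_info.h>

/* Paraphrase of the new should_resched(): a reschedule is possible only
 * when the whole preempt count consists of exactly the nesting the
 * caller is about to drop, and a reschedule has been requested. */
static inline bool model_should_resched(int preempt_offset)
{
	return preempt_count() == preempt_offset && tif_need_resched();
}

That is why each caller passes its own context: _cond_resched() has no nesting to drop and passes 0; __cond_resched_lock() holds one spinlock, so on preemption-counting kernels its preempt count is PREEMPT_LOCK_OFFSET, a value the old zero-only test could never match; __cond_resched_softirq() likewise passes SOFTIRQ_DISABLE_OFFSET for its bh-disabled section.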