author    Gary S. Robertson <gary.robertson@linaro.org>  2016-01-20 15:36:59 -0600
committer Gary S. Robertson <gary.robertson@linaro.org>  2016-01-20 15:36:59 -0600
commit    370e05d463d66eaada3b4bb9266f019563920eb0 (patch)
tree      348834a36436338eea1e9f17f1823cae213f92fc /kernel/sched/core.c
parent    f93328edc6d139b9b1b5ed6944f7b80b8699cdf7 (diff)
parent    906d9eadd3b6698fa6ceb9bcdf41da6aa56d0a24 (diff)
Merge tag 'lsk-v4.1-15.12-rt' of http://git.linaro.org/kernel/linux-linaro-stable into linux-linaro-lng-v4.1-rt
(refs: linux-lng-preempt-rt-4.1.14-2016.03, linux-lng-preempt-rt-4.1.14-2016.02, linux-lng-preempt-rt-4.1.14-2016.01, linux-linaro-lng-v4.1-rt)
LSK RT 15.12 v4.1

Conflicts:
	linaro/configs/linaro-base.conf
	linaro/configs/preempt-rt.conf
	linaro/configs/vexpress64.conf
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  47
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f032b682fca..26717308d9a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1558,9 +1558,9 @@ static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
check_preempt_curr(rq, p, wake_flags);
- trace_sched_wakeup(p, true);
-
p->state = TASK_RUNNING;
+ trace_sched_wakeup(p);
+
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
@@ -1784,6 +1784,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (!(wake_flags & WF_LOCK_SLEEPER))
p->saved_state = TASK_RUNNING;
+ trace_sched_waking(p);
+
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
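
After this merge the wakeup path carries two distinct events: the new sched_waking fires when try_to_wake_up() begins (possibly on a different CPU from where the task will eventually run), while sched_wakeup, per the first hunk, now fires only once p->state has become TASK_RUNNING. A hypothetical probe module for the new one-argument tracepoint; the module and message text are made up, and it assumes sched_waking is reachable from module context in this tree:

#include <linux/module.h>
#include <trace/events/sched.h>

/* Probe signature: void *data first, then the TP_PROTO(struct task_struct *p). */
static void probe_sched_waking(void *data, struct task_struct *p)
{
	pr_debug("waking %s (pid %d)\n", p->comm, p->pid);
}

static int __init waking_probe_init(void)
{
	return register_trace_sched_waking(probe_sched_waking, NULL);
}

static void __exit waking_probe_exit(void)
{
	unregister_trace_sched_waking(probe_sched_waking, NULL);
	tracepoint_synchronize_unregister();
}

module_init(waking_probe_init);
module_exit(waking_probe_exit);
MODULE_LICENSE("GPL");
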
@@ -2188,7 +2190,7 @@ void wake_up_new_task(struct task_struct *p)
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
- trace_sched_wakeup_new(p, true);
+ trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
@@ -2311,11 +2313,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
- * The test for TASK_DEAD must occur while the runqueue locks are
- * still held, otherwise prev could be scheduled on another cpu, die
- * there before we look at prev->state, and then the reference would
- * be dropped twice.
- * Manfred Spraul <manfred@colorfullife.com>
+ *
+ * We must observe prev->state before clearing prev->on_cpu (in
+ * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * running on another CPU and we could race with its RUNNING -> DEAD
+ * transition, resulting in a double drop.
*/
prev_state = prev->state;
vtime_task_switch(prev);
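
The ordering the rewritten comment demands is not visible in this hunk; it comes from a store-release when ->on_cpu is cleared. A condensed sketch of finish_lock_switch() as changed by the upstream "sched/core: Fix TASK_DEAD race in finish_task_switch()" fix, which this comment text comes from (details may differ slightly in this tree):

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * The release orders the prev->state load in finish_task_switch()
	 * before this store; only once ->on_cpu is 0 can a remote
	 * try_to_wake_up() spinning on it run prev on another CPU,
	 * where prev may then complete its RUNNING -> DEAD transition.
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
	raw_spin_unlock_irq(&rq->lock);
}
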
@@ -2456,13 +2458,20 @@ unsigned long nr_running(void)
/*
* Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race. The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
*/
bool single_task_running(void)
{
- if (cpu_rq(smp_processor_id())->nr_running == 1)
- return true;
- else
- return false;
+ return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
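
A hypothetical usage sketch that follows the caution above; poll_device_once() is a made-up helper, and the kthread is assumed to have been bound to a single CPU (e.g. via kthread_bind()) so the result stays meaningful:

static int poll_thread(void *data)
{
	while (!kthread_should_stop()) {
		if (single_task_running())
			poll_device_once();	/* hypothetical, very short work */
		else
			cond_resched();		/* another task wants this CPU */
	}
	return 0;
}
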
@@ -4489,7 +4498,7 @@ SYSCALL_DEFINE0(sched_yield)
int __sched _cond_resched(void)
{
- if (should_resched()) {
+ if (should_resched(0)) {
preempt_schedule_common();
return 1;
}
@@ -4507,7 +4516,7 @@ EXPORT_SYMBOL(_cond_resched);
*/
int __cond_resched_lock(spinlock_t *lock)
{
- int resched = should_resched();
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
int ret = 0;
lockdep_assert_held(lock);
@@ -4530,7 +4539,7 @@ int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
- if (should_resched()) {
+ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
local_bh_enable();
preempt_schedule_common();
local_bh_disable();
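
All three call sites now pass the preempt_count bias expected at that point (none, one held spinlock, or the softirq-disable offset), so a pending reschedule is detected even while that bias is in the count. The generic helper these calls resolve to, per the asm-generic variant (architectures with dedicated preempt implementations differ):

static __always_inline bool should_resched(int preempt_offset)
{
	/*
	 * True only when the count consists of exactly the expected
	 * offset and a reschedule has been requested, i.e. dropping
	 * the bias would make an immediate reschedule safe.
	 */
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}
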
@@ -5686,6 +5695,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
case CPU_STARTING:
set_cpu_rq_start_time();
return NOTIFY_OK;
+ case CPU_ONLINE:
+ /*
+ * At this point a starting CPU has marked itself as online via
+ * set_cpu_online(). But it might not yet have marked itself
+ * as active, which is essential from here on.
+ *
+ * Thus, fall-through and help the starting CPU along.
+ */
case CPU_DOWN_FAILED:
set_cpu_active((long)hcpu, true);
return NOTIFY_OK;
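
The active bit matters from here on because task-placement paths select from cpu_active_mask, not cpu_online_mask. A hypothetical helper making that dependency explicit (the function and its use are illustrative only):

static int pick_target_cpu(const struct cpumask *allowed)
{
	int cpu;

	/*
	 * CPUs that are online but not yet active are skipped, which
	 * is why CPU_ONLINE above must make sure the bit gets set.
	 */
	for_each_cpu_and(cpu, allowed, cpu_active_mask)
		return cpu;

	return -EAGAIN;		/* no active CPU yet; caller may retry */
}
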