author     Chris Redpath <chris.redpath@arm.com>    2016-11-11 12:26:49 +0000
committer  Jon Medhurst <tixy@linaro.org>           2016-11-11 14:13:38 +0000
commit     6713ef82ba27ddc4eb98133ab4ba1a2ce76426c2
tree       32b87e0dd52d83111fcdd3571c4c238ad1dc5ecf
parent     107d4a259350eba2ed9b777b84126e8fa6e343e0
sched/HMP: Close race-condition window by holding task ref longer.
It is possible that we are attempting to move a task which is about to
exit. If the timing is exactly right, we take the task_struct pointer
and then, in the small window between releasing the rq lock and calling
get_task_struct() on p, the task begins to exit. For this to cause a
problem, the exit must have started but still be very early in the
cleanup process when we take the reference.

If we instead call get_task_struct() when we select the candidate
(while still holding the source rq lock, so the task cannot be
removed), we ensure that we hold a reference on the task for the whole
time we keep a copy of its task_struct pointer.

Reported-by: Xiong Gao <gaoxiong@leadcoretech.com>
Tested-by: Xiong Gao <gaoxiong@leadcoretech.com>
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
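For illustration only, below is a minimal user-space C analogue of the
reference-counting pattern the patch adopts: take the reference at the
moment the candidate is picked (while it is still pinned), drop the
reference on any candidate that is discarded, and drop it again on every
path that decides not to use the final pick. This is a sketch, not the
kernel code; the names (toy_task, toy_get, toy_put, pick_candidate) are
invented here and are not the kernel's task_struct API.

/*
 * Minimal user-space analogue of the get/put pattern adopted by the
 * patch. All names here (toy_task, toy_get, toy_put, pick_candidate)
 * are invented for this sketch; they are not the kernel task_struct API.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_task {
	int refcount;	/* stands in for the task_struct usage count */
	int load;	/* stands in for avg.load_avg_ratio in the diff */
};

static void toy_get(struct toy_task *p)
{
	p->refcount++;			/* analogous to get_task_struct(p) */
}

static void toy_put(struct toy_task *p)
{
	if (--p->refcount == 0)		/* analogous to put_task_struct(p) */
		free(p);		/* last reference gone: task is reclaimed */
}

/*
 * Scan the candidates and return the heaviest one with a reference
 * already held: the reference is taken while the candidate is picked,
 * and the reference on any previously picked candidate is dropped.
 */
static struct toy_task *pick_candidate(struct toy_task **tasks, int n)
{
	struct toy_task *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!best || tasks[i]->load > best->load) {
			if (best)
				toy_put(best);	/* discard the previous pick */
			best = tasks[i];
			toy_get(best);		/* pin the new pick */
		}
	}
	return best;	/* the caller now owns one reference */
}

int main(void)
{
	struct toy_task *a = calloc(1, sizeof(*a));
	struct toy_task *b = calloc(1, sizeof(*b));
	struct toy_task *tasks[2] = { a, b };
	struct toy_task *p;

	a->refcount = 1; a->load = 10;
	b->refcount = 1; b->load = 20;

	p = pick_candidate(tasks, 2);
	printf("picked load=%d refcount=%d\n", p->load, p->refcount);

	/* We decided not to use the pick after all, so drop our reference,
	 * mirroring the new put_task_struct() in the patch's else branch. */
	toy_put(p);

	toy_put(a);		/* drop the original references */
	toy_put(b);
	return 0;
}

Taking the reference at selection time means every later path that
abandons the candidate must pair it with a put, which is exactly what
the put_task_struct() calls added in the hunks below do.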
-rw-r--r--  kernel/sched/fair.c   6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22ce83eb73f8..05ae4bff6af0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7275,7 +7275,10 @@ static unsigned int hmp_idle_pull(int this_cpu)
curr->avg.load_avg_ratio > ratio &&
cpumask_test_cpu(this_cpu,
tsk_cpus_allowed(task_of(curr)))) {
+ if(p)
+ put_task_struct(p);
p = task_of(curr);
+ get_task_struct(p);
target = rq;
ratio = curr->avg.load_avg_ratio;
}
@@ -7288,7 +7291,6 @@ static unsigned int hmp_idle_pull(int this_cpu)
/* now we have a candidate */
raw_spin_lock_irqsave(&target->lock, flags);
if (!target->active_balance && task_rq(p) == target) {
- get_task_struct(p);
target->push_cpu = this_cpu;
target->migrate_task = p;
trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_IDLE_PULL);
@@ -7305,6 +7307,8 @@ static unsigned int hmp_idle_pull(int this_cpu)
target->active_balance = 1;
force = 1;
}
+ } else {
+ put_task_struct(p);
}
raw_spin_unlock_irqrestore(&target->lock, flags);