author     Paul Turner <pjt@google.com>              2012-09-21 13:27:47 -0700
committer  Viresh Kumar <viresh.kumar@linaro.org>    2012-10-08 08:52:37 +0530
commit     e553b8d123721a3dc57b8a9856d754bbe93769a8 (patch)
tree       6a9a78f40d1ba68b9e909191c5a29d2b33d9ef44
parent     f174f34d6962790f636f71c9f0f916d48bf7b3c3 (diff)
sched: add an rq migration call-back to sched_class
Since we are now doing bottom-up load accumulation we need explicit
notification when a task has been re-parented so that the old hierarchy
can be updated.

Adds: migrate_task_rq(struct task_struct *p, int next_cpu)

(The alternative is to do this out of __set_task_cpu, but it was suggested
that this would be a cleaner encapsulation.)

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
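The shape of the hook can be seen outside the kernel. Below is a minimal
stand-alone C sketch (not kernel code; toy_task, toy_sched_class and
toy_set_task_cpu are invented names for illustration) of the pattern this
patch adds: an optional per-class callback that the core migration path
invokes through a NULL-checked function pointer, while the task still
points at its old cpu.

#include <stdio.h>

/* Toy stand-ins for the kernel types; all names are invented for this sketch. */
struct toy_sched_class;

struct toy_task {
	int cpu;	/* mirrors task_cpu(p): still the old cpu at call time */
	const struct toy_sched_class *sched_class;
};

struct toy_sched_class {
	/* Optional hook, as added by this patch; may be left NULL. */
	void (*migrate_task_rq)(struct toy_task *p, int next_cpu);
};

/* Mirrors the guarded call site this patch adds to set_task_cpu(). */
static void toy_set_task_cpu(struct toy_task *p, int new_cpu)
{
	if (p->cpu != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
	}
	p->cpu = new_cpu;	/* mirrors __set_task_cpu(): commit the move */
}

/* A class that implements the hook; p->cpu still names the old cpu here. */
static void toy_migrate_task_rq(struct toy_task *p, int next_cpu)
{
	printf("leaving cpu %d for cpu %d: old hierarchy updated here\n",
	       p->cpu, next_cpu);
}

static const struct toy_sched_class toy_fair_class = {
	.migrate_task_rq = toy_migrate_task_rq,
};

int main(void)
{
	struct toy_task p = { .cpu = 0, .sched_class = &toy_fair_class };

	toy_set_task_cpu(&p, 2);	/* prints: leaving cpu 0 for cpu 2 ... */
	return 0;
}

The NULL check is what makes the callback opt-in per scheduling class: in
the real patch only fair_sched_class wires it up, and the other classes
need no change.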
 include/linux/sched.h |  1 +
 kernel/sched/core.c   |  2 ++
 kernel/sched/fair.c   | 12 ++++++++++++
 3 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f377254521a..6446bd9f8a9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1105,6 +1105,7 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe3f3293b60..c106214f64b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1109,6 +1109,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
+		if (p->sched_class->migrate_task_rq)
+			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b93f0cf1c77..973a6b8b28d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3075,6 +3075,17 @@ unlock:
 
 	return new_cpu;
 }
+
+/*
+ * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
+ * cfs_rq_of(p) references at time of call are still valid and identify the
+ * previous cpu. However, the caller only guarantees p->pi_lock is held; no
+ * other assumptions, including the state of rq->lock, should be made.
+ */
+static void
+migrate_task_rq_fair(struct task_struct *p, int next_cpu)
+{
+}
 #endif /* CONFIG_SMP */
 
 static unsigned long
@@ -5640,6 +5651,7 @@ const struct sched_class fair_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
+	.migrate_task_rq	= migrate_task_rq_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,