author    Morten Rasmussen <morten.rasmussen@arm.com>    2015-01-23 14:20:38 +0000
committer Robin Randhawa <robin.randhawa@arm.com>    2015-04-09 12:26:14 +0100
commit    2359e465f1f15ff9e3f7c59ea70442f6ae54e4a5 (patch)
tree      3315fea131a4c10f6e476fb8b70c4e0135dcdcf6
parent    10b7c4b6b249816aea7275ba0a3d0d551ce76961 (diff)
sched: Bias new task wakeups towards higher capacity cpus
Make wake-ups of new tasks (find_idlest_group) aware of any differences in cpu compute capacity so that new tasks don't get handed off to cpus with lower capacity.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
-rw-r--r--    kernel/sched/fair.c    15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 646744664884..a76777fa6079 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4659,6 +4659,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
{
struct sched_group *idlest = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
+ unsigned long this_cpu_cap = 0, idlest_cpu_cap = 0;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -4666,7 +4667,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load_idx = sd->wake_idx;
do {
- unsigned long load, avg_load;
+ unsigned long load, avg_load, cpu_capacity;
int local_group;
int i;
@@ -4680,6 +4681,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/* Tally up the load of all CPUs in the group */
avg_load = 0;
+ cpu_capacity = 0;
for_each_cpu(i, sched_group_cpus(group)) {
/* Bias balancing toward cpus of our domain */
@@ -4689,6 +4691,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load = target_load(i, load_idx);
avg_load += load;
+
+ if (cpu_capacity < capacity_of(i))
+ cpu_capacity = capacity_of(i);
}
/* Adjust by relative CPU capacity of the group */
@@ -4696,14 +4701,20 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
if (local_group) {
this_load = avg_load;
+ this_cpu_cap = cpu_capacity;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
+ idlest_cpu_cap = cpu_capacity;
}
} while (group = group->next, group != sd->groups);
- if (!idlest || 100*this_load < imbalance*min_load)
+ if (!idlest)
+ return NULL;
+
+ if (100*this_load < imbalance*min_load && this_cpu_cap >= idlest_cpu_cap)
return NULL;
+
return idlest;
}
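
As a rough illustration of the check this patch introduces, the standalone userspace sketch below (not kernel code) models the post-patch decision at the end of find_idlest_group(): the local group is kept (the function returns NULL) only when it both wins on load within the imbalance margin and offers at least as much cpu capacity as the idlest group. The prefer_local_group() helper and the capacity figures are made up for the example; only the two-part condition mirrors the patch.

/*
 * Minimal userspace sketch (not kernel code) of the decision added to
 * find_idlest_group() by this patch: staying in the local group now
 * requires both a load advantage within the imbalance threshold and no
 * loss of cpu capacity compared to the idlest group.
 */
#include <stdio.h>
#include <stdbool.h>

/* Returns true if the new task should stay in the local group. */
static bool prefer_local_group(unsigned long this_load,
			       unsigned long min_load,
			       unsigned long imbalance_pct,
			       unsigned long this_cpu_cap,
			       unsigned long idlest_cpu_cap)
{
	/* Same scaling as the kernel: imbalance = 100 + (pct - 100) / 2 */
	unsigned long imbalance = 100 + (imbalance_pct - 100) / 2;

	return 100 * this_load < imbalance * min_load &&
	       this_cpu_cap >= idlest_cpu_cap;
}

int main(void)
{
	/* Hypothetical big.LITTLE-style capacities (LITTLE=430, big=1024). */
	printf("local LITTLE vs idle big : stay local? %d\n",
	       prefer_local_group(100, 200, 125, 430, 1024));
	printf("local big vs idle LITTLE : stay local? %d\n",
	       prefer_local_group(100, 200, 125, 1024, 430));
	return 0;
}

In the first (hypothetical) case the local group wins on load but loses on capacity, so the task is handed to the higher-capacity group instead of staying local, which is the bias the commit message describes.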