author | Alex Shi <alex.shi@intel.com> | 2012-12-19 17:11:14 +0800
committer | Viresh Kumar <viresh.kumar@linaro.org> | 2013-03-12 15:01:51 +0530
commit | 56f2c5357692853ea09b7172fa203b9ede0c99fa (patch)
tree | a77f06b9002d8c919f46cf3045dda0ed9d8817ba
parent | 12e191a189226d7a15f859f869b525b6dbd0dd15 (diff)
sched: consider runnable load average in effective_load
effective_load() calculates the load change as seen from the
root_task_group. It needs to take the runnable load average of the
changed task into account.

Thanks to Morten Rasmussen for pointing this out.
Signed-off-by: Alex Shi <alex.shi@intel.com>
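
To make the scaling concrete, here is a minimal userspace sketch (not the
kernel code itself) of the runnable-average math the patch adds. The struct
is a stripped-down stand-in for the kernel's struct sched_avg, and the
NICE_0_LOAD/NICE_0_SHIFT values (1024 / 10) are assumed from their
conventional nice-0 settings of the era; all numbers are invented.

#include <stdio.h>

#define NICE_0_SHIFT	10			/* assumed nice-0 resolution */
#define NICE_0_LOAD	(1L << NICE_0_SHIFT)	/* assumed == 1024 */

/* Stripped-down stand-in for the kernel's struct sched_avg. */
struct sched_avg {
	unsigned long runnable_avg_sum;		/* decayed time spent runnable */
	unsigned long runnable_avg_period;	/* decayed total time observed */
};

/* Runnable fraction in fixed point, 0..NICE_0_LOAD (~0%..100%). */
static long runnable_avg(const struct sched_avg *sa)
{
	/* the +1 mirrors the patch and avoids dividing by a zero period */
	return sa->runnable_avg_sum * NICE_0_LOAD
			/ (sa->runnable_avg_period + 1);
}

int main(void)
{
	/* invented numbers: task was runnable ~half the time it was observed */
	struct sched_avg sa = { .runnable_avg_sum = 23000,
				.runnable_avg_period = 46000 };
	long wl = 512;	/* hypothetical effective_load() result */

	/* the scaled delta comes out to roughly wl / 2 for a ~50% runnable task */
	printf("scaled load delta: %ld\n", wl * runnable_avg(&sa) >> NICE_0_SHIFT);
	return 0;
}

A task that is runnable only half the time thus contributes only half of its
weight-based load delta to the wake-affine decision below.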
-rw-r--r-- | kernel/sched/fair.c | 27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 96a4ddce4b8..cf97b67fded 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2979,7 +2979,8 @@ static void task_waking_fair(struct task_struct *p)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
- * effective_load() calculates the load change as seen from the root_task_group
+ * effective_load() calculates the runnable load average change as seen from
+ * the root_task_group
  *
  * Adding load to a group doesn't make a group heavier, but can cause movement
  * of group shares between cpus. Assuming the shares were perfectly aligned one
@@ -3027,6 +3028,9 @@ static void task_waking_fair(struct task_struct *p)
  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
  * 4/7) times the weight of the group.
+ *
+ * After computing effective_load() for the moved load, scale it by the
+ * sched entity's runnable average.
  */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
@@ -3101,6 +3105,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
+	int runnable_avg;
 
 	idx	  = sd->wake_idx;
 	this_cpu  = smp_processor_id();
@@ -3116,13 +3121,19 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
+		runnable_avg = current->se.avg.runnable_avg_sum * NICE_0_LOAD
+				/ (current->se.avg.runnable_avg_period + 1);
 
-		this_load += effective_load(tg, this_cpu, -weight, -weight);
-		load += effective_load(tg, prev_cpu, 0, -weight);
+		this_load += effective_load(tg, this_cpu, -weight, -weight)
+				* runnable_avg >> NICE_0_SHIFT;
+		load += effective_load(tg, prev_cpu, 0, -weight)
+				* runnable_avg >> NICE_0_SHIFT;
 	}
 
 	tg = task_group(p);
 	weight = p->se.load.weight;
+	runnable_avg = p->se.avg.runnable_avg_sum * NICE_0_LOAD
+			/ (p->se.avg.runnable_avg_period + 1);
 
 	/*
 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
@@ -3134,16 +3145,18 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * task to be woken on this_cpu.
 	 */
 	if (this_load > 0) {
-		s64 this_eff_load, prev_eff_load;
+		s64 this_eff_load, prev_eff_load, tmp_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
-		this_eff_load *= this_load +
-			effective_load(tg, this_cpu, weight, weight);
+		tmp_eff_load = effective_load(tg, this_cpu, weight, weight)
+				* runnable_avg >> NICE_0_SHIFT;
+		this_eff_load *= this_load + tmp_eff_load;
 
 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
 		prev_eff_load *= power_of(this_cpu);
-		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+		prev_eff_load *= load + (effective_load(tg, prev_cpu, 0, weight)
+				* runnable_avg >> NICE_0_SHIFT);
 
 		balanced = this_eff_load <= prev_eff_load;
 	} else
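
For context, the balance test the last hunk touches reduces to a weighted
comparison of the two CPUs' loads. The sketch below plugs invented numbers
into that shape to show the effect of the runnable-average scaling; the
capacities (standing in for power_of() results), loads, the single wl
standing in for the per-CPU effective_load() results, and the imbalance
percentage are all assumptions, not values from the patch.

#include <stdio.h>

#define NICE_0_SHIFT	10

int main(void)
{
	/* all numbers invented for illustration */
	long this_load = 1024, load = 2048;	/* wake_affine()'s CPU loads */
	long this_power = 1024, prev_power = 1024; /* power_of() stand-ins */
	long imbalance_pct = 125;		/* a typical sd->imbalance_pct */
	long wl = 512;				/* stand-in effective_load() delta */
	long runnable_avg = 700;		/* task runnable ~68% of the time */
	long tmp_eff_load, this_eff_load, prev_eff_load;

	/* the patch's scaling: weight the delta by the runnable fraction */
	tmp_eff_load = wl * runnable_avg >> NICE_0_SHIFT;	/* 512*700/1024 = 350 */

	this_eff_load = 100 * prev_power * (this_load + tmp_eff_load);
	prev_eff_load = (100 + (imbalance_pct - 100) / 2) * this_power
			* (load + tmp_eff_load);

	/* this_cpu is the lighter side here, so the affine wakeup is allowed */
	printf("balanced = %d\n", this_eff_load <= prev_eff_load);
	return 0;
}

The point of the patch is visible in tmp_eff_load: before it, the full
weight-based delta (wl) entered the comparison; after it, a mostly idle task
tips the scales by proportionally less.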