author    morten.rasmussen@arm.com <morten.rasmussen@arm.com>    2015-07-07 19:24:19 +0100
committer Vincent Guittot <vincent.guittot@linaro.org>           2015-07-27 17:59:42 +0200
commit    0a5999b31a8b86cf507b15046ca8d4a4343ada53
tree      966678123b871a1eb8be8947ae46337d24edb803
parent    6f0be45c349aced521695106428361b8a431637e
sched: Prevent unnecessary active balance of single task in sched group
Scenarios with the busiest group having just one task and the local group being idle, on topologies with sched groups containing different numbers of cpus, manage to dodge all load-balance bailout conditions, resulting in the nr_balance_failed counter being incremented. This eventually causes a pointless active migration of the task. This patch prevents that by not incrementing the counter when the busiest group has only one task. ASYM_PACKING migrations and migrations due to reduced capacity should still take place, as these are explicitly captured by need_active_balance().

A better solution would be to not attempt the load balance in the first place, but that requires significant changes to the order of the bailout conditions and statistics gathering.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
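[Illustration only, not part of the commit: a minimal, self-contained C model of the counter update that load_balance() ends up with after this patch. The names src_grp_nr_running, nr_balance_failed and CPU_NEWLY_IDLE mirror the hunks below; the helper account_balance_failure() and the simplified cpu_idle_type values are hypothetical and exist only to make the sketch compile on its own.]

#include <stdio.h>

/* Simplified stand-in for the kernel's enum cpu_idle_type. */
enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

static unsigned int nr_balance_failed;

/*
 * Hypothetical helper modelling the patched logic: a balance failure is
 * only counted when the busiest group has more than one runnable task,
 * so a lone task left in place can no longer escalate into an active
 * migration via the failure counter.
 */
static void account_balance_failure(enum cpu_idle_type idle,
				    unsigned int src_grp_nr_running)
{
	if (idle != CPU_NEWLY_IDLE && src_grp_nr_running > 1)
		nr_balance_failed++;
}

int main(void)
{
	account_balance_failure(CPU_IDLE, 1);       /* single task: not counted */
	account_balance_failure(CPU_IDLE, 2);       /* counted as a failure */
	account_balance_failure(CPU_NEWLY_IDLE, 3); /* newly idle: not counted */
	printf("nr_balance_failed = %u\n", nr_balance_failed); /* prints 1 */
	return 0;
}

[ASYM_PACKING and reduced-capacity cases are unaffected by this counter; as the commit message notes, they are still picked up separately by need_active_balance().]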
-rw-r--r--	kernel/sched/fair.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0004f65eae53..1c9fd9fbb11a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6115,6 +6115,7 @@ struct lb_env {
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	unsigned int		src_grp_nr_running;
 	/* The set of CPUs under consideration for load-balancing */
 	struct cpumask		*cpus;
@@ -7138,6 +7139,8 @@ next_group:
 	if (env->sd->flags & SD_NUMA)
 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
 
+	env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
+
 	if (!env->sd->parent) {
 		/* update overload indicator if we are at root domain */
 		if (env->dst_rq->rd->overload != overload)
@@ -7776,7 +7779,8 @@ more_balance:
 		 * excessive cache_hot migrations and active balances.
 		 */
 		if (idle != CPU_NEWLY_IDLE)
-			sd->nr_balance_failed++;
+			if (env.src_grp_nr_running > 1)
+				sd->nr_balance_failed++;
 
 		if (need_active_balance(&env)) {
 			raw_spin_lock_irqsave(&busiest->lock, flags);