author    Peter Zijlstra <peterz@infradead.org>  2021-01-15 18:17:45 +0100
committer Peter Zijlstra <peterz@infradead.org>  2021-01-22 15:09:42 +0100
commit    22f667c97aadbf481e2cae2d6feabdf431e27b31
tree      0fc6178d3bbc8ecbef825d781d73f173a4cdd69c
parent    547a77d02f8cfb345631ce23b5b548d27afa0fc4
sched: Don't run cpu-online with balance_push() enabled
We don't need to push away tasks when we come online; mark the push
complete right before the CPU dies.

XXX hotplug state machine has trouble with rollback here.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210121103506.415606087@infradead.org
-rw-r--r--  kernel/sched/core.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
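The ordering the message describes is easier to see outside the kernel: the push
callback is installed when a CPU starts going down, cleared once the CPU is
actually dead, and cleared again on the online path so a hotplug-state-machine
rollback never leaves an onlining CPU with push still armed. Below is a minimal
user-space sketch of that on/off pattern; the names mirror the patch, but
struct rq, the hotplug entry points and the assert() checks are mock stand-ins,
not the kernel implementation.

/*
 * Toy user-space model of the pattern described above: not kernel code.
 * struct rq, balance_push_set() and the hotplug entry points below only
 * keep the callback-pointer bookkeeping, with assert() in place of
 * WARN_ON_ONCE() and no locking.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct rq {
	void (*balance_callback)(struct rq *rq);
};

static struct rq runqueues[NR_CPUS];

/* In the kernel this callback pushes non-pinned tasks off the dying CPU. */
static void balance_push(struct rq *rq)
{
	(void)rq;
}

/* Post-patch shape: warn on double install, only clear our own callback. */
static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = &runqueues[cpu];

	if (on) {
		assert(!rq->balance_callback);
		rq->balance_callback = balance_push;
	} else if (rq->balance_callback == balance_push) {
		rq->balance_callback = NULL;
	}
}

/* Mock hotplug entry points. */
static void cpu_deactivate(int cpu) { balance_push_set(cpu, true);  }
static void cpu_dying(int cpu)      { balance_push_set(cpu, false); }
static void cpu_activate(int cpu)   { balance_push_set(cpu, false); }

int main(void)
{
	/* Normal offline: push stays armed while going down, cleared once dead. */
	cpu_deactivate(1);
	cpu_dying(1);
	assert(runqueues[1].balance_callback == NULL);

	/* Rollback: offline aborted, the activate path must clear push again. */
	cpu_deactivate(2);
	cpu_activate(2);
	assert(runqueues[2].balance_callback == NULL);

	puts("balance_push cleared on both the dying and the rollback path");
	return 0;
}

Running it prints the final message; tripping the assert() in
balance_push_set() is the user-space analogue of the new WARN_ON_ONCE().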
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 627534fa9196..8da0fd7f3cca 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7320,10 +7320,12 @@ static void balance_push_set(int cpu, bool on)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
-	if (on)
+	if (on) {
+		WARN_ON_ONCE(rq->balance_callback);
 		rq->balance_callback = &balance_push_callback;
-	else
+	} else if (rq->balance_callback == &balance_push_callback) {
 		rq->balance_callback = NULL;
+	}
 	rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7441,6 +7443,10 @@ int sched_cpu_activate(unsigned int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
+	/*
+	 * Make sure that when the hotplug state machine does a roll-back
+	 * we clear balance_push. Ideally that would happen earlier...
+	 */
 	balance_push_set(cpu, false);
 
 #ifdef CONFIG_SCHED_SMT
@@ -7608,6 +7614,12 @@ int sched_cpu_dying(unsigned int cpu)
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
+	/*
+	 * Now that the CPU is offline, make sure we're welcome
+	 * to new tasks once we come back up.
+	 */
+	balance_push_set(cpu, false);
+
 	calc_load_migrate(rq);
 	update_max_interval();
 	nohz_balance_exit_idle(rq);
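For reference, this is how balance_push_set() reads with the first hunk applied,
assembled from the diff above; the lines not shown in the hunk (the rq
declaration) and the surrounding context (cpu_rq(), rq_lock_irqsave(), the
balance_push_callback object) are assumed from kernel/sched/core.c, so the
fragment is illustrative rather than standalone.

/*
 * balance_push_set() with the hunk above applied; kernel context assumed,
 * not reproduced here.
 */
static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (on) {
		/* Installing twice would clobber a pending callback. */
		WARN_ON_ONCE(rq->balance_callback);
		rq->balance_callback = &balance_push_callback;
	} else if (rq->balance_callback == &balance_push_callback) {
		/* Only remove our own callback; leave anything else alone. */
		rq->balance_callback = NULL;
	}
	rq_unlock_irqrestore(rq, &rf);
}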