author     Vincent Guittot <vincent.guittot@linaro.org>  2023-12-20 17:59:53 +0100
committer  Vincent Guittot <vincent.guittot@linaro.org>  2023-12-23 09:50:08 +0100
commit     7b0220d0a188d1d855a2c03fe29acf4c03bd1919 (patch)
tree       3b29f340678642414683e055b0f305eed1574e3b
parent     418146e39891ef1fb2284dee4cabbfe616cd21cf (diff)
sched: Fix tg->load when offlining a CPU (branch: sched/fix-tg-load-cpu-offline)
When a CPU is taken offline, the contribution of its cfs_rqs to the task
groups' load may remain and skew the share computed for the CPUs that
are still online.

Clear the contribution of an offlining CPU to the task groups' load and
skip its contribution while it is inactive.

Reported-by: Imran Khan <imran.f.khan@oracle.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
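To make the effect concrete, here is a minimal user-space C sketch of the
arithmetic; the names (cpu_contrib, update_contrib, clear_contrib) are
hypothetical, and only the add/clear pattern mirrors what
update_tg_load_avg() and clear_tg_load_avg() do in the patch below:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 2

static atomic_long tg_load_avg;      /* models tg->load_avg */
static long cpu_contrib[NR_CPUS];    /* models cfs_rq->tg_load_avg_contrib */

/* publish this CPU's load delta, as update_tg_load_avg() does */
static void update_contrib(int cpu, long load)
{
	atomic_fetch_add(&tg_load_avg, load - cpu_contrib[cpu]);
	cpu_contrib[cpu] = load;
}

/* remove a dead CPU's leftover contribution, as clear_tg_load_avg() does */
static void clear_contrib(int cpu)
{
	atomic_fetch_add(&tg_load_avg, -cpu_contrib[cpu]);
	cpu_contrib[cpu] = 0;
}

int main(void)
{
	update_contrib(0, 1024);
	update_contrib(1, 1024);

	/* "offline" cpu1 without clearing: cpu0's share stays diluted */
	printf("cpu0 share: %ld/%ld\n", cpu_contrib[0],
	       atomic_load(&tg_load_avg));	/* 1024/2048 */

	clear_contrib(1);	/* what clear_tg_offline_cfs_rqs() now does */
	printf("cpu0 share: %ld/%ld\n", cpu_contrib[0],
	       atomic_load(&tg_load_avg));	/* 1024/1024 */
	return 0;
}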
 kernel/sched/fair.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bcea3d55d95d..4b09237d24b9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4100,6 +4100,10 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
 	if (cfs_rq->tg == &root_task_group)
 		return;
 
+	/* rq has been offlined and no longer contributes to the share */
+	if (!cpu_active(cpu_of(rq_of(cfs_rq))))
+		return;
+
 	/*
 	 * For migration heavy workloads, access to tg->load_avg can be
 	 * unbound. Limit the update rate to at most once per ms.
@@ -4116,6 +4120,49 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
 	}
 }
 
+static inline void clear_tg_load_avg(struct cfs_rq *cfs_rq)
+{
+	long delta;
+	u64 now;
+
+	/*
+	 * No need to update load_avg for root_task_group, as it is not used.
+	 */
+	if (cfs_rq->tg == &root_task_group)
+		return;
+
+	now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
+	delta = 0 - cfs_rq->tg_load_avg_contrib;
+	atomic_long_add(delta, &cfs_rq->tg->load_avg);
+	cfs_rq->tg_load_avg_contrib = 0;
+	cfs_rq->last_update_tg_load_avg = now;
+}
+
+/* CPU offline callback */
+static void __maybe_unused clear_tg_offline_cfs_rqs(struct rq *rq)
+{
+	struct task_group *tg;
+
+	lockdep_assert_rq_held(rq);
+
+	/*
+	 * The rq clock has already been updated in set_rq_offline(),
+	 * so we should skip updating the rq clock again in
+	 * unthrottle_cfs_rq().
+	 */
+	rq_clock_start_loop_update(rq);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+
+		clear_tg_load_avg(cfs_rq);
+	}
+	rcu_read_unlock();
+
+	rq_clock_stop_loop_update(rq);
+}
+
 /*
  * Called within set_task_rq() right before setting a task's CPU. The
  * caller only guarantees p->pi_lock is held; no other assumptions,
@@ -4412,6 +4459,8 @@ static inline bool skip_blocked_update(struct sched_entity *se)
 
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
 
+static inline void clear_tg_offline_cfs_rqs(struct rq *rq) {}
+
 static inline int propagate_entity_load_avg(struct sched_entity *se)
 {
 	return 0;
@@ -12446,6 +12495,9 @@ static void rq_offline_fair(struct rq *rq)
 	/* Ensure any throttled groups are reachable by pick_next_task */
 	unthrottle_offline_cfs_rqs(rq);
+
+	/* Ensure that we remove the rq's contribution to the group share */
+	clear_tg_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
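
For context on why the stale contribution matters: group shares are
distributed roughly in proportion to a cfs_rq's load over tg->load_avg
(see calc_group_shares() in kernel/sched/fair.c). The helper below is a
simplified, illustrative sketch of that proportionality only, not the
kernel's exact formula, which additionally clamps the result:

/*
 * Simplified sketch; calc_group_shares() also clamps the result
 * between MIN_SHARES and tg->shares.
 */
static long approx_group_shares(long tg_shares, long cfs_rq_load,
				long tg_load_avg)
{
	/*
	 * A dead CPU's leftover load inflates tg_load_avg, shrinking
	 * the share computed for every online CPU.
	 */
	if (!tg_load_avg)
		return tg_shares;
	return tg_shares * cfs_rq_load / tg_load_avg;
}

With a dead CPU's leftover load still counted in tg->load_avg, each
online CPU's computed share shrinks, which is exactly what clearing the
contribution on offline prevents.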