Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  |  44
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1a839dc713f7..c42e25856f18 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1582,7 +1582,12 @@ void scheduler_ipi(void)
*/
preempt_fold_need_resched();
- if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+ if (llist_empty(&this_rq()->wake_list)
+ && !got_nohz_idle_kick()
+#ifdef CONFIG_SCHED_HMP
+ && !this_rq()->wake_for_idle_pull
+#endif
+ )
return;
/*
@@ -1608,6 +1613,11 @@ void scheduler_ipi(void)
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
+#ifdef CONFIG_SCHED_HMP
+ else if (unlikely(this_rq()->wake_for_idle_pull))
+ raise_softirq_irqoff(SCHED_SOFTIRQ);
+#endif
+
irq_exit();
}
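
These two hunks extend scheduler_ipi() for CONFIG_SCHED_HMP: the early return now also fires when the per-rq wake_for_idle_pull flag is set, and when it is, SCHED_SOFTIRQ is raised so the softirq handler can pull a task across. The flag itself is added to struct rq elsewhere in the HMP series (not shown in this file). A minimal sketch of the sending side, with a hypothetical helper name:

/*
 * Sketch only (not part of this patch): how a remote CPU might request
 * an idle pull using the wake_for_idle_pull flag tested above.
 */
static void hmp_request_idle_pull(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        rq->wake_for_idle_pull = 1;     /* observed by scheduler_ipi() on 'cpu' */
        smp_send_reschedule(cpu);       /* trigger the scheduler IPI there */
}

Resetting the flag is presumably done on the softirq side, outside this file.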
@@ -1834,6 +1844,20 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.vruntime = 0;
INIT_LIST_HEAD(&p->se.group_node);
+#ifdef CONFIG_SCHED_HMP
+ /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
+#define LOAD_AVG_MAX 47742
+ p->se.avg.hmp_last_up_migration = 0;
+ p->se.avg.hmp_last_down_migration = 0;
+ if (hmp_task_should_forkboost(p)) {
+ p->se.avg.load_avg_ratio = 1023;
+ p->se.avg.load_avg_contrib =
+ (1023 * scale_load_down(p->se.load.weight));
+ p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
+ p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
+ p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
+ }
+#endif
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
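
With HMP, __sched_fork() seeds the new task's per-entity load-tracking signals at their maximum when hmp_task_should_forkboost() (defined elsewhere in the series) approves, so a freshly forked task looks fully loaded and is eligible for placement on a fast CPU. Note that LOAD_AVG_MAX 47742 is redefined locally and, as the comment says, has to stay in sync with the load-average series in fair.c. A sketch of the same initialisation factored into a helper; the helper name is hypothetical and the values are taken verbatim from the hunk above:

#ifdef CONFIG_SCHED_HMP
/* must match the decay series used by fair.c */
#define LOAD_AVG_MAX 47742

static void hmp_fork_init_load(struct task_struct *p)
{
        p->se.avg.hmp_last_up_migration = 0;
        p->se.avg.hmp_last_down_migration = 0;

        if (!hmp_task_should_forkboost(p))
                return;

        /* pretend the task has been runnable for a full decay window */
        p->se.avg.load_avg_ratio = 1023;
        p->se.avg.load_avg_contrib = 1023 * scale_load_down(p->se.load.weight);
        p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
        p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
        p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
}
#endif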
@@ -3332,6 +3356,8 @@ static void __setscheduler_params(struct task_struct *p,
set_load_weight(p);
}
+extern struct cpumask hmp_slow_cpu_mask;
+
/* Actually do priority change: must hold pi & rq lock. */
static void __setscheduler(struct rq *rq, struct task_struct *p,
const struct sched_attr *attr, bool keep_boost)
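
The extern above makes the HMP slow-cluster mask (presumably defined by the HMP code in fair.c) visible to core.c for the RT-affinity clamp in the next hunk. A sketch of the more conventional placement for such a declaration, assuming a shared scheduler header is acceptable:

/* kernel/sched/sched.h -- sketch only, not part of this patch */
#ifdef CONFIG_SCHED_HMP
extern struct cpumask hmp_slow_cpu_mask;        /* CPUs of the slow (LITTLE) cluster */
#endif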
@@ -3349,8 +3375,18 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
if (dl_prio(p->prio))
p->sched_class = &dl_sched_class;
- else if (rt_prio(p->prio))
+ else if (rt_prio(p->prio)) {
p->sched_class = &rt_sched_class;
+#ifdef CONFIG_SCHED_HMP
+ if (!cpumask_empty(&hmp_slow_cpu_mask))
+ if (cpumask_equal(&p->cpus_allowed,
+ cpu_possible_mask)) {
+ p->nr_cpus_allowed =
+ cpumask_weight(&hmp_slow_cpu_mask);
+ do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
+ }
+#endif
+ }
else
p->sched_class = &fair_sched_class;
}
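
With HMP enabled, a task switched to an RT class while its affinity is still the default (all possible CPUs) is narrowed to the slow cluster; a task with an explicit user-set affinity is left alone. For example, on a 4+4 big.LITTLE system where hmp_slow_cpu_mask covers CPUs 0-3, a freshly chrt'd SCHED_FIFO task would end up allowed only on the LITTLE cores. A sketch restating the clamp as a standalone helper (helper name hypothetical):

#ifdef CONFIG_SCHED_HMP
static void hmp_clamp_rt_affinity(struct task_struct *p)
{
        /* only act when a slow cluster exists and the task still has the
         * default "run anywhere" mask; explicit user affinity wins */
        if (cpumask_empty(&hmp_slow_cpu_mask))
                return;
        if (!cpumask_equal(&p->cpus_allowed, cpu_possible_mask))
                return;

        p->nr_cpus_allowed = cpumask_weight(&hmp_slow_cpu_mask);
        do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
}
#endif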
@@ -6242,6 +6278,10 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
#endif
} else {
sd->flags |= SD_PREFER_SIBLING;
+#ifdef CONFIG_SCHED_HMP
+ /* Disable load balance on DIE level */
+ sd->flags &= ~SD_LOAD_BALANCE;
+#endif
sd->cache_nice_tries = 1;
sd->busy_idx = 2;
sd->idle_idx = 1;
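
Clearing SD_LOAD_BALANCE at the DIE level keeps the generic balancer off the domain that spans both clusters, so cross-cluster migrations are left entirely to the HMP migration logic. A simplified sketch of why that is sufficient, modelled on the periodic balancer of this kernel era (rebalance_domains() in fair.c), which simply skips any domain without the flag:

static void rebalance_domains_sketch(struct rq *rq, enum cpu_idle_type idle)
{
        int cpu = rq->cpu;
        struct sched_domain *sd;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;       /* HMP: the DIE level is skipped here */

                /* ... regular load_balance() for this level ... */
        }
        rcu_read_unlock();
}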