diff options
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 12 |
1 file changed, 12 insertions, 0 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f698089e10ca..9d09f277d915 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -219,6 +219,7 @@ struct task_group {
 #ifdef CONFIG_SMP
 	atomic_long_t load_avg;
 	atomic_t runnable_avg;
+	atomic_t usage_avg;
 #endif
 #endif
 
@@ -352,6 +353,7 @@ struct cfs_rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* Required to track per-cpu representation of a task_group */
 	u32 tg_runnable_contrib;
+	u32 tg_usage_contrib;
 	unsigned long tg_load_contrib;
 
 	/*
@@ -586,6 +588,10 @@ struct rq {
 	int active_balance;
 	int push_cpu;
 	struct cpu_stop_work active_balance_work;
+#ifdef CONFIG_SCHED_HMP
+	struct task_struct *migrate_task;
+	int wake_for_idle_pull;
+#endif
 	/* cpu of this runqueue: */
 	int cpu;
 	int online;
@@ -805,6 +811,12 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+#ifdef CONFIG_SCHED_HMP
+static LIST_HEAD(hmp_domains);
+DECLARE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
+#define hmp_cpu_domain(cpu) (per_cpu(hmp_cpu_domain, (cpu)))
+#endif /* CONFIG_SCHED_HMP */
+
 #else
 
 static inline void sched_ttwu_pending(void) { }