aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMorten Rasmussen <morten.rasmussen@arm.com>2015-01-02 17:08:52 +0000
committerJuri Lelli <juri.lelli@arm.com>2015-03-03 16:42:57 +0000
commit7dc8235d02c9a9c34a4f6e223bbc913bc97b5dc6 (patch)
tree4bb3f4a4a5413b739c650a1f06c7ab89a104817f
parent1a7c669f17271b162bf2672fbec20db0fd8cf8f5 (diff)
sched: Highest energy aware balancing sched_domain level pointer
Add another member to the family of per-cpu sched_domain shortcut pointers. This one, sd_ea, points to the highest level at which the energy model is provided. At this level, and at all levels below, all sched_groups have energy model data attached. Cc: Ingo Molnar <mingo@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
-rw-r--r--kernel/sched/core.c11
-rw-r--r--kernel/sched/sched.h1
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c42bc7659eb..bf57bd19ad9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5069,11 +5069,12 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
#ifdef CONFIG_NUMA_BALANCING
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
#endif
+DEFINE_PER_CPU(struct sched_domain *, sd_ea);
static void update_top_cache_domain(int cpu)
{
struct sched_domain *sd;
- struct sched_domain *busy_sd = NULL;
+ struct sched_domain *busy_sd = NULL, *ea_sd = NULL;
int id = cpu;
int size = 1;
@@ -5096,6 +5097,14 @@ static void update_top_cache_domain(int cpu)
sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+
+ for_each_domain(cpu, sd) {
+ if (sd->groups->sge)
+ ea_sd = sd;
+ else
+ break;
+ }
+ rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b0e097461a8..291323ed12c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -611,6 +611,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+DECLARE_PER_CPU(struct sched_domain *, sd_ea);
struct sched_group_capacity {
atomic_t ref;