author    Vincent Guittot <vincent.guittot@linaro.org>  2015-04-22 19:01:59 +0200
committer Vincent Guittot <vincent.guittot@linaro.org>  2015-04-22 19:08:02 +0200
commit    f5f6e5cdce0b3da764ff750bd3630a5eee5f8c60
tree      0911103a228856bf706d00b32dc8447c8bed9e5b
parent    f7beb132f2fc1b8485c5e8742c834d3842f6785a
arm64: add sched domain topology and frequency invariance (test-sched-tasks-packing-mt)
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
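
Note (illustration only, not part of the commit): a minimal standalone sketch of the
frequency-invariance arithmetic added to arch/arm64/kernel/topology.c, assuming
SCHED_CAPACITY_SCALE is 1024. freq_scale() is a hypothetical helper that mirrors
arch_scale_set_curr_freq(), shown with the Cortex-A57 clock-frequency value added
to mt8173.dtsi.

/* Standalone sketch; assumes a 64-bit unsigned long as on arm64. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Mirrors the scaling done in arch_scale_set_curr_freq(). */
static unsigned long freq_scale(unsigned long freq, unsigned long max)
{
	if (!max)
		return SCHED_CAPACITY_SCALE;
	return (freq * SCHED_CAPACITY_SCALE) / max;
}

int main(void)
{
	/* A Cortex-A57 at 1.2 GHz out of its 1.807 GHz maximum: prints 680 */
	printf("%lu\n", freq_scale(1200000000UL, 1807000000UL));
	return 0;
}
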
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi |   4
-rw-r--r--  arch/arm64/include/asm/topology.h        |   5
-rw-r--r--  arch/arm64/kernel/topology.c             | 101
3 files changed, 110 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 6ad47aa779db..698a4d67a604 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -50,6 +50,7 @@
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
+ clock-frequency = <1508000000>;
reg = <0x000>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
@@ -69,6 +70,7 @@
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
+ clock-frequency = <1508000000>;
reg = <0x001>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
@@ -77,6 +79,7 @@
cpu2: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a57";
+ clock-frequency = <1807000000>;
reg = <0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
@@ -96,6 +99,7 @@
cpu3: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a57";
+ clock-frequency = <1807000000>;
reg = <0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 7ebcd31ce51c..f880ab0b62dc 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -24,6 +24,11 @@ void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+#define arch_scale_freq_capacity arm64_arch_scale_freq_capacity
+struct sched_domain;
+extern unsigned long arm64_arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 8a5d77f1b0de..74ec308ef32e 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -343,6 +343,48 @@ static void update_cpu_capacity(unsigned int cpu)
cpu, arch_scale_cpu_capacity(NULL, cpu));
}
+
+/*
+ * Scheduler load-tracking scale-invariance
+ *
+ * Provides the scheduler with a scale-invariance correction factor that
+ * compensates for frequency scaling.
+ */
+
+static DEFINE_PER_CPU(atomic_long_t, cpu_freq_capacity);
+static DEFINE_PER_CPU(atomic_long_t, cpu_max_freq);
+
+/* cpufreq callback function setting current cpu frequency */
+void arch_scale_set_curr_freq(int cpu, unsigned long freq)
+{
+ unsigned long max = atomic_long_read(&per_cpu(cpu_max_freq, cpu));
+ unsigned long curr;
+
+ if (!max)
+ return;
+
+ curr = (freq * SCHED_CAPACITY_SCALE) / max;
+
+ atomic_long_set(&per_cpu(cpu_freq_capacity, cpu), curr);
+}
+
+/* cpufreq callback function setting max cpu frequency */
+void arch_scale_set_max_freq(int cpu, unsigned long freq)
+{
+ atomic_long_set(&per_cpu(cpu_max_freq, cpu), freq);
+}
+
+/* arch_scale_freq_capacity() implementation called from scheduler */
+unsigned long arm64_arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ unsigned long curr = atomic_long_read(&per_cpu(cpu_freq_capacity, cpu));
+
+ if (!curr)
+ return SCHED_CAPACITY_SCALE;
+
+ return curr;
+}
+
/*
* cpu topology table
*/
@@ -354,6 +396,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return &cpu_topology[cpu].core_sibling;
}
+/*
+ * The current assumption is that we can power gate each core independently.
+ * This will be superseded by DT binding once available.
+ */
+const struct cpumask *cpu_corepower_mask(int cpu)
+{
+ return &cpu_topology[cpu].thread_sibling;
+}
+
static void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -418,6 +469,54 @@ topology_populated:
update_cpu_capacity(cpuid);
}
+
+static inline int cpu_corepower_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+}
+
+/*
+ * Power thresholds should come from platform information (e.g. from DT).
+ * For now, use a default table.
+ */
+static int core_pack_threshold[2][2] = {
+ /* pack, perf */
+ { 30, 100},
+ { 20, 100},
+};
+
+static int cpu_core_th(int cpu, int index)
+{
+ if (arch_scale_cpu_capacity(NULL, cpu) < SCHED_CAPACITY_SCALE)
+ return (core_pack_threshold[1][index] * 1024) / 100;
+
+ return (core_pack_threshold[0][index] * 1024) / 100;
+}
+
+static int cluster_pack_threshold[2][2] = {
+ /* pack, perf */
+ { 50, 100},
+ { 50, 70},
+};
+
+static int cpu_cluster_th(int cpu, int index)
+{
+
+ if (arch_scale_cpu_capacity(NULL, cpu) < SCHED_CAPACITY_SCALE)
+ return (cluster_pack_threshold[1][index] * 1024) / 100;
+
+ return (cluster_pack_threshold[0][index] * 1024) / 100;
+}
+
+static struct sched_domain_topology_level arm_topology[] = {
+#ifdef CONFIG_SCHED_MC
+ { cpu_corepower_mask, cpu_corepower_flags, cpu_core_th, SD_INIT_NAME(GMC) },
+ { cpu_coregroup_mask, cpu_core_flags, cpu_core_th, SD_INIT_NAME(MC) },
+#endif
+ { cpu_cpu_mask, NULL, cpu_cluster_th, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
static void __init reset_cpu_topology(void)
{
unsigned int cpu;
@@ -457,4 +556,6 @@ void __init init_cpu_topology(void)
reset_cpu_capacity();
parse_dt_cpu_capacity();
+ /* Set scheduler topology descriptor */
+ set_sched_topology(arm_topology);
}
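
For reference (illustration only, not part of the patch): the percentage tables in
cpu_core_th()/cpu_cluster_th() map to SCHED_CAPACITY_SCALE units with the integer
arithmetic shown above. pct_to_capacity() below is a hypothetical standalone helper
that reproduces that conversion.

/* Standalone sketch of the threshold conversion used by the packing code above. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024

/* Same integer arithmetic as cpu_core_th()/cpu_cluster_th(). */
static int pct_to_capacity(int pct)
{
	return (pct * SCHED_CAPACITY_SCALE) / 100;
}

int main(void)
{
	/* Core-level pack thresholds: 30% -> 307 (big CPUs), 20% -> 204 (little CPUs) */
	printf("core pack: big=%d little=%d\n",
	       pct_to_capacity(30), pct_to_capacity(20));
	/* Cluster-level perf thresholds: 100% -> 1024 (big), 70% -> 716 (little) */
	printf("cluster perf: big=%d little=%d\n",
	       pct_to_capacity(100), pct_to_capacity(70));
	return 0;
}
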