author		Chris Redpath <chris.redpath@arm.com>	2013-06-17 15:48:15 +0100
committer	Jon Medhurst <tixy@linaro.org>	2013-07-17 11:32:28 +0100
commit		ede58a69a32b187899e6cccbbd299a04d3f50b71 (patch)
tree		a976514d5148fdd56bcb0983ad7a2f9e389c0ba9
parent		7362251d8a422dcba5c56408b92fc2b6ad03b10c (diff)
HMP: Use unweighted load for hmp migration decisions
Normal task and runqueue loading is scaled according to priority to end up with a weighted load, known as the contribution. We want CPU time to be allotted according to priority, but we also want to make big/little decisions based upon raw load. It is common, for example, for Android apps following the dev guide to end up with all their long-running or async action threads at low priority unless they override the AsyncThread constructor. All these threads are such low priority that they become invisible to the hmp_offload routine. Using unweighted load here allows us to maximise CPU usage in busy situations.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
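To make the weighting effect concrete, here is a standalone user-space sketch (not part of the patch; the ~80%-busy worker and the nice +10 weight of 110 are illustrative assumptions) showing how a priority-weighted contribution hides a genuinely busy low-priority thread that its unweighted load would reveal:

/*
 * Illustrative sketch only -- not kernel code. CFS scales a task's load
 * by its priority weight, so a nice +10 worker that is ~80% busy ends up
 * with a tiny weighted contribution while its unweighted load still
 * reflects the real CPU usage.
 */
#include <stdio.h>

#define NICE_0_LOAD	1024	/* weight of a nice-0 task */

int main(void)
{
	unsigned long raw_load = 819;		/* ~80% busy on a 0..1024 scale (assumed) */
	unsigned long nice10_weight = 110;	/* approx. weight of a nice +10 task */

	/* weighted "contribution": raw load scaled by priority weight */
	unsigned long weighted = raw_load * nice10_weight / NICE_0_LOAD;

	printf("unweighted load: %lu/1024\n", raw_load);	/* ~819 -> clearly busy */
	printf("weighted load:   %lu/1024\n", weighted);	/* ~87  -> looks nearly idle */
	return 0;
}

On the weighted scale the worker looks like ~8% load and never trips the offload thresholds; the unweighted figure keeps it visible.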
-rw-r--r--	kernel/sched/fair.c	33
1 file changed, 17 insertions, 16 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 90f61d848cb..a90a63807cf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3841,20 +3841,24 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
 						int *min_cpu)
 {
 	int cpu;
-	int min_load = INT_MAX;
-	int min_cpu_temp = NR_CPUS;
+	int min_cpu_runnable_temp = NR_CPUS;
+	unsigned long min_runnable_load = INT_MAX;
+	unsigned long contrib;
 
 	for_each_cpu_mask(cpu, hmpd->cpus) {
-		if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
-			min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
-			min_cpu_temp = cpu;
+		/* don't use the divisor in the loop, just at the end */
+		contrib = cpu_rq(cpu)->avg.runnable_avg_sum * scale_load_down(1024);
+		if (contrib < min_runnable_load) {
+			min_runnable_load = contrib;
+			min_cpu_runnable_temp = cpu;
 		}
 	}
 
 	if (min_cpu)
-		*min_cpu = min_cpu_temp;
+		*min_cpu = min_cpu_runnable_temp;
 
-	return min_load;
+	/* domain will often have at least one empty CPU */
+	return min_runnable_load ? min_runnable_load / (LOAD_AVG_MAX + 1) : 0;
 }
 
 /*
@@ -3882,22 +3886,18 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 		return NR_CPUS;
 
 	/* Is the current domain fully loaded? */
-	/* load < ~94% */
+	/* load < ~50% */
 	min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
-	if (min_usage < NICE_0_LOAD-64)
-		return NR_CPUS;
-
-	/* Is the cpu oversubscribed? */
-	/* load < ~194% */
-	if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+	if (min_usage < (NICE_0_LOAD>>1))
 		return NR_CPUS;
 
 	/* Is the task alone on the cpu? */
-	if (cpu_rq(cpu)->nr_running < 2)
+	if (cpu_rq(cpu)->cfs.nr_running < 2)
 		return NR_CPUS;
 
 	/* Is the task actually starving? */
-	if (hmp_task_starvation(se) > 768) /* <25% waiting */
+	/* >=25% ratio running/runnable = starving */
+	if (hmp_task_starvation(se) > 768)
 		return NR_CPUS;
 
 	/* Does the slower domain have spare cycles? */
@@ -3908,6 +3908,7 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 
 	if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
 		return dest_cpu;
+
 	return NR_CPUS;
 }
 #endif /* CONFIG_SCHED_HMP */
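As a worked example of the new arithmetic (standalone sketch; the ~30%-busy CPU is an assumed value, LOAD_AVG_MAX is the usual maximum of the per-entity runnable sum), hmp_domain_min_load() now scales the unweighted runnable sum into the same 0..1024 range that the NICE_0_LOAD>>1 (~50%) check in hmp_offload_down() expects:

/*
 * Standalone sketch of the return-value scaling above -- not kernel code.
 * runnable_avg_sum saturates at LOAD_AVG_MAX for a fully busy CPU, so
 * dividing by (LOAD_AVG_MAX + 1) after multiplying by 1024 yields a
 * priority-independent usage figure on a 0..1024 scale.
 */
#include <stdio.h>

#define LOAD_AVG_MAX	47742	/* max runnable_avg_sum (fully busy CPU) */
#define NICE_0_LOAD	1024

int main(void)
{
	/* assume the least-loaded CPU of the current domain is ~30% busy */
	unsigned long runnable_avg_sum = 14323;

	/* scale_load_down(1024) is simply 1024 on 32-bit builds */
	unsigned long contrib = runnable_avg_sum * 1024UL;
	unsigned long min_usage = contrib ? contrib / (LOAD_AVG_MAX + 1) : 0;

	printf("min_usage = %lu (threshold NICE_0_LOAD>>1 = %d)\n",
	       min_usage, NICE_0_LOAD >> 1);

	if (min_usage < (NICE_0_LOAD >> 1))
		printf("current domain not fully loaded -> no offload\n");
	return 0;
}

With ~30% usage the result is ~307, well under the 512 threshold, so hmp_offload_down() keeps the task where it is; only when every CPU in the domain exceeds ~50% does the routine move on to the starvation and slower-domain checks.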