aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Redpath <chris.redpath@arm.com>2013-05-09 16:21:15 +0100
committerJon Medhurst <tixy@linaro.org>2013-07-17 11:32:29 +0100
commit08d7db89a214a138516419a85e17272b09180abd (patch)
tree3c33a65ffd1e6c20c885a19681f69bdccb1aab08
parentede58a69a32b187899e6cccbbd299a04d3f50b71 (diff)
HMP: Select least-loaded CPU when performing HMP Migrations
The reference patch set always selects the first CPU in an HMP domain as a migration target. In busy situations, this means that the migrated thread cannot make immediate use of an idle CPU but must share a busy one until the load balancer runs across the big domain. This patch uses the hmp_domain_min_load function, introduced in global balancing, to determine which of the CPUs is the least loaded and selects that one as the migration target - in both directions. This essentially implements a task-spread strategy and is intended to maximise performance of migrated threads, but it is likely to use more power than the packing strategy previously employed. Signed-off-by: Chris Redpath <chris.redpath@arm.com>
-rw-r--r--kernel/sched/fair.c24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a90a63807cf..d7303c5fd1e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3598,6 +3598,8 @@ unsigned int hmp_next_down_threshold = 4096;
static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
+static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
+ int *min_cpu);
/* Check if cpu is in fastest hmp_domain */
static inline unsigned int hmp_cpu_is_fastest(int cpu)
@@ -3642,7 +3644,16 @@ static inline struct hmp_domain *hmp_faster_domain(int cpu)
static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
int cpu)
{
- return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
+ int lowest_cpu=NR_CPUS;
+ __always_unused int lowest_ratio = hmp_domain_min_load(hmp_faster_domain(cpu), &lowest_cpu);
+ /*
+ * If the lowest-loaded CPU in the domain is allowed by the task affinity
+ * select that one, otherwise select one which is allowed
+ */
+ if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
+ return lowest_cpu;
+ else
+ return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
tsk_cpus_allowed(tsk));
}
@@ -3653,7 +3664,16 @@ static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
int cpu)
{
- return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
+ int lowest_cpu=NR_CPUS;
+ __always_unused int lowest_ratio = hmp_domain_min_load(hmp_slower_domain(cpu), &lowest_cpu);
+ /*
+ * If the lowest-loaded CPU in the domain is allowed by the task affinity
+ * select that one, otherwise select one which is allowed
+ */
+ if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
+ return lowest_cpu;
+ else
+ return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
tsk_cpus_allowed(tsk));
}