author	Nicolas Pitre <nicolas.pitre@linaro.org>	2013-05-27 14:09:50 -0400
committer	Jon Medhurst <tixy@linaro.org>	2013-06-12 16:03:05 +0100
commit	a33e0eb552d7e91996f55ac9d2e1dcd22ef276c6 (patch)
tree	21a9e589d33788486652accc269b561dc4df436c /arch/arm
parent	ce7174fe6130d892cc3f5bd63d0bb06cb789e703 (diff)
ARM: bL_switcher: change pairing rule to keep identical logical CPU profiles
Let's keep only those logical CPUs with the same initial CPU cluster to
create a uniform scheduler profile without having to modify any of the
probed topology and compute capacity data.

This has the potential to create a non-contiguous CPU numbering space
when the switcher is active, with a potential impact on buggy user space
tools. It is however better to fix those tools rather than to make the
switcher more intrusive.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
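[Editor's illustration] A minimal user-space sketch of the pairing rule this patch introduces, using plain arrays instead of the kernel's cpumask API. Here cluster_of[], pairing[] and NR_CPUS_DEMO are hypothetical stand-ins for MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1) lookups, the bL_switcher_cpu_pairing[] table and the kernel's NR_CPUS; they are not kernel interfaces:

#include <stdio.h>

#define NR_CPUS_DEMO 8	/* arbitrary demo bound, not the kernel's NR_CPUS */

/*
 * Pair each CPU of the first probed cluster (cluster_0) with the *last*
 * still-unpaired CPU found on any other cluster, mirroring the "odd"
 * pairing done in bL_switcher_halve_cpus() below.
 */
static void pair_cpus(const int *cluster_of, int *pairing, int nr_cpus)
{
	int available[NR_CPUS_DEMO];
	int i, j, cluster_0 = -1;

	for (i = 0; i < nr_cpus; i++) {
		available[i] = 1;
		pairing[i] = -1;
	}

	for (i = 0; i < nr_cpus; i++) {
		int match = -1;

		if (!available[i])
			continue;
		if (cluster_0 == -1)
			cluster_0 = cluster_of[i];
		/* only logical CPUs from the first cluster keep a slot */
		if (cluster_of[i] != cluster_0)
			continue;
		available[i] = 0;
		/* remember the *last* candidate to create "odd" pairing */
		for (j = 0; j < nr_cpus; j++)
			if (available[j] && cluster_of[j] != cluster_0)
				match = j;
		if (match != -1) {
			available[match] = 0;
			pairing[i] = match;
		}
	}
}

int main(void)
{
	/* e.g. a 2+3 big.LITTLE layout: CPUs 0-1 on cluster 0, CPUs 2-4 on cluster 1 */
	const int cluster_of[] = { 0, 0, 1, 1, 1 };
	int pairing[NR_CPUS_DEMO], i;

	pair_cpus(cluster_of, pairing, 5);
	for (i = 0; i < 5; i++)
		if (pairing[i] != -1)
			printf("cpu%d paired with cpu%d\n", i, pairing[i]);
	return 0;	/* prints: cpu0 paired with cpu4, cpu1 paired with cpu3 */
}

Note that only the cluster-0 logical CPUs keep a slot after halving, so if the boot order interleaves clusters the surviving CPU numbers need not be contiguous, which is the user space impact mentioned above.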
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/common/bL_switcher.c | 30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index f89e5912df8..4204cd5dfd8 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -293,7 +293,7 @@ struct bL_thread {
	void *completer_cookie;
};

-static struct bL_thread bL_threads[MAX_CPUS_PER_CLUSTER];
+static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
@@ -374,7 +374,7 @@ int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
{
	struct bL_thread *t;

-	if (cpu >= MAX_CPUS_PER_CLUSTER) {
+	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(bL_switch_request_cb);
static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
-static unsigned int bL_switcher_cpu_original_cluster[MAX_CPUS_PER_CLUSTER];
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
@@ -444,8 +444,8 @@ static void bL_switcher_restore_cpus(void)

static int bL_switcher_halve_cpus(void)
{
-	int i, j, gic_id, ret;
-	unsigned int cpu, cluster, cntpart, mask;
+	int i, j, cluster_0, gic_id, ret;
+	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
@@ -468,18 +468,30 @@ static int bL_switcher_halve_cpus(void)

	/*
	 * Now let's do the pairing. We match each CPU with another CPU
-	 * from a different cluster. To keep the logical CPUs contiguous,
-	 * the pairing is done backward from the end of the CPU list.
+	 * from a different cluster. To get a uniform scheduling behavior
+	 * without fiddling with CPU topology and compute capacity data,
+	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
+	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+		if (cluster_0 == -1)
+			cluster_0 = cluster;
+		if (cluster != cluster_0)
+			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
-			cntpart = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
-			if (cntpart != cluster)
+			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+			/*
+			 * Let's remember the last match to create "odd"
+			 * pairing on purpose in order for other code not
+			 * to assume any relation between physical and
+			 * logical CPU numbers.
+			 */
+			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {