author     Dave Martin <dave.martin@linaro.org>               2012-12-12 18:13:44 +0000
committer  Andrey Konovalov <andrey.konovalov@linaro.org>     2013-05-25 13:23:08 +0400
commit     9d1704fba4635ee9916a71861a66172df5ea59e7 (patch)
tree       c96fdd8ed401200e2c6ab2b329a4f3b883e5758f
parent     da0a0ae272e5c117f43da3522a01a28219ca9a31 (diff)
ARM: perf: [WIP] Initial bL switcher support
This patch adds preliminary, highly experimental perf support for CONFIG_BL_SWITCHER=y.

In this configuration, every PMU is registered as valid for every logical CPU, in a way which covers all the combinations which will be seen at runtime, regardless of whether the switcher is enabled or not.

Tracking of which PMUs are active at a given point in time is delegated to the lower-level abstractions in perf_event_v7.c.

Warning: this patch does not handle PMU interrupt affinities correctly. Because of the way the switcher pairs up CPUs, this does not cause a problem when the switcher is active; however, interrupts may be directed to the wrong CPU when the switcher is disabled. This will result in spurious interrupts and wrong event counts.

Signed-off-by: Dave Martin <dave.martin@linaro.org>
-rw-r--r--  arch/arm/include/asm/pmu.h         3
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  71
-rw-r--r--  arch/arm/kernel/perf_event_v7.c   40
3 files changed, 101 insertions, 13 deletions
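The CPU pairing that the commit message refers to is implemented by bL_get_partner() in perf_event_cpu.c below. For reference, here is a minimal standalone sketch of that lookup: given a topology table, find the logical CPU that shares thread and core IDs with a given CPU but sits on the requested cluster. The struct topo type and the 2+2 big.LITTLE layout are illustrative assumptions, not part of the patch; the kernel's real cpu_topology[] array is populated at boot.

#include <stdio.h>

#define NR_CPUS 4

struct topo {
	int thread_id;
	int core_id;
	int socket_id;	/* cluster */
};

/* Hypothetical 2+2 big.LITTLE layout: CPUs 0-1 on cluster 0, CPUs 2-3 on cluster 1. */
static const struct topo cpu_topology[NR_CPUS] = {
	{ -1, 0, 0 }, { -1, 1, 0 },
	{ -1, 0, 1 }, { -1, 1, 1 },
};

static int bL_get_partner(int cpu, int cluster)
{
	int i;

	/* Scan for a CPU with matching thread/core IDs on the target cluster. */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_topology[i].thread_id == cpu_topology[cpu].thread_id &&
		    cpu_topology[i].core_id == cpu_topology[cpu].core_id &&
		    cpu_topology[i].socket_id == cluster)
			return i;

	return -1;	/* no partner found */
}

int main(void)
{
	printf("partner of CPU 0 on cluster 1: %d\n", bL_get_partner(0, 1));	/* prints 2 */
	printf("partner of CPU 3 on cluster 0: %d\n", bL_get_partner(3, 0));	/* prints 1 */
	return 0;
}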
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b11a01a8c7c..c5ce783cbf3 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -77,8 +77,7 @@ struct arm_cpu_pmu {
bool valid;
bool active;
- u32 midr_match;
- u32 midr_mask;
+ u32 mpidr;
struct perf_event *hw_events[ARMPMU_MAX_HWEVENTS];
unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index b2f202be922..e9d1995b002 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -30,9 +30,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <asm/bL_switcher.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
+#include <asm/smp_plat.h>
+#include <asm/topology.h>
static LIST_HEAD(cpu_pmus_list);
@@ -319,6 +322,33 @@ static void cpu_pmu_free(struct arm_pmu *pmu)
kfree(pmu);
}
+/*
+ * HACK: Find a b.L switcher partner for CPU @cpu on the specified cluster.
+ * This information should be obtained from an interface provided by the
+ * switcher itself, if possible.
+ */
+#ifdef CONFIG_BL_SWITCHER
+static int bL_get_partner(int cpu, int cluster)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_topology[i].thread_id == cpu_topology[cpu].thread_id &&
+ cpu_topology[i].core_id == cpu_topology[cpu].core_id &&
+ cpu_topology[i].socket_id == cluster)
+ return i;
+ }
+
+ return -1; /* no partner found */
+}
+#else
+static int bL_get_partner(int __always_unused cpu, int __always_unused cluster)
+{
+ return -1;
+}
+#endif
+
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
@@ -340,6 +370,7 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
struct device_node *ncluster;
int cluster = -1;
cpumask_t sibling_mask;
+ unsigned int i;
ncluster = of_parse_phandle(node, "cluster", 0);
if (ncluster) {
@@ -350,12 +381,50 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
cluster = be32_to_cpup(hwid);
}
/* set sibling mask to all cpu mask if socket is not specified */
- if (cluster == -1 ||
+ /*
+ * In a switcher kernel, we affine all PMUs to CPUs and
+ * abstract the runtime presence/absence of PMUs at a lower
+ * level.
+ */
+ if (cluster == -1 || IS_ENABLED(CONFIG_BL_SWITCHER) ||
cluster_to_logical_mask(cluster, &sibling_mask))
cpumask_copy(&sibling_mask, cpu_possible_mask);
+ if (bL_switcher_get_enabled())
+ /*
+ * The switcher initialises late now, so it should not
+ * have initialised yet:
+ */
+ BUG();
+
+ /*
+ * HACK: Deduce how the switcher will modify the topology
+ * in order to fill in PMU<->CPU combinations which don't
+ * make sense when the switcher is disabled. Ideally, this
+ * knowledge should come from the switcher somehow.
+ */
+ for (i = 0; i < NR_CPUS; i++) {
+ int cpu = i;
+
+ if (cpu_topology[i].socket_id != cluster) {
+ int partner = bL_get_partner(i, cluster);
+
+ if (partner != -1)
+ cpu = partner;
+ }
+
+ per_cpu_ptr(cpu_pmus, i)->mpidr =
+ cpu_logical_map(cpu);
+ }
+
+ /*
+ * This relies on an MP view of the system to choose the right
+ * CPU to run init_fn:
+ */
smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+ bL_switcher_put_enabled();
+
/* now set the valid_cpus after init */
cpumask_copy(&pmu->valid_cpus, &sibling_mask);
} else {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1ed779ab52b..24e195a00af 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -176,6 +176,16 @@ __def_v7_pmu_reg(PMOVSSET, RW, 0, c14, 3)
#define __v7_pmu_restore_reg(cpupmu, name) \
__v7_pmu_write_physical(name, \
__v7_pmu_read_logical(cpupmu, name))
+static u32 read_mpidr(void)
+{
+ u32 result;
+
+ asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (result));
+
+ return result;
+}
+
+static void armv7pmu_reset(void *info);
/*
* Common ARMv7 event types
@@ -1133,6 +1143,8 @@ static void armv7pmu_restore_regs(struct arm_pmu *pmu,
u32 pmcr;
struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ armv7pmu_reset(pmu);
+
if (!cpupmu->active)
return;
@@ -1245,7 +1257,12 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
struct pt_regs *regs;
int idx;
- BUG_ON(!cpupmu->active);
+ if (!cpupmu->active) {
+ pr_warn_ratelimited("%s: Spurious interrupt for inactive PMU %s: event counts will be wrong.\n",
+ __func__, pmu->name);
+ pr_warn_once("This is a known interrupt affinity bug in the b.L switcher perf support.\n");
+ return IRQ_NONE;
+ }
/*
* Get and reset the IRQ flags
@@ -1379,19 +1396,24 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
+static bool check_active(struct arm_cpu_pmu *cpupmu)
+{
+ u32 mpidr = read_mpidr();
+
+ BUG_ON(!(mpidr & 0x80000000)); /* this won't work on uniprocessor */
+
+ cpupmu->active = ((mpidr ^ cpupmu->mpidr) & 0xFFFFFF) == 0;
+ return cpupmu->active;
+}
+
static void armv7pmu_reset(void *info)
{
struct arm_pmu *pmu = (struct arm_pmu *)info;
struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
u32 idx, nb_cnt = pmu->num_events;
- bool active = cpupmu->active;
- /*
- * The purpose of this function is to get the physical CPU into a
- * sane state, so make sure we're not operating on the logical CPU
- * instead:
- */
- cpupmu->active = true;
+ if (!check_active(cpupmu))
+ return;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
@@ -1401,8 +1423,6 @@ static void armv7pmu_reset(void *info)
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(cpupmu, ARMV7_PMNC_P | ARMV7_PMNC_C);
-
- cpupmu->active = active;
}
static int armv7_a8_map_event(struct perf_event *event)
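For reference, a minimal standalone sketch of the affinity check that check_active() above performs: a PMU instance is marked active only when the low 24 affinity bits (Aff0/Aff1/Aff2) of the physical CPU's MPIDR match the MPIDR recorded for that PMU at probe time. The struct pmu_state type and the sample MPIDR values (bit 31 set, Aff1 selecting the cluster) are illustrative assumptions, not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MPIDR_AFFINITY_MASK 0x00ffffffu	/* Aff2 | Aff1 | Aff0 */

/* Illustrative stand-in for the per-CPU PMU state used in the patch. */
struct pmu_state {
	uint32_t mpidr;		/* affinity recorded at probe time */
	bool active;
};

static bool check_active(struct pmu_state *pmu, uint32_t phys_mpidr)
{
	/* Active iff the affinity fields match; the upper flag bits are ignored. */
	pmu->active = ((phys_mpidr ^ pmu->mpidr) & MPIDR_AFFINITY_MASK) == 0;
	return pmu->active;
}

int main(void)
{
	/* PMU affined to cluster 1 (Aff1 = 1), core 0. */
	struct pmu_state pmu = { .mpidr = 0x80000100u };

	printf("%d\n", check_active(&pmu, 0x80000100u));	/* 1: same cluster and core */
	printf("%d\n", check_active(&pmu, 0x80000000u));	/* 0: cluster 0, so inactive */
	return 0;
}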