author     Dave Martin <dave.martin@linaro.org>              2012-12-12 18:13:44 +0000
committer  Andrey Konovalov <andrey.konovalov@linaro.org>    2013-05-25 13:21:21 +0400
commit     e7a123aeb0117cc41f5dfde53d3554308a1d592e (patch)
tree       0e772cebf90fff374802c79893a8fb5f95d34248
parent     b52a4b4ba21d5d27ac49a2ff2f40f621b23db8f9 (diff)
ARM: perf: Allow multiple CPU PMUs per CPU
In a system where Linux logical CPUs can migrate between different physical CPUs, multiple CPU PMUs can logically count events for each logical CPU, as logical CPUs migrate from one cluster to another. This patch allows multiple PMUs to be registered against each CPU.

The pairing of a PMU and a CPU is represented by a struct arm_cpu_pmu, with the existing per-CPU state used by perf moving into this structure. arm_cpu_pmus are per-cpu-allocated and hang off the relevant arm_pmu structure. This arrangement allows us to find all the CPU-PMU pairings for a given PMU, but not for a given CPU. To do the latter, a list of all registered CPU PMUs is maintained, and we iterate over it whenever we need to find all of a CPU's CPU PMUs. This is not elegant, but it shouldn't be costly, since the number of distinct CPU PMUs across the system is currently expected to be low (i.e., 2 or fewer). This could be improved later.

As a side-effect, the get_hw_events() method no longer has enough context to provide an answer, because there may be multiple candidate PMUs for a CPU. This patch adds the struct arm_pmu * for the relevant PMU to this interface to resolve the ambiguity, resulting in trivial changes to various ARM PMU implementations.

Signed-off-by: Dave Martin <dave.martin@linaro.org>
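To illustrate the per-CPU lookup described above, here is a minimal sketch (not itself part of the patch) of finding a CPU's PMU by walking the global list. for_each_pmu, cpu_pmus_list, struct arm_cpu_pmu and its valid flag all come from this patch; the wrapper name find_pmu_for_cpu is hypothetical. It mirrors the __cpu_find_any_pmu() helper added to perf_event_cpu.c:

/*
 * Sketch only: walk every registered CPU PMU class and return the
 * first one that has a valid pairing for this CPU. The cost is linear
 * in the number of CPU PMU classes, expected to be 2 or fewer.
 */
static struct arm_pmu *find_pmu_for_cpu(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct arm_cpu_pmu *cpu_pmu;

	for_each_pmu(pmu, &cpu_pmus_list) {
		/* cpu_pmus is the per-cpu array hanging off each arm_pmu */
		cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, cpu);
		if (cpu_pmu->valid)
			return pmu;
	}

	return NULL;
}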
-rw-r--r--arch/arm/include/asm/pmu.h21
-rw-r--r--arch/arm/kernel/perf_event.c14
-rw-r--r--arch/arm/kernel/perf_event_cpu.c141
-rw-r--r--arch/arm/kernel/perf_event_v6.c12
-rw-r--r--arch/arm/kernel/perf_event_v7.c10
-rw-r--r--arch/arm/kernel/perf_event_xscale.c20
-rw-r--r--drivers/bus/arm-cci.c13
7 files changed, 146 insertions, 85 deletions
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0cd7824ca76..b42e63fbb21 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -13,7 +13,9 @@
#define __ARM_PMU_H__
#include <linux/interrupt.h>
+#include <linux/percpu.h>
#include <linux/perf_event.h>
+#include <linux/types.h>
/*
* struct arm_pmu_platdata - ARM PMU platform data
@@ -71,6 +73,18 @@ struct cpupmu_regs {
u32 pmxevtcnt[8];
};
+struct arm_cpu_pmu {
+ bool valid;
+
+ u32 midr_match;
+ u32 midr_mask;
+
+ struct perf_event *hw_events[ARMPMU_MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+ struct pmu_hw_events cpu_hw_events;
+ struct cpupmu_regs cpu_pmu_regs;
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -98,11 +112,16 @@ struct arm_pmu {
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
- struct pmu_hw_events *(*get_hw_events)(void);
+ struct pmu_hw_events *(*get_hw_events)(struct arm_pmu *);
+
+ struct list_head class_pmus_list;
+ struct arm_cpu_pmu __percpu *cpu_pmus;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+#define for_each_pmu(pmu, head) list_for_each_entry(pmu, head, class_pmus_list)
+
extern const struct dev_pm_ops armpmu_dev_pm_ops;
int armpmu_register(struct arm_pmu *armpmu, int type);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d847c622a7b..a60faca1f29 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -205,7 +205,7 @@ static void
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events(armpmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -223,7 +223,7 @@ static int
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events(armpmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
@@ -467,8 +467,14 @@ static int armpmu_event_init(struct perf_event *event)
static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
- int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events(armpmu);
+ int enabled;
+
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
+ BUG_ON(!hw_events->used_mask); /* TEMPORARY */
+ enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
armpmu->start(armpmu);
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 0b48a38e3cf..8b503bc6ee9 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,10 +19,13 @@
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
+#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/of.h>
+#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -31,14 +34,22 @@
#include <asm/irq_regs.h>
#include <asm/pmu.h>
-/* Set at runtime when we know what CPU type we are. */
-static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
+static LIST_HEAD(cpu_pmus_list);
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+#define cpu_for_each_pmu(pmu, cpu_pmu, cpu) \
+ for_each_pmu(pmu, &cpu_pmus_list) \
+ if (((cpu_pmu) = per_cpu_ptr((pmu)->cpu_pmus, cpu))->valid)
-static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+static struct arm_pmu *__cpu_find_any_pmu(unsigned int cpu)
+{
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+
+ cpu_for_each_pmu(pmu, cpu_pmu, cpu)
+ return pmu;
+
+ return NULL;
+}
/*
* Despite the names, these two functions are CPU-specific and are used
@@ -46,7 +57,7 @@ static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
*/
const char *perf_pmu_name(void)
{
- struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
if (!pmu)
return NULL;
@@ -56,7 +67,7 @@ EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
- struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
if (!pmu)
return 0;
@@ -70,9 +81,9 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
#include "perf_event_v6.c"
#include "perf_event_v7.c"
-static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
+static struct pmu_hw_events *cpu_pmu_get_cpu_events(struct arm_pmu *pmu)
{
- return &__get_cpu_var(cpu_hw_events);
+ return &this_cpu_ptr(pmu->cpu_pmus)->cpu_hw_events;
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
@@ -140,23 +151,27 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
-static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
+static void cpu_pmu_init(struct arm_pmu *pmu)
{
int cpu;
- for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
- struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
- events->events = per_cpu(hw_events, cpu);
- events->used_mask = per_cpu(used_mask, cpu);
+ for_each_cpu_mask(cpu, pmu->valid_cpus) {
+ struct arm_cpu_pmu *cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, cpu);
+ struct pmu_hw_events *events = &cpu_pmu->cpu_hw_events;
+
+ events->events = cpu_pmu->hw_events;
+ events->used_mask = cpu_pmu->used_mask;
raw_spin_lock_init(&events->pmu_lock);
+
+ cpu_pmu->valid = true;
}
- cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
- cpu_pmu->request_irq = cpu_pmu_request_irq;
- cpu_pmu->free_irq = cpu_pmu_free_irq;
+ pmu->get_hw_events = cpu_pmu_get_cpu_events;
+ pmu->request_irq = cpu_pmu_request_irq;
+ pmu->free_irq = cpu_pmu_free_irq;
/* Ensure the PMU has sane values out of reset. */
- if (cpu_pmu->reset)
- on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
+ if (pmu->reset)
+ on_each_cpu_mask(&pmu->valid_cpus, pmu->reset, pmu, 1);
}
/*
@@ -168,36 +183,42 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
- struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+ int ret = NOTIFY_DONE;
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
- if (pmu && pmu->reset)
- pmu->reset(pmu);
- else
- return NOTIFY_DONE;
+ cpu_for_each_pmu(pmu, cpu_pmu, (unsigned int)hcpu)
+ if (pmu->reset) {
+ pmu->reset(pmu);
+ ret = NOTIFY_OK;
+ }
- return NOTIFY_OK;
+ return ret;
}
static int cpu_pmu_pm_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
int cpu = smp_processor_id();
- struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
- struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+ int ret = NOTIFY_DONE;
- if (!pmu)
- return NOTIFY_DONE;
+ cpu_for_each_pmu(pmu, cpu_pmu, cpu) {
+ struct cpupmu_regs *pmuregs = &cpu_pmu->cpu_pmu_regs;
- if (action == CPU_PM_ENTER && pmu->save_regs) {
- pmu->save_regs(pmu, pmuregs);
- } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
- pmu->restore_regs(pmu, pmuregs);
+ if (action == CPU_PM_ENTER && pmu->save_regs)
+ pmu->save_regs(pmu, pmuregs);
+ else if (action == CPU_PM_EXIT && pmu->restore_regs)
+ pmu->restore_regs(pmu, pmuregs);
+
+ ret = NOTIFY_OK;
}
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
@@ -286,19 +307,30 @@ static int probe_current_pmu(struct arm_pmu *pmu)
return ret;
}
+static void cpu_pmu_free(struct arm_pmu *pmu)
+{
+ if (!pmu)
+ return;
+
+ free_percpu(pmu->cpu_pmus);
+ kfree(pmu);
+}
+
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
+ struct arm_cpu_pmu __percpu *cpu_pmus;
int ret = 0;
- int cpu;
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
- if (!pmu) {
- pr_info("failed to allocate PMU device!");
- return -ENOMEM;
- }
+ if (!pmu)
+ goto error_nomem;
+
+ pmu->cpu_pmus = cpu_pmus = alloc_percpu(struct arm_cpu_pmu);
+ if (!cpu_pmus)
+ goto error_nomem;
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
@@ -317,9 +349,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
/* set sibling mask to all cpu mask if socket is not specified */
if (cluster == -1 ||
cluster_to_logical_mask(cluster, &sibling_mask))
- cpumask_setall(&sibling_mask);
+ cpumask_copy(&sibling_mask, cpu_possible_mask);
smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+ pmu->cpu_pmus = cpu_pmus; /* clobbered by init_fn */
/* now set the valid_cpus after init */
cpumask_copy(&pmu->valid_cpus, &sibling_mask);
@@ -327,24 +360,26 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
ret = probe_current_pmu(pmu);
}
- if (ret) {
- pr_info("failed to probe PMU!");
- goto out_free;
- }
-
- for_each_cpu_mask(cpu, pmu->valid_cpus)
- per_cpu(cpu_pmu, cpu) = pmu;
+ if (ret)
+ goto error;
pmu->plat_device = pdev;
cpu_pmu_init(pmu);
ret = armpmu_register(pmu, -1);
- if (!ret)
- return 0;
+ if (ret)
+ goto error;
-out_free:
- pr_info("failed to register PMU devices!");
- kfree(pmu);
+ list_add(&pmu->class_pmus_list, &cpu_pmus_list);
+ goto out;
+
+error_nomem:
+ pr_warn("out of memory\n");
+ ret = -ENOMEM;
+error:
+ pr_warn("failed to register PMU device(s)!\n");
+ cpu_pmu_free(pmu);
+out:
return ret;
}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 03664b0e8fa..a191bdb9ebd 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -439,7 +439,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -477,7 +477,7 @@ armv6pmu_handle_irq(int irq_num,
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -533,7 +533,7 @@ armv6pmu_handle_irq(int irq_num,
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
@@ -545,7 +545,7 @@ static void armv6pmu_start(struct arm_pmu *cpu_pmu)
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
@@ -586,7 +586,7 @@ static void armv6pmu_disable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -621,7 +621,7 @@ static void armv6mpcore_pmu_disable_event(struct perf_event *event)
unsigned long val, mask, flags, evt = 0;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index dd1ad0ec29e..d2b75fb71e3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1000,7 +1000,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -1046,7 +1046,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -1078,7 +1078,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
u32 pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -1138,7 +1138,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
@@ -1149,7 +1149,7 @@ static void armv7pmu_start(struct arm_pmu *cpu_pmu)
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 63990c42fac..cd670eafbb5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -225,7 +225,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
unsigned long pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -285,7 +285,7 @@ static void xscale1pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
switch (idx) {
@@ -321,7 +321,7 @@ static void xscale1pmu_disable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
switch (idx) {
@@ -374,7 +374,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
@@ -386,7 +386,7 @@ static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
@@ -572,7 +572,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -626,7 +626,7 @@ static void xscale2pmu_enable_event(struct perf_event *event)
unsigned long flags, ien, evtsel;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -672,7 +672,7 @@ static void xscale2pmu_disable_event(struct perf_event *event)
unsigned long flags, ien, evtsel, of_flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -738,7 +738,7 @@ out:
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
@@ -750,7 +750,7 @@ static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc();
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index b110645bc56..eee1c5722fd 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -156,7 +156,8 @@ static u32 cci_pmu_get_max_counters(void)
return n_cnts + 1;
}
-static struct pmu_hw_events *cci_pmu_get_hw_events(void)
+static struct pmu_hw_events *cci_pmu_get_hw_events(
+ struct arm_pmu *__always_unused pmu)
{
return &cci_hw_events;
}
@@ -233,7 +234,7 @@ static int cci_pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
static irqreturn_t cci_pmu_handle_irq(int irq_num, void *dev)
{
struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
struct perf_sample_data data;
struct pt_regs *regs;
int idx;
@@ -285,7 +286,7 @@ static void cci_pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
struct hw_perf_event *hw_counter = &event->hw;
int idx = hw_counter->idx;
@@ -309,7 +310,7 @@ static void cci_pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
struct hw_perf_event *hw_counter = &event->hw;
int idx = hw_counter->idx;
@@ -330,7 +331,7 @@ static void cci_pmu_start(struct arm_pmu *cci_pmu)
u32 val;
unsigned long flags;
struct cci_drvdata *info = platform_get_drvdata(cci_pmu->plat_device);
- struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
@@ -346,7 +347,7 @@ static void cci_pmu_stop(struct arm_pmu *cci_pmu)
u32 val;
unsigned long flags;
struct cci_drvdata *info = platform_get_drvdata(cci_pmu->plat_device);
- struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);