Diffstat (limited to 'arch/arm/include')
 arch/arm/include/asm/perf_event.h |   9
 arch/arm/include/asm/pmu.h        | 128
 arch/arm/include/asm/topology.h   |  35
 3 files changed, 105 insertions(+), 67 deletions(-)
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index e074948d814..625cd621a43 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,6 +12,13 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-/* Nothing to see here... */
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
#endif /* __ARM_PERF_EVENT_H__ */
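The C() shorthand above expands its argument into the PERF_COUNT_HW_CACHE_* enum namespace, which keeps per-PMU cache maps compact. A minimal sketch of how a driver-side cache map might use it; the map name and event numbers below are illustrative placeholders, not real ARMv7 encodings, and a real map fills every op/result slot:

static const unsigned example_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					    [PERF_COUNT_HW_CACHE_OP_MAX]
					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x04,	/* placeholder event number */
			[C(RESULT_MISS)]   = 0x03,	/* placeholder event number */
		},
		[C(OP_WRITE)] = {
			/* no matching hardware event on this imaginary PMU */
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
		},
	},
	/* remaining caches omitted for brevity */
};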
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 4432305f4a2..4bb9cac6079 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -16,69 +16,30 @@
#include <linux/perf_event.h>
/*
- * Types of PMUs that can be accessed directly and require mutual
- * exclusion between profiling tools.
- */
-enum arm_pmu_type {
- ARM_PMU_DEVICE_CPU = 0,
- ARM_NUM_PMU_DEVICES,
-};
-
-/*
* struct arm_pmu_platdata - ARM PMU platform data
*
* @handle_irq: an optional handler which will be called from the
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
- * @enable_irq: an optional handler which will be called after
- * request_irq and be used to handle some platform specific
- * irq enablement
- * @disable_irq: an optional handler which will be called before
- * free_irq and be used to handle some platform specific
- * irq disablement
+ * @runtime_resume: an optional handler which will be called by the
+ * runtime PM framework following a call to pm_runtime_get().
+ * Note that if pm_runtime_get() is called more than once in
+ * succession this handler will only be called once.
+ * @runtime_suspend: an optional handler which will be called by the
+ * runtime PM framework following a call to pm_runtime_put().
+ * Note that if pm_runtime_get() is called more than once in
+ * succession this handler will only be called following the
+ * final call to pm_runtime_put() that actually disables the
+ * hardware.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
- void (*enable_irq)(int irq);
- void (*disable_irq)(int irq);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
};
-#ifdef CONFIG_CPU_HAS_PMU
-
-/**
- * reserve_pmu() - reserve the hardware performance counters
- *
- * Reserve the hardware performance counters in the system for exclusive use.
- * Returns 0 on success or -EBUSY if the lock is already held.
- */
-extern int
-reserve_pmu(enum arm_pmu_type type);
-
-/**
- * release_pmu() - Relinquish control of the performance counters
- *
- * Release the performance counters and allow someone else to use them.
- */
-extern void
-release_pmu(enum arm_pmu_type type);
-
-#else /* CONFIG_CPU_HAS_PMU */
-
-#include <linux/err.h>
-
-static inline int
-reserve_pmu(enum arm_pmu_type type)
-{
- return -ENODEV;
-}
-
-static inline void
-release_pmu(enum arm_pmu_type type) { }
-
-#endif /* CONFIG_CPU_HAS_PMU */
-
#ifdef CONFIG_HW_PERF_EVENTS
/* The events for a given PMU register set. */
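With enable_irq/disable_irq gone, a platform that needs to gate PMU power or clocks now does so through the runtime PM callbacks. A board-file sketch under the new platdata layout; every name here is hypothetical:

static int example_pmu_runtime_resume(struct device *dev)
{
	/* e.g. ungate the PMU clock before the counters are touched */
	return 0;
}

static int example_pmu_runtime_suspend(struct device *dev)
{
	/* e.g. gate the clock again once the PMU is idle */
	return 0;
}

static struct arm_pmu_platdata example_pmu_platdata = {
	.runtime_resume  = example_pmu_runtime_resume,
	.runtime_suspend = example_pmu_runtime_suspend,
};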
@@ -101,24 +62,37 @@ struct pmu_hw_events {
raw_spinlock_t pmu_lock;
};
+struct cpupmu_regs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
struct arm_pmu {
struct pmu pmu;
- enum arm_pmu_type type;
cpumask_t active_irqs;
+ cpumask_t cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
- void (*enable)(struct hw_perf_event *evt, int idx);
- void (*disable)(struct hw_perf_event *evt, int idx);
+ void (*enable)(struct perf_event *event);
+ void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
- struct hw_perf_event *hwc);
+ struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
- u32 (*read_counter)(int idx);
- void (*write_counter)(int idx, u32 val);
- void (*start)(void);
- void (*stop)(void);
+ u32 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u32 val);
+ void (*start)(struct arm_pmu *);
+ void (*stop)(struct arm_pmu *);
void (*reset)(void *);
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
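The save_regs/restore_regs hooks let a caller park the counter state in a struct cpupmu_regs across a power transition. A speculative sketch of such a caller, assuming a per-CPU buffer; the function and variable names are invented for illustration:

static DEFINE_PER_CPU(struct cpupmu_regs, example_saved_regs);

static void example_pmu_power_down(struct arm_pmu *pmu)
{
	if (pmu->save_regs)
		pmu->save_regs(pmu, this_cpu_ptr(&example_saved_regs));
}

static void example_pmu_power_up(struct arm_pmu *pmu)
{
	if (pmu->restore_regs)
		pmu->restore_regs(pmu, this_cpu_ptr(&example_saved_regs));
}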
@@ -129,15 +103,37 @@ struct arm_pmu {
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+struct cpu_pmu_info {
+ struct list_head entry;
+ char compatible[32];
+ unsigned long impl;
+ unsigned long part;
+ int (*init)(struct arm_pmu *);
+};
+
+#define CPUPMU_INFO_ENTRY(_compatible, _impl, _part, _init) {\
+ .compatible = _compatible, \
+ .impl = _impl, \
+ .part = _part, \
+ .init = _init, \
+ }
+
+int cpu_pmu_register(struct cpu_pmu_info *cpu_pmu);
+
+extern const struct dev_pm_ops armpmu_dev_pm_ops;
+
+int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
-u64 armpmu_event_update(struct perf_event *event,
- struct hw_perf_event *hwc,
- int idx);
+u64 armpmu_event_update(struct perf_event *event);

-int armpmu_event_set_period(struct perf_event *event,
- struct hw_perf_event *hwc,
- int idx);
+int armpmu_event_set_period(struct perf_event *event);
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
#endif /* CONFIG_HW_PERF_EVENTS */
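Under the new prototype, a driver's map_event callback reduces to a thin wrapper around armpmu_map_event(). A sketch assuming example_perf_map and example_perf_cache_map tables shaped like the prototype's parameters, with 0xFF as an illustrative raw event mask:

static int example_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &example_perf_map,
				&example_perf_cache_map, 0xFF);
}

The resulting function would then be assigned to the driver's arm_pmu map_event hook at init time.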
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd..167597d40e2 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -26,11 +26,46 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_PREFER_LOCAL \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline int cluster_to_logical_mask(unsigned int socket_id,
+ cpumask_t *cluster_mask) { return -EINVAL; }
#endif
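cluster_to_logical_mask() resolves a physical cluster (socket) id into the mask of logical CPUs it contains, returning 0 on success; the !SMP stub above always returns -EINVAL. A minimal hypothetical caller:

static void example_walk_cluster(void)
{
	cpumask_t mask;	/* on-stack mask is fine for a sketch */
	int cpu;

	if (cluster_to_logical_mask(0, &mask))
		return;

	for_each_cpu(cpu, &mask)
		pr_info("cluster 0 contains CPU %d\n", cpu);
}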