path: root/arch/powerpc/perf
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 17:09:55 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 17:09:55 -0800
commit     9d3cae26acb471d5954cfdc25d1438b32060babe (patch)
tree       77e93b6fb207438f7f1f30a201cc86bc5b0ec82b /arch/powerpc/perf
parent     df24eef3e794afbac69a377d1d2e2e3f5869f67a (diff)
parent     8520e443aa56cc157b015205ea53e7b9fc831291 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "So from the depth of frozen Minnesota, here's the powerpc pull request for 3.9. It has a few interesting highlights, in addition to the usual bunch of bug fixes, minor updates, embedded device tree updates and new boards:

  - Hand-tuned asm implementation of SHA1 (by Paulus & Michael Ellerman)

  - Support for Doorbell interrupts on Power8 (a kind of fast thread-to-thread IPI) by Ian Munsie

  - Long overdue cleanup of the way we handle relocation of our open firmware trampoline (prom_init.c) on 64-bit by Anton Blanchard

  - Support for saving/restoring & context switching the PPR (Processor Priority Register) on server processors that support it. This allows the kernel to preserve thread priorities established by userspace. By Haren Myneni.

  - DAWR (new watchpoint facility) support on Power8 by Michael Neuling

  - Ability to change, via ptrace, the DSCR (Data Stream Control Register, which controls cache prefetching) of a running process, by Alexey Kardashevskiy

  - Support for context switching the TAR register on Power8 (a new branch target register meant to be used by a new userspace perf event interrupt facility which is yet to be enabled) by Ian Munsie.

  - Improve preservation of the CFAR register (which captures the origin of a branch) on various exception conditions by Paulus.

  - Move the Bestcomm DMA driver from arch/powerpc to drivers/dma where it belongs by Philippe De Muyter

  - Support for Transactional Memory on Power8 by Michael Neuling (based on original work by Matt Evans). For those curious about the feature, the patch contains a pretty good description."

(See commit db8ff907027b: "powerpc: Documentation for transactional memory on powerpc" for the mentioned description added to the file Documentation/powerpc/transactional_memory.txt)

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (140 commits)
  powerpc/kexec: Disable hard IRQ before kexec
  powerpc/85xx: l2sram - Add compatible string for BSC9131 platform
  powerpc/85xx: bsc9131 - Correct typo in SDHC device node
  powerpc/e500/qemu-e500: enable coreint
  powerpc/mpic: allow coreint to be determined by MPIC version
  powerpc/fsl_pci: Store the pci ctlr device ptr in the pci ctlr struct
  powerpc/85xx: Board support for ppa8548
  powerpc/fsl: remove extraneous DIU platform functions
  arch/powerpc/platforms/85xx/p1022_ds.c: adjust duplicate test
  powerpc: Documentation for transactional memory on powerpc
  powerpc: Add transactional memory to pseries and ppc64 defconfigs
  powerpc: Add config option for transactional memory
  powerpc: Add transactional memory to POWER8 cpu features
  powerpc: Add new transactional memory state to the signal context
  powerpc: Hook in new transactional memory code
  powerpc: Routines for FP/VSX/VMX unavailable during a transaction
  powerpc: Add transactional memory unavaliable execption handler
  powerpc: Add reclaim and recheckpoint functions for context switching transactional memory processes
  powerpc: Add FP/VSX and VMX register load functions for transactional memory
  powerpc: Add helper functions for transactional memory context switching
  ...
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--  arch/powerpc/perf/core-book3s.c  93
-rw-r--r--  arch/powerpc/perf/e500-pmu.c      2
2 files changed, 66 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fa476d50791..65362e98eb2 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -880,8 +880,16 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
+ /*
+ * This event may have been disabled/stopped in record_and_restart()
+ * because we exceeded the ->event_limit. If re-starting the event,
+ * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
+ * notification is re-enabled.
+ */
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ else
+ event->hw.state = 0;
/*
* If group events scheduling transaction was started,
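A note on the ->hw.state handling this hunk refers to: a sampling event that exceeds its ->event_limit is stopped from the overflow path, which leaves PERF_HES_STOPPED | PERF_HES_UPTODATE set until the event is added again. The following is a simplified model of that path, using the generic perf API and the function names from core-book3s.c; it is a sketch for illustration, not the verbatim kernel code.

#include <linux/perf_event.h>

/*
 * Simplified model of the overflow path that can leave an event in the
 * PERF_HES_STOPPED state until power_pmu_add() re-adds it with
 * PERF_EF_START (names as in core-book3s.c, body heavily trimmed).
 */
static void overflow_path_sketch(struct perf_event *event,
				 struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * perf_event_overflow() returns non-zero once the event has hit
	 * its ->event_limit; the PMU then stops the counter, setting
	 * PERF_HES_STOPPED | PERF_HES_UPTODATE in ->hw.state, which is
	 * exactly what the hunk above clears on a PERF_EF_START re-add.
	 */
	if (perf_event_overflow(event, &data, regs))
		power_pmu_stop(event, 0);
}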
@@ -1359,6 +1367,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
+ if (delta == 0)
+ left++;
if (period) {
if (left <= 0) {
left += period;
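For context, the 'left' value computed here is converted back into a raw PMC preload a few lines further down in record_and_restart(); a trimmed excerpt of that step, paraphrased from the surrounding kernel code (the PMC raises its interrupt when it crosses 0x80000000), so the delta == 0 adjustment above directly nudges the value written back to the counter:

	/*
	 * Later in record_and_restart() (trimmed): turn the remaining
	 * period back into a counter value.  The PMC interrupts when it
	 * crosses 0x80000000, so it is preloaded with 0x80000000 - left.
	 */
	val = 0;
	if (left < 0x80000000LL)
		val = 0x80000000LL - left;
	write_pmc(event->hw.idx, val);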
@@ -1422,11 +1432,8 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
return regs->nip;
}
-static bool pmc_overflow(unsigned long val)
+static bool pmc_overflow_power7(unsigned long val)
{
- if ((int)val < 0)
- return true;
-
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
@@ -1438,7 +1445,15 @@ static bool pmc_overflow(unsigned long val)
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
- if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
+ if ((0x80000000 - val) <= 256)
+ return true;
+
+ return false;
+}
+
+static bool pmc_overflow(unsigned long val)
+{
+ if ((int)val < 0)
return true;
return false;
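The refactoring above splits the POWER7 rollback special case out of the generic MSB check. A small userspace restatement of the two predicates with a few hypothetical counter values (illustration only, not kernel code; it assumes the usual 32-bit PMC that requests an interrupt when bit 31 becomes set):

#include <stdbool.h>
#include <stdio.h>

/* Local copies of the two predicates, for illustration in userspace. */
static bool pmc_overflow(unsigned long val)
{
	return (int)val < 0;			/* bit 31 set: normal overflow */
}

static bool pmc_overflow_power7(unsigned long val)
{
	return (0x80000000UL - val) <= 256;	/* rolled back to just below the edge */
}

int main(void)
{
	/* Hypothetical PMC snapshots taken at interrupt time. */
	unsigned long samples[] = { 0x80000005UL, 0x7fffff80UL, 0x7ffffe00UL };

	for (int i = 0; i < 3; i++)
		printf("%#lx: overflow=%d p7_rollback=%d\n", samples[i],
		       pmc_overflow(samples[i]), pmc_overflow_power7(samples[i]));
	/*
	 * 0x80000005: normal overflow (MSB set).
	 * 0x7fffff80: only the POWER7 check fires (0x80 <= 256 counts below
	 *             the edge, i.e. a speculative event rolled back).
	 * 0x7ffffe00: neither fires (0x200 > 256).
	 */
	return 0;
}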
@@ -1449,11 +1464,11 @@ static bool pmc_overflow(unsigned long val)
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
- int i;
+ int i, j;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
- unsigned long val;
- int found = 0;
+ unsigned long val[8];
+ int found, active;
int nmi;
if (cpuhw->n_limited)
@@ -1468,33 +1483,53 @@ static void perf_event_interrupt(struct pt_regs *regs)
else
irq_enter();
- for (i = 0; i < cpuhw->n_events; ++i) {
- event = cpuhw->event[i];
- if (!event->hw.idx || is_limited_pmc(event->hw.idx))
+ /* Read all the PMCs since we'll need them a bunch of times */
+ for (i = 0; i < ppmu->n_counter; ++i)
+ val[i] = read_pmc(i + 1);
+
+ /* Try to find what caused the IRQ */
+ found = 0;
+ for (i = 0; i < ppmu->n_counter; ++i) {
+ if (!pmc_overflow(val[i]))
continue;
- val = read_pmc(event->hw.idx);
- if ((int)val < 0) {
- /* event has overflowed */
- found = 1;
- record_and_restart(event, val, regs);
+ if (is_limited_pmc(i + 1))
+ continue; /* these won't generate IRQs */
+ /*
+ * We've found one that's overflowed. For active
+ * counters we need to log this. For inactive
+ * counters, we need to reset it anyway
+ */
+ found = 1;
+ active = 0;
+ for (j = 0; j < cpuhw->n_events; ++j) {
+ event = cpuhw->event[j];
+ if (event->hw.idx == (i + 1)) {
+ active = 1;
+ record_and_restart(event, val[i], regs);
+ break;
+ }
}
+ if (!active)
+ /* reset non active counters that have overflowed */
+ write_pmc(i + 1, 0);
}
-
- /*
- * In case we didn't find and reset the event that caused
- * the interrupt, scan all events and reset any that are
- * negative, to avoid getting continual interrupts.
- * Any that we processed in the previous loop will not be negative.
- */
- if (!found) {
- for (i = 0; i < ppmu->n_counter; ++i) {
- if (is_limited_pmc(i + 1))
+ if (!found && pvr_version_is(PVR_POWER7)) {
+ /* check active counters for special buggy p7 overflow */
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- val = read_pmc(i + 1);
- if (pmc_overflow(val))
- write_pmc(i + 1, 0);
+ if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+ /* event has overflowed in a buggy way */
+ found = 1;
+ record_and_restart(event,
+ val[event->hw.idx - 1],
+ regs);
+ }
}
}
+ if ((!found) && printk_ratelimit())
+ printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
* Reset MMCR0 to its normal value. This will set PMXE and
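The rework above changes the handler from "walk the active events and read their PMCs" to "snapshot every PMC, then match overflowed counters to events", so an overflowed counter is handled whether or not it currently backs an active event, and the POWER7 rollback case is only considered for active events when nothing else explains the interrupt. A small userspace model of that matching loop (hypothetical values and a plain ownership array; the real code walks cpuhw->event[] and compares ->hw.idx):

#include <stdbool.h>
#include <stdio.h>

#define N_COUNTER 6

static bool pmc_overflow(unsigned long val)
{
	return (int)val < 0;
}

int main(void)
{
	/*
	 * Hypothetical snapshot at interrupt time: PMC3 overflowed and is
	 * backed by an active event, PMC5 overflowed but has no owner
	 * (e.g. left over from an event that was just removed).
	 */
	unsigned long val[N_COUNTER] = { 0x10, 0x20, 0x80000005UL,
					 0x30, 0x80000001UL, 0x40 };
	bool active[N_COUNTER] = { false, false, true, false, false, false };
	bool found = false;

	for (int i = 0; i < N_COUNTER; i++) {
		if (!pmc_overflow(val[i]))
			continue;
		found = true;
		if (active[i])
			printf("PMC%d: active event, record_and_restart()\n", i + 1);
		else
			printf("PMC%d: no owner, write_pmc(%d, 0)\n", i + 1, i + 1);
	}
	/* The POWER7-only second pass with pmc_overflow_power7() would run here. */
	if (!found)
		printf("Can't find PMC that caused IRQ\n");
	return 0;
}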
diff --git a/arch/powerpc/perf/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index cb2e2949c8d..fb664929f5d 100644
--- a/arch/powerpc/perf/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
@@ -24,6 +24,8 @@ static int e500_generic_events[] = {
[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
[PERF_COUNT_HW_BRANCH_MISSES] = 15,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
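With these two entries in place, the generic stalled-cycles events resolve to e500 hardware events 18 and 19, and userspace can request them through the normal perf interface (e.g. "perf stat -e stalled-cycles-frontend"). A minimal sketch using the standard perf_event_open() syscall; nothing here is powerpc-specific, the attr.config value simply maps through the table above:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND;	/* e500 event 18 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* current task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("front-end stall cycles: %lld\n", count);

	close(fd);
	return 0;
}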