From fe1952fc0afb9a2e4c79f103c08aef5d13db1873 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 1 Mar 2012 12:45:27 +1100
Subject: powerpc: Rework runlatch code

This moves the inlines into system.h and changes the runlatch code to use
the thread local flags (non-atomic) rather than the TIF flags (atomic) to
keep track of the latch state.

The code to turn it back on in an asynchronous interrupt is now simplified
and partially inlined.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/system.h | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

(limited to 'arch/powerpc/include/asm/system.h')

diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index c377457d1b8..a02883d5af4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -550,5 +550,43 @@ extern void reloc_got2(unsigned long);
 
 extern struct dentry *powerpc_debugfs_root;
 
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We manually hard enable-disable, this is called
+ * in the idle loop and we don't want to mess up
+ * with soft-disable/enable & interrupt replay.
+ */
+#define ppc64_runlatch_off()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+		    test_thread_local_flags(_TLF_RUNLATCH)) {	\
+			unsigned long msr = mfmsr();		\
+			__hard_irq_disable();			\
+			__ppc64_runlatch_off();			\
+			if (msr & MSR_EE)			\
+				__hard_irq_enable();		\
+		}						\
+	} while (0)
+
+#define ppc64_runlatch_on()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+		    !test_thread_local_flags(_TLF_RUNLATCH)) {	\
+			unsigned long msr = mfmsr();		\
+			__hard_irq_disable();			\
+			__ppc64_runlatch_on();			\
+			if (msr & MSR_EE)			\
+				__hard_irq_enable();		\
+		}						\
+	} while (0)
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYSTEM_H */
-- 
cgit v1.2.3
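
For reference, a minimal sketch of what the out-of-line helpers and the
thread-local flag test used by the macros above might look like. This is
not part of the hunk shown here; the local_flags word, the CTRL SPR
accessors (SPRN_CTRLF/SPRN_CTRLT, CTRL_RUNLATCH) and the exact signatures
are assumptions based on the commit message, with the real definitions
living in thread_info.h and process.c:

	/* Sketch: non-atomic test of the per-thread local flags word. */
	static inline bool test_thread_local_flags(unsigned int flags)
	{
		return (current_thread_info()->local_flags & flags) != 0;
	}

	/* Sketch: turn the run latch on and record it in local_flags. */
	void __ppc64_runlatch_on(void)
	{
		unsigned long ctrl;

		/* Plain (non-atomic) store: local_flags is only ever
		 * touched by the owning thread, unlike the TIF flags
		 * which need atomic bitops. */
		current_thread_info()->local_flags |= _TLF_RUNLATCH;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	/* Sketch: turn the run latch off and clear the local flag. */
	void __ppc64_runlatch_off(void)
	{
		unsigned long ctrl;

		current_thread_info()->local_flags &= ~_TLF_RUNLATCH;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

The macros hard-disable interrupts around these calls, so the flag update
and the CTRL SPR write cannot race with an asynchronous interrupt turning
the latch back on.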