author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-10 20:24:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-10 20:24:39 -0700
commit     bf83e61464803d386d0ec3fc92e5449d7963a409 (patch)
tree       8bce8f80bad1650a776042cee13a7a0ba38efb6c /arch
parent     a60d4b9874dc62cf0fd0d42b247baaaef75d30f8 (diff)
parent     363edbe2614aa90df706c0f19ccfa2a6c06af0be (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Ben Herrenschmidt:
 "Here are a handful of small powerpc fixes.  A couple of section
  mismatches (always worth fixing), a missing export of a new symbol
  causing build failures of modules, a page fault deadlock fix
  (interestingly, that bug has been around for a LONG time, though it
  seems to be more easily triggered by KVM) and a fix for the pseries
  default idle loop in the absence of the cpuidle drivers (such as
  during boot)"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Default arch idle could cede processor on pseries
  fbdev/ps3fb: Fix section mismatch warning for ps3fb_probe
  powerpc: Fix section mismatch warning for prom_rtas_call
  powerpc: Fix possible deadlock on page fault
  powerpc: Export cpu_to_chip_id() to fix build error
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/prom_init.c         |  3
-rw-r--r--  arch/powerpc/kernel/smp.c               |  1
-rw-r--r--  arch/powerpc/mm/fault.c                 | 13
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c  | 31
4 files changed, 34 insertions, 14 deletions
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7b6391b68fb..12e656ffe60 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1297,7 +1297,8 @@ static void __init prom_query_opal(void)
prom_opal_align = 0x10000;
}
-static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
+static int __init prom_rtas_call(int token, int nargs, int nret,
+ int *outputs, ...)
{
struct rtas_args rtas_args;
va_list list;
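Marking prom_rtas_call() __init places it in .init.text next to its only callers, which is what silences the modpost section mismatch warning. A minimal sketch of the pattern being fixed, with hypothetical names that are not part of this patch:

#include <linux/init.h>

/* Boot-time data, discarded once init completes. */
static int boot_param __initdata = 42;

/*
 * Keeping the accessor in .init.text matches the lifetime of boot_param.
 * Without the __init annotation, modpost warns that a .text function
 * references the .init.data variable, whose memory is freed after boot.
 */
static int __init read_boot_param(void)
{
	return boot_param;
}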
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 442d8e23f8f..8e59abc237d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -611,6 +611,7 @@ int cpu_to_chip_id(int cpu)
of_node_put(np);
return of_get_ibm_chip_id(np);
}
+EXPORT_SYMBOL(cpu_to_chip_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
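EXPORT_SYMBOL() adds cpu_to_chip_id() to the kernel's symbol table so that modular code can link against it; without the export, any module calling the function fails at modpost/load time with an unresolved symbol, which is the build error this hunk fixes. A hedged sketch of such a caller (a hypothetical module; the prototype is assumed to come from <asm/smp.h> on powerpc):

#include <linux/module.h>
#include <linux/printk.h>
#include <asm/smp.h>	/* assumed home of the cpu_to_chip_id() prototype */

static int __init chipid_demo_init(void)
{
	/* This call resolves only because cpu_to_chip_id is exported. */
	pr_info("CPU 0 sits on chip %d\n", cpu_to_chip_id(0));
	return 0;
}

static void __exit chipid_demo_exit(void)
{
}

module_init(chipid_demo_init);
module_exit(chipid_demo_exit);
MODULE_LICENSE("GPL");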
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 76d8e7cc780..2dd69bf4af4 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -206,7 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
int trap = TRAP(regs);
int is_exec = trap == 0x400;
int fault;
- int rc = 0;
+ int rc = 0, store_update_sp = 0;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
@@ -280,6 +280,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ /*
+ * We want to do this outside mmap_sem, because reading code around nip
+ * can result in fault, which will cause a deadlock when called with
+ * mmap_sem held
+ */
+ if (user_mode(regs))
+ store_update_sp = store_updates_sp(regs);
+
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -345,8 +353,7 @@ retry:
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
- if (address + 2048 < uregs->gpr[1]
- && (!user_mode(regs) || !store_updates_sp(regs)))
+ if (address + 2048 < uregs->gpr[1] && !store_update_sp)
goto bad_area;
}
if (expand_stack(vma, address))
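store_updates_sp() loads the faulting instruction from user memory at regs->nip, and that load can itself fault; if it happens while mmap_sem is already held, the nested fault blocks on the same semaphore and the task deadlocks. The patch therefore samples the result once, before the lock is taken, and reuses the cached store_update_sp flag in the stack-growth check above. A simplified sketch of the ordering, not the actual fault path:

/* Sketch only: shows the lock ordering the patch establishes. */
static int sketch_do_page_fault(struct pt_regs *regs, unsigned long address)
{
	struct mm_struct *mm = current->mm;
	int store_update_sp = 0;

	/*
	 * Read the instruction at nip before taking mmap_sem; the read may
	 * fault and re-enter the fault handler, which must not find the
	 * semaphore already held by us.
	 */
	if (user_mode(regs))
		store_update_sp = store_updates_sp(regs);

	down_read(&mm->mmap_sem);
	/* ... find_vma(), stack-growth check using store_update_sp ... */
	up_read(&mm->mmap_sem);
	return 0;
}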
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index d64feb3ea0b..1f97e2b87a6 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
}
early_initcall(alloc_dispatch_log_kmem_cache);
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
{
/* This would call on the cpuidle framework, and the back-end pseries
* driver to go to idle states
@@ -362,10 +362,22 @@ static void pSeries_idle(void)
if (cpuidle_idle_call()) {
/* On error, execute default handler
* to go into low thread priority and possibly
- * low power mode.
+ * low power mode by ceding the processor to the hypervisor
*/
- HMT_low();
- HMT_very_low();
+
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (which are driven prior
+ * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ */
+ cede_processor();
+
+ get_lppaca()->idle = 0;
}
}
@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
pSeries_nvram_init();
- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- ppc_md.power_save = pSeries_idle;
- }
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
+ ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
- else
+ } else {
+ /* No special idle routine */
ppc_md.enable_pmcs = power4_enable_pmcs;
+ }
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
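The hook installed above is consumed by the generic powerpc idle loop: whenever ppc_md.power_save is set, the loop calls it instead of merely dropping SMT thread priority, so an LPAR without a working cpuidle driver (for example during early boot) now cedes its virtual processor to the hypervisor. A rough sketch of the caller side, simplified from the idle loop in arch/powerpc/kernel/idle.c:

/* Rough sketch; the real loop carries extra runlatch and preemption handling. */
static void powerpc_idle_sketch(void)
{
	if (ppc_md.power_save) {
		ppc_md.power_save();	/* pseries_lpar_idle() on an LPAR */
	} else {
		/* No power_save hook: just lower SMT thread priority. */
		HMT_low();
		HMT_very_low();
	}
}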