-rw-r--r--   arch/powerpc/kernel/crash.c   | 147
-rw-r--r--   arch/powerpc/kernel/traps.c   |  27
-rw-r--r--   include/asm-powerpc/kexec.h   |   8
-rw-r--r--   kernel/kexec.c                |   6
4 files changed, 142 insertions, 46 deletions
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index e253a45dcf1..b537cfa4e09 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,9 +24,11 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
+#include <linux/irq.h>
#include <asm/processor.h>
#include <asm/machdep.h>
+#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
@@ -41,6 +43,7 @@
/* This keeps track of which CPU is crashing. */
int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
size_t data_len)
@@ -98,34 +101,66 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
}
#ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
+static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
void crash_ipi_callback(struct pt_regs *regs)
{
int cpu = smp_processor_id();
- if (cpu == crashing_cpu)
- return;
-
if (!cpu_online(cpu))
return;
- if (ppc_md.kexec_cpu_down)
- ppc_md.kexec_cpu_down(1, 1);
-
local_irq_disable();
+ if (!cpu_isset(cpu, cpus_in_crash))
+ crash_save_this_cpu(regs, cpu);
+ cpu_set(cpu, cpus_in_crash);
- crash_save_this_cpu(regs, cpu);
- atomic_dec(&waiting_for_crash_ipi);
+ /*
+ * Entered via soft-reset - either the kdump process was
+ * invoked through soft-reset, or the user activated it
+ * because some CPU did not respond to the IPI.
+ * In the soft-reset case a secondary CPU can enter this
+ * function twice: first via the IPI and then via soft-reset.
+ * Tell the kexec CPU that this CPU entered via soft-reset
+ * and is ready to go down.
+ */
+ if (cpu_isset(cpu, cpus_in_sr)) {
+ cpu_clear(cpu, cpus_in_sr);
+ atomic_inc(&enter_on_soft_reset);
+ }
+
+ /*
+ * Wait here for the kdump boot to start.
+ * This barrier makes sure that all CPUs have stopped;
+ * if some have not, soft-reset will be invoked to bring
+ * in the remaining CPUs.
+ */
+ while (!cpu_isset(crashing_cpu, cpus_in_crash))
+ cpu_relax();
+
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(1, 1);
kexec_smp_wait();
/* NOTREACHED */
}
-static void crash_kexec_prepare_cpus(void)
+/*
+ * Wait until all CPUs have entered via soft-reset.
+ */
+static void crash_soft_reset_check(int cpu)
+{
+ unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic CPU */
+
+ cpu_clear(cpu, cpus_in_sr);
+ while (atomic_read(&enter_on_soft_reset) != ncpus)
+ cpu_relax();
+}
+
+
+static void crash_kexec_prepare_cpus(int cpu)
{
unsigned int msecs;
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic CPU */
crash_send_ipi(crash_ipi_callback);
smp_wmb();
@@ -133,14 +168,13 @@ static void crash_kexec_prepare_cpus(void)
/*
 * FIXME: Until we have a way to stop other CPUs reliably,
* the crash CPU will send an IPI and wait for other CPUs to
- * respond. If not, proceed the kexec boot even though we failed to
- * capture other CPU states.
+ * respond.
* Delay of at least 10 seconds.
*/
- printk(KERN_ALERT "Sending IPI to other cpus...\n");
+ printk(KERN_EMERG "Sending IPI to other cpus...\n");
msecs = 10000;
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
- barrier();
+ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
mdelay(1);
}
@@ -149,18 +183,71 @@ static void crash_kexec_prepare_cpus(void)
/*
 * FIXME: If we do not get all the CPUs, one possibility is to ask the
 * user to do a soft reset so that we get them all.
- * IPI handler is already set by the panic cpu initially. Therefore,
- * all cpus could invoke this handler from die() and the panic CPU
- * will call machine_kexec() directly from this handler to do
- * kexec boot.
+ * Soft-reset will be used until a better mechanism is implemented.
+ */
+ if (cpus_weight(cpus_in_crash) < ncpus) {
+ printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
+ ncpus - cpus_weight(cpus_in_crash));
+ printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
+ cpus_in_sr = CPU_MASK_NONE;
+ atomic_set(&enter_on_soft_reset, 0);
+ while (cpus_weight(cpus_in_crash) < ncpus)
+ cpu_relax();
+ }
+ /*
+ * Make sure all CPUs have entered via soft-reset if kdump was
+ * invoked using soft-reset.
*/
- if (atomic_read(&waiting_for_crash_ipi))
- printk(KERN_ALERT "done waiting: %d cpus not responding\n",
- atomic_read(&waiting_for_crash_ipi));
+ if (cpu_isset(cpu, cpus_in_sr))
+ crash_soft_reset_check(cpu);
/* Leave the IPI callback set */
}
+
+/*
+ * This function will be called by secondary cpus or by kexec cpu
+ * if soft-reset is activated to stop some CPUs.
+ */
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ int msecs = 5;
+
+ local_irq_save(flags);
+ /* Wait up to 5ms for the kexec CPU to enter. */
+ while (crashing_cpu < 0) {
+ if (--msecs < 0) {
+ /*
+ * Either the kdump image is not loaded or the
+ * kdump process has not started - probably xmon
+ * was exited using 'x' (exit and recover), or
+ * kexec_should_crash() failed for all running tasks.
+ */
+ cpu_clear(cpu, cpus_in_sr);
+ local_irq_restore(flags);
+ return;
+ }
+ mdelay(1);
+ cpu_relax();
+ }
+ if (cpu == crashing_cpu) {
+ /*
+ * The panic CPU enters this function only via soft-reset.
+ * Wait until all secondary CPUs have entered, then
+ * start the kexec boot.
+ */
+ crash_soft_reset_check(cpu);
+ cpu_set(crashing_cpu, cpus_in_crash);
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(1, 0);
+ machine_kexec(kexec_crash_image);
+ /* NOTREACHED */
+ }
+ crash_ipi_callback(regs);
+}
+
#else
-static void crash_kexec_prepare_cpus(void)
+static void crash_kexec_prepare_cpus(int cpu)
{
/*
 * move the secondaries to us so that we can copy
@@ -171,6 +258,10 @@ static void crash_kexec_prepare_cpus(void)
smp_release_cpus();
}
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+ cpus_in_sr = CPU_MASK_NONE;
+}
#endif
void default_machine_crash_shutdown(struct pt_regs *regs)
@@ -199,14 +290,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
desc->handler->disable(irq);
}
- if (ppc_md.kexec_cpu_down)
- ppc_md.kexec_cpu_down(1, 0);
-
/*
 * Make a note of the crashing CPU. It will be used in machine_kexec
 * so that another IPI will not be sent.
*/
crashing_cpu = smp_processor_id();
- crash_kexec_prepare_cpus();
crash_save_this_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus(crashing_cpu);
+ cpu_set(crashing_cpu, cpus_in_crash);
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(1, 0);
}
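
Taken together, the crash.c hunks above implement the following rendezvous between the crashing CPU and the secondaries. This is only a condensed, illustrative sketch of the control flow, not the literal patched code; all identifiers come from the hunks above, and the comments are the editor's interpretation:

	/* Crashing CPU: default_machine_crash_shutdown() */
	crashing_cpu = smp_processor_id();
	crash_save_this_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);	/* send IPI, wait up to 10s, else soft-reset */
	cpu_set(crashing_cpu, cpus_in_crash);	/* releases the secondaries spinning below */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);

	/* Each secondary CPU: crash_ipi_callback() */
	local_irq_disable();
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_this_cpu(regs, cpu);	/* save registers only once */
	cpu_set(cpu, cpus_in_crash);		/* report "this CPU is stopped" */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();			/* wait for the crashing CPU to finish */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);
	kexec_smp_wait();			/* does not return */
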
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 52f5659534f..fa6bd97b6b9 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -52,9 +52,13 @@
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
+#include <asm/kexec.h>
#ifdef CONFIG_PPC64 /* XXX */
#define _IO_BASE pci_io_base
+#ifdef CONFIG_KEXEC
+cpumask_t cpus_in_sr = CPU_MASK_NONE;
+#endif
#endif
#ifdef CONFIG_DEBUGGER
@@ -97,7 +101,7 @@ static DEFINE_SPINLOCK(die_lock);
int die(const char *str, struct pt_regs *regs, long err)
{
- static int die_counter, crash_dump_start = 0;
+ static int die_counter;
if (debugger(regs))
return 1;
@@ -137,21 +141,12 @@ int die(const char *str, struct pt_regs *regs, long err)
print_modules();
show_regs(regs);
bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
- if (!crash_dump_start && kexec_should_crash(current)) {
- crash_dump_start = 1;
- spin_unlock_irq(&die_lock);
+ if (kexec_should_crash(current) ||
+ kexec_sr_activated(smp_processor_id()))
crash_kexec(regs);
- /* NOTREACHED */
- }
- spin_unlock_irq(&die_lock);
- if (crash_dump_start)
- /*
- * Only for soft-reset: Other CPUs will be responded to an IPI
- * sent by first kexec CPU.
- */
- for(;;)
- ;
+ crash_kexec_secondary(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
@@ -215,6 +210,10 @@ void system_reset_exception(struct pt_regs *regs)
return;
}
+#ifdef CONFIG_KEXEC
+ cpu_set(smp_processor_id(), cpus_in_sr);
+#endif
+
die("System Reset", regs, SIGABRT);
/* Must die if the interrupt is not recoverable */
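
The traps.c hunks tie soft-reset into this rendezvous: system_reset_exception() marks the CPU in cpus_in_sr, and die() then routes it into crash_kexec() or crash_kexec_secondary(). A simplified sketch of the resulting flow on a soft-reset (identifiers are from the hunks; the sequencing commentary is the editor's reading of the patch, not text from it):

	/* Any CPU hit by a soft-reset (system reset exception) */
	cpu_set(smp_processor_id(), cpus_in_sr);	/* remember: entered via soft-reset */
	die("System Reset", regs, SIGABRT);

	/* Inside die(), after the oops output */
	spin_unlock_irq(&die_lock);
	if (kexec_should_crash(current) || kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);		/* only the CPU that wins kexec_lock proceeds */
	crash_kexec_secondary(regs);		/* everyone else joins the rendezvous */
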
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 234bd684c7f..8f7fd5cfec3 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -114,6 +114,11 @@ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+extern cpumask_t cpus_in_sr;
+static inline int kexec_sr_activated(int cpu)
+{
+ return cpu_isset(cpu, cpus_in_sr);
+}
#endif /* __powerpc64__ */
struct kimage;
@@ -123,10 +128,13 @@ extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern void machine_kexec_simple(struct kimage *image);
+extern void crash_kexec_secondary(struct pt_regs *regs);
extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
#else /* !CONFIG_KEXEC */
+static inline int kexec_sr_activated(int cpu) { return 0; }
+static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
{
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 58f0f382597..50087ecf337 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1042,7 +1042,6 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
void crash_kexec(struct pt_regs *regs)
{
- struct kimage *image;
int locked;
@@ -1056,12 +1055,11 @@ void crash_kexec(struct pt_regs *regs)
*/
locked = xchg(&kexec_lock, 1);
if (!locked) {
- image = xchg(&kexec_crash_image, NULL);
- if (image) {
+ if (kexec_crash_image) {
struct pt_regs fixed_regs;
crash_setup_regs(&fixed_regs, regs);
machine_crash_shutdown(&fixed_regs);
- machine_kexec(image);
+ machine_kexec(kexec_crash_image);
}
xchg(&kexec_lock, 0);
}
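
The kernel/kexec.c hunk stops consuming kexec_crash_image with xchg(). One plausible reading, given crash_kexec_secondary() above: on powerpc the crashing CPU may re-enter the crash path via soft-reset and then call machine_kexec(kexec_crash_image) directly, so the image pointer must still be populated after crash_kexec() has run. A minimal sketch of the resulting behaviour (comments are the editor's interpretation):

	locked = xchg(&kexec_lock, 1);		/* serialize: only one CPU runs the shutdown */
	if (!locked) {
		if (kexec_crash_image) {	/* image is tested, no longer cleared ... */
			crash_setup_regs(&fixed_regs, regs);
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		xchg(&kexec_lock, 0);
	}

	/* ... which lets crash_kexec_secondary() on the crashing CPU later do: */
	machine_kexec(kexec_crash_image);	/* would have been NULL with the old xchg() */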