about summary refs log tree commit diff
path: root/arch/powerpc
diff options
context:
space:
mode:
authorMichael Neuling <mikey@neuling.org>2008-01-18 15:50:30 +1100
committerPaul Mackerras <paulus@samba.org>2008-01-25 22:52:50 +1100
commitc3b75bd7bbf4a0438dc140033b80657995fd30ed (patch)
tree4621ebbf620d6262fccf61811824386b9ba50915 /arch/powerpc
parentcaa34c9e9cab6afb243a3da406cce272805c48c5 (diff)
[POWERPC] Make setjmp/longjmp code usable outside of xmon
This makes the setjmp/longjmp code used by xmon, generically available to other code.  It also removes the requirement for debugger hooks to be only called on 0x300 (data storage) exception.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/kernel/misc.S65
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/xmon/setjmp.S61
-rw-r--r--arch/powerpc/xmon/xmon.c6
4 files changed, 68 insertions, 70 deletions
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 74ce0c7a7b1..7b916022069 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -8,6 +8,8 @@
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
+ * setjmp/longjmp code by Paul Mackerras.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -15,6 +17,8 @@
*/
#include <asm/ppc_asm.h>
#include <asm/unistd.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
.text
@@ -51,3 +55,64 @@ _GLOBAL(kernel_execve)
bnslr
neg r3,r3
blr
+
+_GLOBAL(setjmp)
+ mflr r0
+ PPC_STL r0,0(r3)
+ PPC_STL r1,SZL(r3)
+ PPC_STL r2,2*SZL(r3)
+ mfcr r0
+ PPC_STL r0,3*SZL(r3)
+ PPC_STL r13,4*SZL(r3)
+ PPC_STL r14,5*SZL(r3)
+ PPC_STL r15,6*SZL(r3)
+ PPC_STL r16,7*SZL(r3)
+ PPC_STL r17,8*SZL(r3)
+ PPC_STL r18,9*SZL(r3)
+ PPC_STL r19,10*SZL(r3)
+ PPC_STL r20,11*SZL(r3)
+ PPC_STL r21,12*SZL(r3)
+ PPC_STL r22,13*SZL(r3)
+ PPC_STL r23,14*SZL(r3)
+ PPC_STL r24,15*SZL(r3)
+ PPC_STL r25,16*SZL(r3)
+ PPC_STL r26,17*SZL(r3)
+ PPC_STL r27,18*SZL(r3)
+ PPC_STL r28,19*SZL(r3)
+ PPC_STL r29,20*SZL(r3)
+ PPC_STL r30,21*SZL(r3)
+ PPC_STL r31,22*SZL(r3)
+ li r3,0
+ blr
+
+_GLOBAL(longjmp)
+ PPC_LCMPI r4,0
+ bne 1f
+ li r4,1
+1: PPC_LL r13,4*SZL(r3)
+ PPC_LL r14,5*SZL(r3)
+ PPC_LL r15,6*SZL(r3)
+ PPC_LL r16,7*SZL(r3)
+ PPC_LL r17,8*SZL(r3)
+ PPC_LL r18,9*SZL(r3)
+ PPC_LL r19,10*SZL(r3)
+ PPC_LL r20,11*SZL(r3)
+ PPC_LL r21,12*SZL(r3)
+ PPC_LL r22,13*SZL(r3)
+ PPC_LL r23,14*SZL(r3)
+ PPC_LL r24,15*SZL(r3)
+ PPC_LL r25,16*SZL(r3)
+ PPC_LL r26,17*SZL(r3)
+ PPC_LL r27,18*SZL(r3)
+ PPC_LL r28,19*SZL(r3)
+ PPC_LL r29,20*SZL(r3)
+ PPC_LL r30,21*SZL(r3)
+ PPC_LL r31,22*SZL(r3)
+ PPC_LL r0,3*SZL(r3)
+ mtcrf 0x38,r0
+ PPC_LL r0,0(r3)
+ PPC_LL r1,SZL(r3)
+ PPC_LL r2,2*SZL(r3)
+ mtlr r0
+ mr r3,r4
+ blr
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 10dda224a36..7b251079926 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -167,10 +167,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (notify_page_fault(regs))
return 0;
- if (trap == 0x300) {
- if (debugger_fault_handler(regs))
- return 0;
- }
+ if (unlikely(debugger_fault_handler(regs)))
+ return 0;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
index 96a91f10e2e..04c0b305ad4 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/xmon/setjmp.S
@@ -12,67 +12,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
-_GLOBAL(xmon_setjmp)
- mflr r0
- PPC_STL r0,0(r3)
- PPC_STL r1,SZL(r3)
- PPC_STL r2,2*SZL(r3)
- mfcr r0
- PPC_STL r0,3*SZL(r3)
- PPC_STL r13,4*SZL(r3)
- PPC_STL r14,5*SZL(r3)
- PPC_STL r15,6*SZL(r3)
- PPC_STL r16,7*SZL(r3)
- PPC_STL r17,8*SZL(r3)
- PPC_STL r18,9*SZL(r3)
- PPC_STL r19,10*SZL(r3)
- PPC_STL r20,11*SZL(r3)
- PPC_STL r21,12*SZL(r3)
- PPC_STL r22,13*SZL(r3)
- PPC_STL r23,14*SZL(r3)
- PPC_STL r24,15*SZL(r3)
- PPC_STL r25,16*SZL(r3)
- PPC_STL r26,17*SZL(r3)
- PPC_STL r27,18*SZL(r3)
- PPC_STL r28,19*SZL(r3)
- PPC_STL r29,20*SZL(r3)
- PPC_STL r30,21*SZL(r3)
- PPC_STL r31,22*SZL(r3)
- li r3,0
- blr
-
-_GLOBAL(xmon_longjmp)
- PPC_LCMPI r4,0
- bne 1f
- li r4,1
-1: PPC_LL r13,4*SZL(r3)
- PPC_LL r14,5*SZL(r3)
- PPC_LL r15,6*SZL(r3)
- PPC_LL r16,7*SZL(r3)
- PPC_LL r17,8*SZL(r3)
- PPC_LL r18,9*SZL(r3)
- PPC_LL r19,10*SZL(r3)
- PPC_LL r20,11*SZL(r3)
- PPC_LL r21,12*SZL(r3)
- PPC_LL r22,13*SZL(r3)
- PPC_LL r23,14*SZL(r3)
- PPC_LL r24,15*SZL(r3)
- PPC_LL r25,16*SZL(r3)
- PPC_LL r26,17*SZL(r3)
- PPC_LL r27,18*SZL(r3)
- PPC_LL r28,19*SZL(r3)
- PPC_LL r29,20*SZL(r3)
- PPC_LL r30,21*SZL(r3)
- PPC_LL r31,22*SZL(r3)
- PPC_LL r0,3*SZL(r3)
- mtcrf 0x38,r0
- PPC_LL r0,0(r3)
- PPC_LL r1,SZL(r3)
- PPC_LL r2,2*SZL(r3)
- mtlr r0
- mr r3,r4
- blr
-
/*
* Grab the register values as they are now.
* This won't do a particularily good job because we really
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 865e36751f2..a34172ddc46 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -40,6 +40,7 @@
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
+#include <asm/setjmp.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -71,12 +72,9 @@ static unsigned long ncsum = 4096;
static int termch;
static char tmpstr[128];
-#define JMP_BUF_LEN 23
static long bus_error_jmp[JMP_BUF_LEN];
static int catch_memory_errors;
static long *xmon_fault_jmp[NR_CPUS];
-#define setjmp xmon_setjmp
-#define longjmp xmon_longjmp
/* Breakpoint stuff */
struct bpt {
@@ -162,8 +160,6 @@ int xmon_no_auto_backtrace;
extern void xmon_enter(void);
extern void xmon_leave(void);
-extern long setjmp(long *);
-extern void longjmp(long *, long);
extern void xmon_save_regs(struct pt_regs *);
#ifdef CONFIG_PPC64