path: root/arch/sparc/include
author     David S. Miller <davem@davemloft.net>  2012-10-26 15:18:37 -0700
committer  David S. Miller <davem@davemloft.net>  2012-10-26 15:18:37 -0700
commit     517ffce4e1a03aea979fe3a18a3dd1761a24fafb (patch)
tree       c470fe3e6266dd96c7c6f0c9df881712d89df546 /arch/sparc/include
parent     1d47091ac6bf1286d708ebcd3f2b69d7c682916b (diff)
sparc64: Make montmul/montsqr/mpmul usable in 32-bit threads.
The Montgomery Multiply, Montgomery Square, and Multiple-Precision Multiply
instructions work by loading a combination of the floating point and multiple
register windows worth of integer registers with the inputs.

These values are 64-bit. But for 32-bit userland processes we only save the
low 32-bits of each integer register during a register spill. This is because
the register window save area is in the user stack and has a fixed layout.

Therefore, the only way to use these instructions in 32-bit mode is to perform
the following sequence:

1) Load the top 32-bits of a chosen integer register with a sentinel, say
   "-1". This will be in the outer-most register window.

   The idea is that we're trying to see if the outer-most register window
   gets spilled, and thus the 64-bit values were truncated.

2) Load all the inputs for the montmul/montsqr/mpmul instruction, down to
   the inner-most register window.

3) Execute the opcode.

4) Traverse back up to the outer-most register window.

5) Check the sentinel; if it's still "-1", store the results. Otherwise
   retry the entire sequence.

This retry is extremely troublesome. If you're just unlucky and an interrupt
or other trap happens, it'll push that outer-most window to the stack and
clear the sentinel when we restore it.

We could retry forever and never make forward progress if interrupts arrive
at a fast enough rate (consider perf events as one example). So we have to do
limited retries and fall back to software, which is extremely
non-deterministic.

Luckily it's very straightforward to provide a mechanism to let 32-bit
applications use a 64-bit stack. Stacks in 64-bit mode are biased by 2047
bytes, which means that the lowest bit is set in the actual %sp register
value. So if we see bit zero set in a 32-bit application's stack, we treat
it like a 64-bit stack.

Runtime detection of such a facility is tricky, and cumbersome at best. For
example, just trying to use a biased stack and seeing if it works is hard to
recover from (the signal handler will need to use an alt stack, plus
something along the lines of longjmp). Therefore, we add a system call to
report a bitmask of arch-specific features like this in a cheap and less
hairy way.

With help from Andy Polyakov.

Signed-off-by: David S. Miller <davem@davemloft.net>
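For illustration, a minimal userland sketch of how a 32-bit process might probe
the new facility. This is not part of the patch; the syscall number (340) and
the feature bit are taken from the uapi/asm/unistd.h hunk below, and the raw
syscall(2) wrapper is an assumption for a SPARC target without a libc wrapper:

/* Hypothetical probe of the new kern_features syscall (sketch, not part
 * of this commit).  On success it returns a bitmask of arch features;
 * KERN_FEATURE_MIXED_MODE_STACK means a biased, 64-bit style stack may
 * be used from 32-bit code, so the montmul/montsqr/mpmul sentinel/retry
 * dance is unnecessary.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_kern_features
#define __NR_kern_features 340                 /* from this commit's uapi change */
#endif
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001

int main(void)
{
        long features = syscall(__NR_kern_features);

        if (features < 0) {
                /* Older kernel: syscall absent, fall back to software. */
                perror("kern_features");
                return 1;
        }

        if (features & KERN_FEATURE_MIXED_MODE_STACK)
                printf("64-bit (biased) stacks usable from 32-bit code\n");
        else
                printf("must use the sentinel/retry sequence or software\n");

        return 0;
}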
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/compat.h          |  5
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  |  5
-rw-r--r--  arch/sparc/include/asm/ttable.h          | 24
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h     |  6
4 files changed, 29 insertions, 11 deletions
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index cef99fbc0a2..830502fe62b 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
- if (!(test_thread_flag(TIF_32BIT)))
+ if (test_thread_64bit_stack(usp))
usp += STACK_BIAS;
- else
+
+ if (test_thread_flag(TIF_32BIT))
usp &= 0xffffffffUL;
usp -= len;
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 4e227663108..a3fe4dcc0aa 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -259,6 +259,11 @@ static inline bool test_and_clear_restore_sigmask(void)
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
+#define test_thread_64bit_stack(__SP) \
+ ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+ false : true)
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
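For reference, the test_thread_64bit_stack() macro added above reduces to the
following predicate. This is a plain-C restatement for illustration only, not
part of the patch, with the task's TIF_32BIT state passed in as a parameter
rather than read via test_thread_flag():

#include <stdbool.h>

/* A stack is treated as 64-bit unless the task runs in 32-bit mode AND
 * bit zero of the saved %sp is clear (i.e. the stack is not biased).
 */
static bool example_test_thread_64bit_stack(bool task_is_32bit, unsigned long sp)
{
        bool stack_is_biased = (sp & 0x1) != 0;  /* thread32_stack_is_64bit() */

        return !(task_is_32bit && !stack_is_biased);
}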
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 48f2807d326..71b5a67522a 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \
/* Normal 32bit spill */
#define SPILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + %g0] ASI; \
mov 0x04, %g3; \
stwa %l1, [%sp + %g3] ASI; \
@@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \
stwa %i6, [%g1 + %g0] ASI; \
stwa %i7, [%g1 + %g3] ASI; \
saved; \
- retry; nop; nop; \
+ retry; \
b,a,pt %xcc, spill_fixup_dax; \
b,a,pt %xcc, spill_fixup_mna; \
b,a,pt %xcc, spill_fixup;
#define SPILL_2_GENERIC_ETRAP \
etrap_user_spill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, etrap_user_spill_64bit; \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + 0x00] %asi; \
stwa %l1, [%sp + 0x04] %asi; \
stwa %l2, [%sp + 0x08] %asi; \
@@ -427,7 +431,7 @@ etrap_user_spill_32bit: \
ba,pt %xcc, etrap_save; \
wrpr %g1, %cwp; \
nop; nop; nop; nop; \
- nop; nop; nop; nop; \
+ nop; nop; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit;
@@ -592,7 +596,9 @@ user_rtt_fill_64bit: \
/* Normal 32bit fill */
#define FILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
lduwa [%sp + %g0] ASI, %l0; \
mov 0x04, %g2; \
mov 0x08, %g3; \
@@ -616,14 +622,16 @@ user_rtt_fill_64bit: \
lduwa [%g1 + %g3] ASI, %i6; \
lduwa [%g1 + %g5] ASI, %i7; \
restored; \
- retry; nop; nop; nop; nop; \
+ retry; nop; nop; \
b,a,pt %xcc, fill_fixup_dax; \
b,a,pt %xcc, fill_fixup_mna; \
b,a,pt %xcc, fill_fixup;
#define FILL_2_GENERIC_RTRAP \
user_rtt_fill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, user_rtt_fill_64bit; \
+ srl %sp, 0, %sp; \
lduwa [%sp + 0x00] %asi, %l0; \
lduwa [%sp + 0x04] %asi, %l1; \
lduwa [%sp + 0x08] %asi, %l2; \
@@ -643,7 +651,7 @@ user_rtt_fill_32bit: \
ba,pt %xcc, user_rtt_pre_restore; \
restored; \
nop; nop; nop; nop; nop; \
- nop; nop; nop; nop; nop; \
+ nop; nop; nop; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup;
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 8974ef7ae92..bed86a820d0 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -405,8 +405,12 @@
#define __NR_setns 337
#define __NR_process_vm_readv 338
#define __NR_process_vm_writev 339
+#define __NR_kern_features 340
-#define NR_syscalls 340
+#define NR_syscalls 341
+
+/* Bitmask values returned from kern_features system call. */
+#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,