author    Will Deacon <will.deacon@arm.com>      2015-08-19 15:57:09 +0100
committer Amit Pundir <amit.pundir@linaro.org>   2016-12-05 12:24:04 +0530
commit    4511bfd29b265519875e2c8f2d94abb84755073c
tree      fc0662e59c64d3113d050162b3d229df5b3ca32a /arch
parent    aabf8b35607c8bffdea4eca2971fb4bc8c6baf09
UPSTREAM: arm64: entry: always restore x0 from the stack on syscall return
We have a micro-optimisation on the fast syscall return path where we
take care to keep x0 live with the return value from the syscall so that
we can avoid restoring it from the stack. The benefit of doing this is
fairly suspect, since we will be restoring x1 from the stack anyway
(which lives adjacent in the pt_regs structure) and the only additional
cost is saving x0 back to pt_regs after the syscall handler, which could
be seen as a poor man's prefetch.

More importantly, this causes issues with the context tracking code.

The ct_user_enter macro ends up branching into C code, which is free to
use x0 as a scratch register and consequently leads to us returning junk
back to userspace as the syscall return value. Rather than special case
the context-tracking code, this patch removes the questionable
optimisation entirely.

Cc: <stable@vger.kernel.org>
Cc: Larry Bassel <larry.bassel@linaro.org>
Cc: Kevin Hilman <khilman@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Hanjun Guo <hanjun.guo@linaro.org>
Tested-by: Hanjun Guo <hanjun.guo@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>

Bug: 31432001
Change-Id: Ie8049daf594a012db884e85a317c7f01eaf002bc
(cherry picked from commit 8ec41987436d566f7c4559c6871738b869f7ef07)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
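The failure described above hinges on ct_user_enter being an ordinary C
call. For reference, a minimal sketch of the macro as it stood in entry.S
at the time (assuming the usual CONFIG_CONTEXT_TRACKING guard; exact
details may vary between kernel versions):

	.macro	ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter	// plain C call: under AAPCS64,
						// x0-x18 are caller-saved, so
						// a syscall return value kept
						// live in x0 need not survive
#endif
	.endm

Hence the shape of the fix below: store x0 back to pt_regs immediately
after the syscall handler (str x0, [sp, #S_X0]) and reload it in
kernel_exit like every other register, instead of special-casing the
fast path.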
Diffstat (limited to 'arch')
 arch/arm64/kernel/entry.S | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7f20c926f561..cef8f36f8cff 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -131,7 +131,7 @@
 	 */
 	.endm
 
-	.macro	kernel_exit, el, ret = 0
+	.macro	kernel_exit, el
 	.if	\el != 0
 	/* Restore the task's original addr_limit. */
 	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
@@ -164,11 +164,7 @@ alternative_endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
-	.if	\ret
-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
-	.else
 	ldp	x0, x1, [sp, #16 * 0]
-	.endif
 	ldp	x2, x3, [sp, #16 * 1]
 	ldp	x4, x5, [sp, #16 * 2]
 	ldp	x6, x7, [sp, #16 * 3]
@@ -631,22 +627,21 @@ ENDPROC(cpu_switch_to)
  */
 ret_fast_syscall:
 	disable_irq				// disable interrupts
+	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, fast_work_pending
+	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-	kernel_exit 0, ret = 1
+	kernel_exit 0
ret_fast_syscall_trace:
 	enable_irq				// enable interrupts
-	b	__sys_trace_return
+	b	__sys_trace_return_skipped	// we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -670,7 +665,7 @@ ret_to_user:
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
no_work_pending:
-	kernel_exit 0, ret = 0
+	kernel_exit 0
ENDPROC(ret_to_user)
 
 /*