Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile        7
-rw-r--r--  arch/arm64/kernel/entry.S         7
-rw-r--r--  arch/arm64/kernel/kuser32.S       1
-rw-r--r--  arch/arm64/kernel/opcodes.c      72
-rw-r--r--  arch/arm64/kernel/process.c      66
-rw-r--r--  arch/arm64/kernel/ptrace.c       22
-rw-r--r--  arch/arm64/kernel/setup.c        13
-rw-r--r--  arch/arm64/kernel/signal32.c     10
-rw-r--r--  arch/arm64/kernel/swp_emulate.c 223
-rw-r--r--  arch/arm64/kernel/sys_compat.c    2
-rw-r--r--  arch/arm64/kernel/traps.c        44
11 files changed, 460 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index ac389d32ccde..23e19f94d449 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -15,9 +15,10 @@ CFLAGS_REMOVE_return_address.o = -pg
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+ hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
+ opcodes.o
-arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
+arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
@@ -33,6 +34,8 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
+obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
+
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
head-y := head.o
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 0437186e4c7d..e6681c26d489 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -25,9 +25,9 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
+#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
-#include <asm/unistd32.h>
/*
* Bad Abort numbers
@@ -666,6 +666,10 @@ __sys_trace:
mov x0, sp
bl syscall_trace_enter
adr lr, __sys_trace_return // return address
+ cmp w0, #RET_SKIP_SYSCALL_TRACE // skip syscall and tracing?
+ b.eq ret_to_user
+ cmp w0, #RET_SKIP_SYSCALL // skip syscall?
+ b.eq __sys_trace_return_skipped
uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
cmp scno, sc_nr // check upper syscall limit
@@ -679,6 +683,7 @@ __sys_trace:
__sys_trace_return:
str x0, [sp] // save returned x0
+__sys_trace_return_skipped: // x0 already in regs[0]
mov x0, sp
bl syscall_trace_exit
b ret_to_user
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 7787208e8cc6..9fb6d5a3cea7 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -28,6 +28,7 @@
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
+#include <asm/unistd.h>
#include <asm/unistd32.h>
.align 5
diff --git a/arch/arm64/kernel/opcodes.c b/arch/arm64/kernel/opcodes.c
new file mode 100644
index 000000000000..ceb5a04a1e12
--- /dev/null
+++ b/arch/arm64/kernel/opcodes.c
@@ -0,0 +1,72 @@
+/*
+ * Copied from linux/arch/arm/kernel/opcodes.c
+ *
+ * A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+ 0xF0F0, /* EQ == Z set */
+ 0x0F0F, /* NE */
+ 0xCCCC, /* CS == C set */
+ 0x3333, /* CC */
+ 0xFF00, /* MI == N set */
+ 0x00FF, /* PL */
+ 0xAAAA, /* VS == V set */
+ 0x5555, /* VC */
+ 0x0C0C, /* HI == C set && Z clear */
+ 0xF3F3, /* LS == C clear || Z set */
+ 0xAA55, /* GE == (N==V) */
+ 0x55AA, /* LT == (N!=V) */
+ 0x0A05, /* GT == (!Z && (N==V)) */
+ 0xF5FA, /* LE == (Z || (N!=V)) */
+ 0xFFFF, /* AL always */
+ 0 /* NV */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ * opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u64 psr)
+{
+ u32 cc_bits = opcode >> 28;
+ u32 psr_cond = (u32)(psr & 0xffffffff) >> 28;
+ unsigned int ret;
+
+ if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+ if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+ ret = ARM_OPCODE_CONDTEST_PASS;
+ else
+ ret = ARM_OPCODE_CONDTEST_FAIL;
+ } else {
+ ret = ARM_OPCODE_CONDTEST_UNCOND;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
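
To make the cc_map encoding above concrete: each 16-bit entry carries one bit per possible NZCV value, so testing a condition is a single shift and mask of the table entry by the PSTATE flags. The following standalone userspace sketch (not part of the patch; the table values and the NZCV bit positions N=31, Z=30, C=29, V=28 are taken from the code above, everything else is illustrative) re-checks a few conditions against hand-built flag values.

/*
 * Standalone sketch, not part of the patch: re-checks the cc_map lookup
 * for a few conditions against hand-built NZCV values.
 */
#include <stdio.h>

static const unsigned short cc_map[16] = {
	0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
	0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0x0000,
};

static int cond_passes(unsigned int cond, unsigned long long psr)
{
	/* same extraction as arm_check_condition(): NZCV live in bits 31:28 */
	unsigned int nzcv = (unsigned int)(psr & 0xffffffffu) >> 28;

	return (cc_map[cond] >> nzcv) & 1;
}

int main(void)
{
	unsigned long long z_set = 1ULL << 30;	/* Z=1, N=C=V=0 */
	unsigned long long n_set = 1ULL << 31;	/* N=1, Z=C=V=0 */

	printf("EQ with Z set: %d\n", cond_passes(0x0, z_set));	/* 1 */
	printf("NE with Z set: %d\n", cond_passes(0x1, z_set));	/* 0 */
	printf("GE with N!=V:  %d\n", cond_passes(0xa, n_set));	/* 0 */
	return 0;
}

For EQ with only Z set, nzcv is 0b0100, so bit 4 of 0xF0F0 is tested and the condition passes; the same shift against 0x0F0F makes NE fail, which is why one 16-bit word per condition is all the table needs.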
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 10ad4d99266b..c09f85f80a9e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -161,6 +161,70 @@ void machine_restart(char *cmd)
while (1);
}
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+ int i, j;
+ int nlines;
+ u32 *p;
+
+ /*
+ * don't attempt to dump non-kernel addresses or
+ * values that are probably just small negative numbers
+ */
+ if (addr < PAGE_OFFSET || addr > -256UL)
+ return;
+
+ printk("\n%s: %#lx:\n", name, addr);
+
+ /*
+ * round address down to a 32 bit boundary
+ * and always dump a multiple of 32 bytes
+ */
+ p = (u32 *)(addr & ~(sizeof(u32) - 1));
+ nbytes += (addr & (sizeof(u32) - 1));
+ nlines = (nbytes + 31) / 32;
+
+
+ for (i = 0; i < nlines; i++) {
+ /*
+ * just display low 16 bits of address to keep
+ * each line of the dump < 80 characters
+ */
+ printk("%04lx ", (unsigned long)p & 0xffff);
+ for (j = 0; j < 8; j++) {
+ u32 data;
+ if (probe_kernel_address(p, data)) {
+ printk(" ********");
+ } else {
+ printk(" %08x", data);
+ }
+ ++p;
+ }
+ printk("\n");
+ }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+ mm_segment_t fs;
+ unsigned int i;
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ show_data(regs->pc - nbytes, nbytes * 2, "PC");
+ show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
+ show_data(regs->sp - nbytes, nbytes * 2, "SP");
+ for (i = 0; i < 30; i++) {
+ char name[4];
+ snprintf(name, sizeof(name), "X%u", i);
+ show_data(regs->regs[i] - nbytes, nbytes * 2, name);
+ }
+ set_fs(fs);
+}
+
void __show_regs(struct pt_regs *regs)
{
int i, top_reg;
@@ -187,6 +251,8 @@ void __show_regs(struct pt_regs *regs)
if (i % 2 == 0)
printk("\n");
}
+ if (!user_mode(regs))
+ show_extra_register_data(regs, 128);
printk("\n");
}
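
The dump format used by show_data() packs eight 32-bit words per line: the address is rounded down to a word boundary, nbytes is padded by the bytes dropped in that rounding, and nlines = (nbytes + 31) / 32. A minimal userspace sketch of the same layout follows (not part of the patch; it only reads a local buffer, so the probe_kernel_address() fault handling is omitted).

/*
 * Standalone sketch, not part of the patch: reproduces show_data()'s
 * word-alignment and (nbytes + 31) / 32 line rounding against a local
 * buffer.
 */
#include <stdio.h>
#include <stdint.h>

static void dump_words(uintptr_t addr, int nbytes, const char *name)
{
	const uint32_t *p = (const uint32_t *)(addr & ~(sizeof(uint32_t) - 1));
	int nlines, i, j;

	nbytes += addr & (sizeof(uint32_t) - 1);
	nlines = (nbytes + 31) / 32;		/* 8 words == 32 bytes per line */

	printf("\n%s: %#lx:\n", name, (unsigned long)addr);
	for (i = 0; i < nlines; i++) {
		printf("%04lx ", (unsigned long)(uintptr_t)p & 0xffff);
		for (j = 0; j < 8; j++)
			printf(" %08x", *p++);
		printf("\n");
	}
}

int main(void)
{
	uint32_t buf[64] = { 0xdeadbeef, 0xcafef00d };

	/* 256 bytes starting 128 bytes before the chosen address, matching
	 * show_extra_register_data()'s show_data(pc - 128, 256, "PC") */
	dump_words((uintptr_t)&buf[32] - 128, 256, "PC");
	return 0;
}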
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 70311740677a..e1f70225a919 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -26,6 +27,7 @@
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
+#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
@@ -39,6 +41,7 @@
#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
+#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>
@@ -1066,7 +1069,19 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
- return ptrace_request(child, request, addr, data);
+ int ret;
+
+ switch (request) {
+ case PTRACE_SET_SYSCALL:
+ task_pt_regs(child)->syscallno = data;
+ ret = 0;
+ break;
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
}
enum ptrace_syscall_dir {
@@ -1104,6 +1119,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
+ audit_syscall_entry(syscall_get_arch(), regs->syscallno,
+ regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
+
return regs->syscallno;
}
@@ -1112,6 +1130,8 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs_return_value(regs));
+ audit_syscall_exit(regs);
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
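
The new arch_ptrace() case mirrors the PTRACE_SET_SYSCALL request that 32-bit ARM already has: a tracer stopped at syscall entry can rewrite the pending syscall number, which the code above stores into the tracee's pt_regs->syscallno. Below is a hedged userspace sketch of how a tracer might use it; the request value 23 is an assumption carried over from arch/arm, since the uapi header defining it is not part of this diff.

/*
 * Userspace sketch, not part of the patch. Assumption: PTRACE_SET_SYSCALL
 * is request 23 as on 32-bit ARM.
 */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <sys/syscall.h>

#ifndef PTRACE_SET_SYSCALL
#define PTRACE_SET_SYSCALL 23	/* assumed value, taken from arch/arm */
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		syscall(SYS_getpid);		/* rewritten by the tracer below */
		_exit(0);
	}

	waitpid(child, NULL, 0);		/* child stopped itself */
	ptrace(PTRACE_SYSCALL, child, NULL, NULL);
	waitpid(child, NULL, 0);		/* child's next syscall-entry stop */

	/* replace the pending syscall number before the kernel dispatches it */
	if (ptrace(PTRACE_SET_SYSCALL, child, NULL, (void *)(long)SYS_getppid) == -1)
		perror("PTRACE_SET_SYSCALL");

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}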
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index e87b5fd07b8c..fdf3c5f4ce91 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -507,9 +507,20 @@ static int c_show(struct seq_file *m, void *v)
for (i = 0; hwcap_str[i]; i++)
if (elf_hwcap & (1 << i))
seq_printf(m, "%s ", hwcap_str[i]);
+#ifdef CONFIG_ARMV7_COMPAT_CPUINFO
+ if (is_compat_task()) {
+ /* Print out the non-optional ARMv8 HW capabilities */
+ seq_printf(m, "wp half thumb fastmult vfp edsp neon vfpv3 tlsi ");
+ seq_printf(m, "vfpv4 idiva idivt ");
+ }
+#endif
seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
- seq_printf(m, "CPU architecture: AArch64\n");
+ seq_printf(m, "CPU architecture: %s\n",
+#if IS_ENABLED(CONFIG_ARMV7_COMPAT_CPUINFO)
+ is_compat_task() ? "8" :
+#endif
+ "AArch64");
seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 02de43260332..e5cf0ab84bed 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -26,7 +26,7 @@
#include <asm/fpsimd.h>
#include <asm/signal32.h>
#include <asm/uaccess.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
struct compat_sigcontext {
/* We always set these two fields to 0 */
@@ -183,6 +183,14 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
break;
+#ifdef __ARCH_SIGSYS
+ case __SI_SYS:
+ err |= __put_user((compat_uptr_t)(unsigned long)
+ from->si_call_addr, &to->si_call_addr);
+ err |= __put_user(from->si_syscall, &to->si_syscall);
+ err |= __put_user(from->si_arch, &to->si_arch);
+ break;
+#endif
default: /* this is just in case for now ... */
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
diff --git a/arch/arm64/kernel/swp_emulate.c b/arch/arm64/kernel/swp_emulate.c
new file mode 100644
index 000000000000..508fd2edb8ab
--- /dev/null
+++ b/arch/arm64/kernel/swp_emulate.c
@@ -0,0 +1,223 @@
+/*
+ * Derived from linux/arch/arm/kernel/swp_emulate.c
+ *
+ * Copyright (C) 2009 ARM Limited
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implements emulation of the SWP/SWPB instructions using load-exclusive and
+ * store-exclusive for processors that have them disabled (or future ones that
+ * might not implement them).
+ *
+ * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ * Where: Rt = destination
+ * Rt2 = source
+ * Rn = address
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/perf_event.h>
+
+#include <asm/opcodes.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+#include <asm/system_misc.h>
+#include <linux/debugfs.h>
+
+/*
+ * Error-checking SWP macros implemented using ldrex{b}/strex{b}
+ */
+
+static int swpb(u8 in, u8 *out, u8 *addr)
+{
+ u8 _out;
+ int res;
+ int err;
+
+ do {
+ __asm__ __volatile__(
+ "0: ldxrb %w1, %4\n"
+ "1: stxrb %w0, %w3, %4\n"
+ " mov %w2, #0\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: mov %w2, %5\n"
+ " b 2b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 0b, 3b\n"
+ " .quad 1b, 3b\n"
+ " .previous"
+ : "=&r" (res), "=r" (_out), "=r" (err)
+ : "r" (in), "Q" (*addr), "i" (-EFAULT)
+ : "cc", "memory");
+ } while (err == 0 && res != 0);
+
+ if (err == 0)
+ *out = _out;
+ return err;
+}
+
+static int swp(u32 in, u32 *out, u32 *addr)
+{
+ u32 _out;
+ int res;
+ int err = 0;
+
+ do {
+ __asm__ __volatile__(
+ "0: ldxr %w1, %4\n"
+ "1: stxr %w0, %w3, %4\n"
+ " mov %w2, #0\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: mov %w2, %5\n"
+ " b 2b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 0b, 3b\n"
+ " .quad 1b, 3b\n"
+ " .previous"
+ : "=&r" (res), "=r" (_out), "=r" (err)
+ : "r" (in), "Q" (*addr), "i" (-EFAULT)
+ : "cc", "memory");
+ } while (err == 0 && res != 0);
+
+ if (err == 0)
+ *out = _out;
+ return err;
+}
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+#define EXTRACT_REG_NUM(instruction, offset) \
+ (((instruction) & (0xf << (offset))) >> (offset))
+#define RN_OFFSET 16
+#define RT_OFFSET 12
+#define RT2_OFFSET 0
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static pid_t previous_pid;
+
+u64 swpb_count = 0;
+u64 swp_count = 0;
+
+/*
+ * swp_handler logs the id of calling process, dissects the instruction, sanity
+ * checks the memory location, calls emulate_swpX for the actual operation and
+ * deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, unsigned int instr)
+{
+ u32 destreg, data, type;
+ uintptr_t address;
+ unsigned int res = 0;
+ int err;
+ u32 temp32;
+ u8 temp8;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ res = arm_check_condition(instr, regs->pstate);
+ switch (res) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ regs->pc += 4;
+ return 0;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a SWP, undef */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ if (current->pid != previous_pid) {
+ pr_warn("\"%s\" (%ld) uses obsolete SWP{B} instruction\n",
+ current->comm, (unsigned long)current->pid);
+ previous_pid = current->pid;
+ }
+
+ address = regs->regs[EXTRACT_REG_NUM(instr, RN_OFFSET)] & 0xffffffff;
+ data = regs->regs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
+ destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
+
+ type = instr & TYPE_SWPB;
+
+ /* Check access in reasonable access range for both SWP and SWPB */
+ if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+ pr_debug("SWP{B} emulation: access to %p not allowed!\n",
+ (void *)address);
+ res = -EFAULT;
+ }
+ if (type == TYPE_SWPB) {
+ err = swpb((u8) data, &temp8, (u8 *) address);
+ if (err)
+ return err;
+ regs->regs[destreg] = temp8;
+ regs->pc += 4;
+ swpb_count++;
+ } else if (address & 0x3) {
+ /* SWP to unaligned address not permitted */
+ pr_debug("SWP instruction on unaligned pointer!\n");
+ return -EFAULT;
+ } else {
+ err = swp((u32) data, &temp32, (u32 *) address);
+ if (err)
+ return err;
+ regs->regs[destreg] = temp32;
+ regs->pc += 4;
+ swp_count++;
+ }
+
+ return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
+ */
+static struct undef_hook swp_hook = {
+ .instr_mask = 0x0fb00ff0,
+ .instr_val = 0x01000090,
+ .pstate_mask = COMPAT_PSR_MODE_MASK | COMPAT_PSR_T_BIT,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = swp_handler
+};
+
+/*
+ * Register handler and create status file in /proc/cpu
+ * Invoked as late_initcall, since not needed before init spawned.
+ */
+static int __init swp_emulation_init(void)
+{
+ struct dentry *dir;
+ dir = debugfs_create_dir("swp_emulate", NULL);
+ debugfs_create_u64("swp_count", S_IRUGO | S_IWUSR, dir, &swp_count);
+ debugfs_create_u64("swpb_count", S_IRUGO | S_IWUSR, dir, &swpb_count);
+
+ pr_notice("Registering SWP/SWPB emulation handler\n");
+ register_undef_hook(&swp_hook);
+
+
+ return 0;
+}
+
+late_initcall(swp_emulation_init);
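
To see how the extraction macros and the undef hook fit together: instr_mask 0x0fb00ff0 matches both SWP and SWPB under any condition field, which is why swp_handler() re-checks the condition with arm_check_condition(), and the register fields come straight out of the A32 encoding. A standalone sketch (not part of the patch) decoding one concrete instruction:

/*
 * Standalone sketch, not part of the patch: decodes one A32 encoding with
 * the macros above. 0xe1002091 is "swp r2, r1, [r0]"; setting bit 22
 * (0xe1402091) would make it swpb.
 */
#include <stdio.h>
#include <stdint.h>

#define EXTRACT_REG_NUM(instruction, offset) \
	(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET  16
#define RT_OFFSET  12
#define RT2_OFFSET  0
#define TYPE_SWPB  (1 << 22)

int main(void)
{
	uint32_t instr = 0xe1002091;	/* swp r2, r1, [r0] */

	printf("Rn  (address)     = r%u\n", EXTRACT_REG_NUM(instr, RN_OFFSET));
	printf("Rt  (destination) = r%u\n", EXTRACT_REG_NUM(instr, RT_OFFSET));
	printf("Rt2 (source)      = r%u\n", EXTRACT_REG_NUM(instr, RT2_OFFSET));
	printf("byte variant      = %s\n", (instr & TYPE_SWPB) ? "SWPB" : "SWP");
	return 0;
}

Note that 0xe1002091 & 0x0fb00ff0 == 0x01000090, so this instruction matches the hook's instr_val and reaches swp_handler(); with bit 22 clear it takes the word-sized swp() path rather than swpb().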
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 78039927c807..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -26,7 +26,7 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
static inline void
do_compat_cache_op(unsigned long start, unsigned long end, int flags)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 7ffadddb645d..0da47699510b 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 1995-2009 Russell King
* Copyright (C) 2012 ARM Ltd.
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -257,15 +258,58 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
die(str, regs, err);
}
+static LIST_HEAD(undef_hook);
+
+void register_undef_hook(struct undef_hook *hook)
+{
+ list_add(&hook->node, &undef_hook);
+}
+
+static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+{
+ struct undef_hook *hook;
+ int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+
+ list_for_each_entry(hook, &undef_hook, node)
+ if ((instr & hook->instr_mask) == hook->instr_val &&
+ (regs->pstate & hook->pstate_mask) == hook->pstate_val)
+ fn = hook->fn;
+
+ return fn ? fn(regs, instr) : 1;
+}
+
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
+ u32 instr;
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
/* check for AArch32 breakpoint instructions */
if (!aarch32_break_handler(regs))
return;
+ if (user_mode(regs)) {
+ if (compat_thumb_mode(regs)) {
+ if (get_user(instr, (u16 __user *)pc))
+ goto die_sig;
+ if (is_wide_instruction(instr)) {
+ u32 instr2;
+ if (get_user(instr2, (u16 __user *)pc+1))
+ goto die_sig;
+ instr <<= 16;
+ instr |= instr2;
+ }
+ } else if (get_user(instr, (u32 __user *)pc)) {
+ goto die_sig;
+ }
+ } else {
+ /* kernel mode */
+ instr = *((u32 *)pc);
+ }
+
+ if (call_undef_hook(regs, instr) == 0)
+ return;
+die_sig:
if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
printk_ratelimit()) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",