From a9cc77e2cdf373dc5a5eb9c44cfeb64f1dba36cb Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 10:51:29 +0100
Subject: arm64: make a single hook to syscall_trace() for all syscall features

Currently syscall_trace() is called only for ptrace. With the additional
TIF_xx flags defined here, it is now also called for audit, ftrace and
seccomp in addition to ptrace.

Acked-by: Richard Guy Briggs
Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit 449f81a4da4d99980064943d504bb19d07e86aec)
Signed-off-by: Alex Shi

Conflicts:
	arch/arm64/kernel/entry.S
---
 arch/arm64/include/asm/thread_info.h | 13 +++++++++++++
 arch/arm64/kernel/entry.S            |  5 +++--
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 720e70b66ffd..0a8b2a97a32e 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -91,6 +91,9 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE	- syscall trace active
+ *  TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
+ *  TIF_SYSCALL_AUDIT	- syscall auditing
+ *  TIF_SECCOMP		- syscall secure computing
  *  TIF_SIGPENDING	- signal pending
  *  TIF_NEED_RESCHED	- rescheduling necessary
  *  TIF_NOTIFY_RESUME	- callback before returning to user
@@ -101,6 +104,9 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
+#define TIF_SYSCALL_AUDIT	9
+#define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11
 #define TIF_POLLING_NRFLAG	16
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19
@@ -112,10 +118,17 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_32BIT		(1 << TIF_32BIT)

 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
				 _TIF_NOTIFY_RESUME)

+#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
 #endif /* __KERNEL__ */

 #endif /* __ASM_THREAD_INFO_H */

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 39ac630d83de..f9f2caefeecb 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -631,8 +631,9 @@ el0_svc_naked:				// compat entry point
 	enable_irq
 	get_thread_info tsk
-	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
-	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+	tst	x16, #_TIF_SYSCALL_WORK
+	b.ne	__sys_trace
 	adr	lr, ret_fast_syscall		// return address
 	cmp	scno, sc_nr			// check upper syscall limit
 	b.hs	ni_sys
--
cgit v1.2.3
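The point of _TIF_SYSCALL_WORK is that a single tst-plus-branch covers all four hooks instead of one test per flag. A rough user-space C illustration of the same mask test (the values simply mirror the TIF_* bits above; none of this is kernel code):

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE      8
    #define TIF_SYSCALL_AUDIT      9
    #define TIF_SYSCALL_TRACEPOINT 10
    #define TIF_SECCOMP            11

    #define _TIF_SYSCALL_WORK ((1UL << TIF_SYSCALL_TRACE) | \
                               (1UL << TIF_SYSCALL_AUDIT) | \
                               (1UL << TIF_SYSCALL_TRACEPOINT) | \
                               (1UL << TIF_SECCOMP))

    int main(void)
    {
            unsigned long flags = 1UL << TIF_SYSCALL_AUDIT; /* pretend audit is on */

            /* One AND plus one branch replaces four single-bit tests; this
             * is what "tst x16, #_TIF_SYSCALL_WORK; b.ne __sys_trace" does
             * in the entry path. */
            if (flags & _TIF_SYSCALL_WORK)
                    printf("slow path: __sys_trace\n");
            else
                    printf("fast path\n");
            return 0;
    }

New hook flags can now be added by extending the mask, without touching the assembly again.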
From 25a36250f5bfe884334e8ce36315eaddd96ecb68 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 10:51:30 +0100
Subject: arm64: split syscall_trace() into separate functions for enter/exit

As done on arm, this change makes it easy to confirm that syscall-related
hooks (syscall tracepoints, audit and seccomp, to be implemented later)
are invoked in the correct order, that is, that operations done on entry
are undone in the opposite order on exit.

Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit 3157858feff89196635b01495d5ec9ebe206639e)
Signed-off-by: Alex Shi
---
 arch/arm64/kernel/entry.S  | 10 ++++------
 arch/arm64/kernel/ptrace.c | 50 +++++++++++++++++++++++++++-------------------
 2 files changed, 33 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f9f2caefeecb..00d6eb98f8df 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -649,9 +649,8 @@ ENDPROC(el0_svc)
  * switches, and waiting for our parent to respond.
  */
 __sys_trace:
-	mov	x1, sp
-	mov	w0, #0				// trace entry
-	bl	syscall_trace
+	mov	x0, sp
+	bl	syscall_trace_enter
 	adr	lr, __sys_trace_return		// return address
 	uxtw	scno, w0			// syscall number (possibly new)
 	mov	x1, sp				// pointer to regs
@@ -666,9 +665,8 @@ __sys_trace:

 __sys_trace_return:
 	str	x0, [sp]			// save returned x0
-	mov	x1, sp
-	mov	w0, #1				// trace exit
-	bl	syscall_trace
+	mov	x0, sp
+	bl	syscall_trace_exit
 	b	ret_to_user

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6a8928bba03c..6d666dc1cea6 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1058,35 +1058,43 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ptrace_request(child, request, addr, data);
 }

-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
+enum ptrace_syscall_dir {
+	PTRACE_SYSCALL_ENTER = 0,
+	PTRACE_SYSCALL_EXIT,
+};
+
+static void tracehook_report_syscall(struct pt_regs *regs,
+				     enum ptrace_syscall_dir dir)
 {
+	int regno;
 	unsigned long saved_reg;

-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return regs->syscallno;
-
-	if (is_compat_task()) {
-		/* AArch32 uses ip (r12) for scratch */
-		saved_reg = regs->regs[12];
-		regs->regs[12] = dir;
-	} else {
-		/*
-		 * Save X7. X7 is used to denote syscall entry/exit:
-		 * X7 = 0 -> entry, = 1 -> exit
-		 */
-		saved_reg = regs->regs[7];
-		regs->regs[7] = dir;
-	}
+	/*
+	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
+	 * used to denote syscall entry/exit:
+	 */
+	regno = (is_compat_task() ? 12 : 7);
+	saved_reg = regs->regs[regno];
+	regs->regs[regno] = dir;

-	if (dir)
+	if (dir == PTRACE_SYSCALL_EXIT)
 		tracehook_report_syscall_exit(regs, 0);
 	else if (tracehook_report_syscall_entry(regs))
 		regs->syscallno = ~0UL;

-	if (is_compat_task())
-		regs->regs[12] = saved_reg;
-	else
-		regs->regs[7] = saved_reg;
+	regs->regs[regno] = saved_reg;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

 	return regs->syscallno;
 }
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+}
--
cgit v1.2.3

From 5e5c374ee699752a8e1a9b2713d27e4c4f259b0c Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 10:51:31 +0100
Subject: arm64: Add regs_return_value() in syscall.h

This macro, regs_return_value(), is used mainly by audit to record a
system call's result, but it may also be used in test_kprobes.c.

Acked-by: Will Deacon
Acked-by: Richard Guy Briggs
Signed-off-by: AKASHI Takahiro
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit d34a3ebd8d25cf691a94fae66a957a480cf46430)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/ptrace.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0e7fa4963735..5800ec17f2a5 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -134,6 +134,11 @@ struct pt_regs {
 #define user_stack_pointer(regs) \
 	((regs)->sp)

+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->regs[0];
+}
+
 /*
  * Are the current registers suitable for user mode? (used to maintain
  * security in signal handlers)
--
cgit v1.2.3
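regs_return_value() is the hook that lets generic code read x0 after a probed function or syscall returns. A minimal sketch of a kretprobe module that consumes it; the probed symbol (do_sys_open) is only an example, and this assumes a kernel with kprobes enabled:

    #include <linux/module.h>
    #include <linux/kprobes.h>
    #include <linux/ptrace.h>

    /* Runs on return from the probed function; regs_return_value(regs)
     * reads regs->regs[0], i.e. x0, per the new inline above. */
    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            pr_info("probed function returned %ld\n", regs_return_value(regs));
            return 0;
    }

    static struct kretprobe my_kretprobe = {
            .handler        = ret_handler,
            .kp.symbol_name = "do_sys_open",        /* example target */
    };

    static int __init rp_init(void)
    {
            return register_kretprobe(&my_kretprobe);
    }

    static void __exit rp_exit(void)
    {
            unregister_kretprobe(&my_kretprobe);
    }

    module_init(rp_init);
    module_exit(rp_exit);
    MODULE_LICENSE("GPL");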
From ab6c58757483ffa92d3da4b3d2672f6acfc3adf2 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 10:51:32 +0100
Subject: arm64: is_compat_task is defined both in asm/compat.h and linux/compat.h

Some kernel files may include both linux/compat.h and asm/compat.h,
directly or indirectly. Since both header files contain is_compat_task()
under !CONFIG_COMPAT, compiling them with !CONFIG_COMPAT will eventually
fail. Such files include kernel/auditsc.c, kernel/seccomp.c and
init/do_mounts.c (do_mounts.c may read asm/compat.h via asm/ftrace.h once
ftrace is implemented).

So this patch proactively
1) removes is_compat_task() under !CONFIG_COMPAT from asm/compat.h
2) replaces asm/compat.h with linux/compat.h in kernel/*.c,
but asm/compat.h is still necessary in ptrace.c and process.c because
they use is_compat_thread().

Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit fd92d4a54a069953b4679958121317f2a25389cd)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/compat.h   | 5 -----
 arch/arm64/kernel/hw_breakpoint.c | 2 +-
 arch/arm64/kernel/process.c       | 1 +
 arch/arm64/kernel/ptrace.c        | 1 +
 arch/arm64/kernel/signal.c        | 2 +-
 5 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fda2704b3f9f..3b334f9ba994 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -305,11 +305,6 @@ static inline int is_compat_thread(struct thread_info *thread)

 #else /* !CONFIG_COMPAT */

-static inline int is_compat_task(void)
-{
-	return 0;
-}
-
 static inline int is_compat_thread(struct thread_info *thread)
 {
 	return 0;

diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index f17f581116fc..a45e2db6e502 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,6 +20,7 @@

 #define pr_fmt(fmt) "hw-breakpoint: " fmt

+#include <linux/compat.h>
 #include
 #include
 #include
@@ -27,7 +28,6 @@
 #include
 #include

-#include <asm/compat.h>
 #include
 #include
 #include

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1c0a9be2ffa8..fc8a3872a5bb 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@

 #include <stdarg.h>

+#include <linux/compat.h>
 #include
 #include
 #include

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6d666dc1cea6..4b58e812cf67 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */

+#include <linux/compat.h>
 #include
 #include
 #include

diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 890a591f75dd..4a0998962fc6 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */

+#include <linux/compat.h>
 #include
 #include
 #include
@@ -25,7 +26,6 @@
 #include
 #include

-#include <asm/compat.h>
 #include
 #include
 #include
--
cgit v1.2.3
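The failure mode being avoided here is a plain C redefinition error: two headers each providing a static inline is_compat_task() stub cannot both land in one translation unit. A single-file simulation of the situation (standard C, not kernel code); re-enabling the second stub reproduces the build failure:

    #include <stdio.h>

    /* --- standing in for linux/compat.h --- */
    #ifndef CONFIG_COMPAT
    static inline int is_compat_task(void) { return 0; }
    #endif

    /* --- standing in for asm/compat.h: if this stub were still
     * present, any file pulling in both headers would fail with
     * "redefinition of 'is_compat_task'", which is exactly what
     * the patch avoids by keeping the stub in one place only. --- */
    #if 0   /* removed, as in the patch */
    static inline int is_compat_task(void) { return 0; }
    #endif

    int main(void)
    {
            printf("is_compat_task() = %d\n", is_compat_task());
            return 0;
    }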
From de8e380b1f7ba30ba9e6515b959bab2eef87ae3b Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 18:54:29 +0900
Subject: ftrace: make CALLER_ADDRx macros more generic

Most archs with HAVE_ARCH_CALLER_ADDR have almost the same definitions
of CALLER_ADDRx(n), so move them into linux/ftrace.h.

Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit f2f646bdd599cb551bfeb8f1934a9e14ccfeac84)
Signed-off-by: Alex Shi

Conflicts:
	arch/xtensa/include/asm/ftrace.h
---
 arch/arm/include/asm/ftrace.h      | 10 +---------
 arch/blackfin/include/asm/ftrace.h | 11 +----------
 arch/parisc/include/asm/ftrace.h   | 10 +---------
 arch/sh/include/asm/ftrace.h       | 10 +---------
 include/linux/ftrace.h             | 34 ++++++++++++++++++----------------
 5 files changed, 22 insertions(+), 53 deletions(-)

diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f89515adac60..eb577f4f5f70 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -52,15 +52,7 @@ extern inline void *return_address(unsigned int level)

 #endif

-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)

 #endif /* ifndef __ASSEMBLY__ */

diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 8a029505d7b7..2f1c3c2657ad 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -66,16 +66,7 @@ extern inline void *return_address(unsigned int level)

 #endif /* CONFIG_FRAME_POINTER */

-#define HAVE_ARCH_CALLER_ADDR
-
-/* inline function or macro may lead to unexpected result */
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)

 #endif /* __ASSEMBLY__ */

diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 72c0fafaa039..544ed8ef87eb 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -24,15 +24,7 @@ extern void return_to_handler(void);

 extern unsigned long return_address(unsigned int);

-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 return_address(1)
-#define CALLER_ADDR2 return_address(2)
-#define CALLER_ADDR3 return_address(3)
-#define CALLER_ADDR4 return_address(4)
-#define CALLER_ADDR5 return_address(5)
-#define CALLER_ADDR6 return_address(6)
+#define ftrace_return_address(n) return_address(n)

 #endif /* __ASSEMBLY__ */

diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 13e9966464c2..e79fb6ebaa42 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -40,15 +40,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 /* arch/sh/kernel/return_address.c */
 extern void *return_address(unsigned int);

-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)

 #endif /* __ASSEMBLY__ */

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f4233b195dab..1150c45384e5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -603,25 +603,27 @@ static inline void __ftrace_enabled_restore(int enabled)
 #endif
 }

-#ifndef HAVE_ARCH_CALLER_ADDR
+/* All archs should have this, but we define it for consistency */
+#ifndef ftrace_return_address0
+# define ftrace_return_address0 __builtin_return_address(0)
+#endif
+
+/* Archs may use other ways for ADDR1 and beyond */
+#ifndef ftrace_return_address
 # ifdef CONFIG_FRAME_POINTER
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+#  define ftrace_return_address(n) __builtin_return_address(n)
 # else
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 0UL
-# define CALLER_ADDR2 0UL
-# define CALLER_ADDR3 0UL
-# define CALLER_ADDR4 0UL
-# define CALLER_ADDR5 0UL
-# define CALLER_ADDR6 0UL
+#  define ftrace_return_address(n) 0UL
 # endif
-#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
+#endif
+
+#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
+#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
+#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
+#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
+#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
+#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

 #ifdef CONFIG_IRQSOFF_TRACER
 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
--
cgit v1.2.3
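With the macros centralized, an arch only has to provide ftrace_return_address(n) and the generic CALLER_ADDRx definitions follow. A hypothetical kernel-side helper showing how the macros are consumed (the %pS printk format resolves an address to a symbol name):

    #include <linux/ftrace.h>
    #include <linux/kernel.h>

    /* Hypothetical helper: report our caller and our caller's caller.
     * CALLER_ADDR0/1 expand to ftrace_return_address0 and
     * ftrace_return_address(1) via the generic definitions above. */
    static noinline void who_called_me(void)
    {
            pr_info("called from %pS, two levels up: %pS\n",
                    (void *)CALLER_ADDR0, (void *)CALLER_ADDR1);
    }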
From c0b39882849bbd4eb48620665307469237aa2e97 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Sat, 15 Mar 2014 14:40:43 +0900
Subject: arm64: Add 'notrace' attribute to unwind_frame() for ftrace

walk_stackframe() calls unwind_frame(), and if walk_stackframe() is
"notrace", unwind_frame() should also be "notrace".

Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit 8370369c5c33b9d573575249f080a905323d7eb4)
Signed-off-by: Alex Shi
---
 arch/arm64/kernel/stacktrace.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 38f0558f0c0a..55437ba1f5a4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -35,7 +35,7 @@
  *	ldp	x29, x30, [sp]
  *	add	sp, sp, #0x10
  */
-int unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
 {
 	unsigned long high, low;
 	unsigned long fp = frame->fp;
--
cgit v1.2.3
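The rule generalizes: anything on the tracer's own call path must be notrace, otherwise its compiler-inserted _mcount call re-enters the tracer and recurses. A small sketch of the attribute in use (the helper is hypothetical):

    #include <linux/compiler.h>     /* notrace */

    /* Hypothetical helper reachable from the tracer itself: notrace
     * suppresses the -pg instrumentation for this function, so it
     * cannot call back into the tracer. */
    static int notrace helper_used_by_tracer(int x)
    {
            return x * 2;
    }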
From d6de3f5c0fd18e13bbd786c410e2596f6389b475 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 18:54:32 +0900
Subject: ftrace: Add arm64 support to recordmcount

The recordmcount utility under scripts/ is run after compiling each
object to find all the locations that call _mcount() and put them into a
specific section named __mcount_loc. The linker then collects all such
information into a table in the kernel image (between __start_mcount_loc
and __stop_mcount_loc) for later use by ftrace.

This patch adds the arm64-specific definitions needed to identify such
locations. There are two implementations, in C and in Perl. On arm64,
only the C version is used to build the kernel now that
CONFIG_HAVE_C_RECORDMCOUNT is on, but the Perl version is also
maintained.

This patch also contains a workaround for the case where the host's
elf.h lacks definitions of EM_AARCH64 and R_AARCH64_ABS64; without them,
compiling the C version of recordmcount would fail.

Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit 6470430d6d73aa46e00cc28ee732b292538419c9)
Signed-off-by: Alex Shi

Conflicts:
	arch/arm64/Kconfig
---
 arch/arm64/Kconfig      | 2 ++
 scripts/recordmcount.c  | 7 +++++++
 scripts/recordmcount.pl | 5 +++++
 3 files changed, 14 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 27bbcfc7202a..340e344719c9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -27,12 +27,14 @@ config ARM64
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_MEMBLOCK

diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 9c22317778eb..e11aa4a156d2 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -40,6 +40,11 @@
 #define R_METAG_NONE 3
 #endif

+#ifndef EM_AARCH64
+#define EM_AARCH64	183
+#define R_AARCH64_ABS64	257
+#endif
+
 static int fd_map;	/* File descriptor for file being modified. */
 static int mmap_failed;	/* Boolean flag. */
 static void *ehdr_curr;	/* current ElfXX_Ehdr * for resource cleanup */
@@ -347,6 +352,8 @@ do_file(char const *const fname)
 	case EM_ARM:	 reltype = R_ARM_ABS32;
			 altmcount = "__gnu_mcount_nc";
			 break;
+	case EM_AARCH64:
+			 reltype = R_AARCH64_ABS64; gpfx = '_'; break;
 	case EM_IA_64:	 reltype = R_IA64_IMM64;   gpfx = '_'; break;
 	case EM_METAG:	 reltype = R_METAG_ADDR32;
			 altmcount = "_mcount_wrapper";

diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 91280b82da08..397b6b84e8c5 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -279,6 +279,11 @@ if ($arch eq "x86_64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24|THM_CALL)" .
			"\\s+(__gnu_mcount_nc|mcount)\$";

+} elsif ($arch eq "arm64") {
+    $alignment = 3;
+    $section_type = '%progbits';
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$";
+    $type = ".quad";
 } elsif ($arch eq "ia64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
     $type = "data8";
--
cgit v1.2.3
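The net effect of recordmcount is a table of _mcount call-site addresses bracketed by linker-provided symbols. A sketch of how such a table is walked at boot, in the style of what ftrace_init() does with it (the function name and pr_debug use are illustrative):

    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Section markers the linker emits around the table recordmcount
     * builds; see include/asm-generic/vmlinux.lds.h. */
    extern unsigned long __start_mcount_loc[];
    extern unsigned long __stop_mcount_loc[];

    /* Sketch: visit every recorded _mcount call site. */
    static void __init dump_mcount_sites(void)
    {
            unsigned long *p;

            for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
                    pr_debug("mcount call site at %pS\n", (void *)*p);
    }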
From 141a51565798c072310f3a9e91567ca729491ac7 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 18:54:33 +0900
Subject: arm64: Add ftrace support

This patch implements the arm64-specific parts needed to support
function tracers such as function (CONFIG_FUNCTION_TRACER),
function_graph (CONFIG_FUNCTION_GRAPH_TRACER) and function profiler
(CONFIG_FUNCTION_PROFILER).

With the 'function' tracer, all functions in the kernel are traced with
timestamps in ${sysfs}/tracing/trace. If the function_graph tracer is
specified, a call graph is generated.

The kernel must be compiled with the -pg option so that _mcount() is
inserted at the beginning of functions. This function is called on every
function's entry as long as tracing is enabled. In addition, the
function_graph tracer also needs to be able to probe a function's exit.
ftrace_graph_caller() and return_to_handler do this by faking the link
register's value to intercept the function's return path.

More details on architecture-specific requirements are described in
Documentation/trace/ftrace-design.txt.

Reviewed-by: Ganapatrao Kulkarni
Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit fc0c93936ae80bdffac7499a820d7e5105ef44d5)
Signed-off-by: Alex Shi
---
 arch/arm64/Kconfig               |   2 +
 arch/arm64/include/asm/ftrace.h  |  23 +++++
 arch/arm64/kernel/Makefile       |   4 +
 arch/arm64/kernel/arm64ksyms.c   |   4 +
 arch/arm64/kernel/entry-ftrace.S | 175 +++++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/ftrace.c       |  64 ++++++++++++++
 6 files changed, 272 insertions(+)
 create mode 100644 arch/arm64/include/asm/ftrace.h
 create mode 100644 arch/arm64/kernel/entry-ftrace.S
 create mode 100644 arch/arm64/kernel/ftrace.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 340e344719c9..6b3fef6f8343 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -35,6 +35,8 @@ config ARM64
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_MEMBLOCK

diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
new file mode 100644
index 000000000000..58ea5951b198
--- /dev/null
+++ b/arch/arm64/include/asm/ftrace.h
@@ -0,0 +1,23 @@
+/*
+ * arch/arm64/include/asm/ftrace.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_FTRACE_H
+#define __ASM_FTRACE_H
+
+#include <asm/insn.h>
+
+#define MCOUNT_ADDR		((unsigned long)_mcount)
+#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
+
+#ifndef __ASSEMBLY__
+extern void _mcount(unsigned long);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_FTRACE_H */

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 2d4554b13410..ac67fd0e9af1 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -5,6 +5,9 @@
 CPPFLAGS_vmlinux.lds	:= -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)

+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+
 # Object file lists.
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
@@ -13,6 +16,7 @@ arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\

 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o		\
					   sys_compat.o
+arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o

diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 338b568cd8ae..7f0512feaa13 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -56,3 +56,7 @@ EXPORT_SYMBOL(clear_bit);
 EXPORT_SYMBOL(test_and_clear_bit);
 EXPORT_SYMBOL(change_bit);
 EXPORT_SYMBOL(test_and_change_bit);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..b2d8c4559cd8
--- /dev/null
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -0,0 +1,175 @@
+/*
+ * arch/arm64/kernel/entry-ftrace.S
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+/*
+ * Gcc with -pg will put the following code in the beginning of each function:
+ *      mov x0, x30
+ *      bl _mcount
+ *	[function's body ...]
+ * "bl _mcount" may be replaced with "bl ftrace_caller" or NOP if dynamic
+ * ftrace is enabled.
+ *
+ * Please note that x0 as an argument will not be used here because we can
+ * get lr(x30) of instrumented function at any time by winding up call stack
+ * as long as the kernel is compiled without -fomit-frame-pointer.
+ * (or CONFIG_FRAME_POINTER, this is forced on arm64)
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp/fp =>  0:+-----+
+ * in _mcount()        | x29 | -> instrumented function's fp
+ *                     +-----+
+ *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
+ * old sp       => +16:+-----+
+ * when instrumented   |     |
+ * function calls      | ... |
+ * _mcount()           |     |
+ *                     |     |
+ * instrumented => +xx:+-----+
+ * function's fp       | x29 | -> parent's fp
+ *                     +-----+
+ *                     | x30 | -> instrumented function's lr (= parent's pc)
+ *                     +-----+
+ *                     | ... |
+ */
+
+	.macro mcount_enter
+	stp	x29, x30, [sp, #-16]!
+	mov	x29, sp
+	.endm
+
+	.macro mcount_exit
+	ldp	x29, x30, [sp], #16
+	ret
+	.endm
+
+	.macro mcount_adjust_addr rd, rn
+	sub	\rd, \rn, #AARCH64_INSN_SIZE
+	.endm
+
+	/* for instrumented function's parent */
+	.macro mcount_get_parent_fp reg
+	ldr	\reg, [x29]
+	ldr	\reg, [\reg]
+	.endm
+
+	/* for instrumented function */
+	.macro mcount_get_pc0 reg
+	mcount_adjust_addr	\reg, x30
+	.endm
+
+	.macro mcount_get_pc reg
+	ldr	\reg, [x29, #8]
+	mcount_adjust_addr	\reg, \reg
+	.endm
+
+	.macro mcount_get_lr reg
+	ldr	\reg, [x29]
+	ldr	\reg, [\reg, #8]
+	mcount_adjust_addr	\reg, \reg
+	.endm
+
+	.macro mcount_get_lr_addr reg
+	ldr	\reg, [x29]
+	add	\reg, \reg, #8
+	.endm
+
+/*
+ * void _mcount(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function makes calls, if enabled, to:
+ *     - tracer function to probe instrumented function's entry,
+ *     - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(_mcount)
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ldr	x0, =ftrace_trace_stop
+	ldr	x0, [x0]		// if ftrace_trace_stop
+	ret				//     return;
+#endif
+	mcount_enter
+
+	ldr	x0, =ftrace_trace_function
+	ldr	x2, [x0]
+	adr	x0, ftrace_stub
+	cmp	x0, x2			// if (ftrace_trace_function
+	b.eq	skip_ftrace_call	//     != ftrace_stub) {
+
+	mcount_get_pc	x0		//       function's pc
+	mcount_get_lr	x1		//       function's lr (= parent's pc)
+	blr	x2			//   (*ftrace_trace_function)(pc, lr);
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+skip_ftrace_call:			//   return;
+	mcount_exit			// }
+#else
+	mcount_exit			//   return;
+					// }
+skip_ftrace_call:
+	ldr	x1, =ftrace_graph_return
+	ldr	x2, [x1]		//   if ((ftrace_graph_return
+	cmp	x0, x2			//        != ftrace_stub)
+	b.ne	ftrace_graph_caller
+
+	ldr	x1, =ftrace_graph_entry	//     || (ftrace_graph_entry
+	ldr	x2, [x1]		//        != ftrace_graph_entry_stub))
+	ldr	x0, =ftrace_graph_entry_stub
+	cmp	x0, x2
+	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
+
+	mcount_exit
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ENDPROC(_mcount)
+
+ENTRY(ftrace_stub)
+	ret
+ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void ftrace_graph_caller(void)
+ *
+ * Called from _mcount() or ftrace_caller() when function_graph tracer is
+ * selected.
+ * This function w/ prepare_ftrace_return() fakes link register's value on
+ * the call stack in order to intercept instrumented function's return path
+ * and run return_to_handler() later on its exit.
+ */
+ENTRY(ftrace_graph_caller)
+	mcount_get_lr_addr	x0	//     pointer to function's saved lr
+	mcount_get_pc		x1	//     function's pc
+	mcount_get_parent_fp	x2	//     parent's fp
+	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)
+
+	mcount_exit
+ENDPROC(ftrace_graph_caller)
+
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller()
+ * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
+ */
+ENTRY(return_to_handler)
+	str	x0, [sp, #-16]!
+	mov	x0, x29			// parent's fp
+	bl	ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
+	mov	x30, x0			// restore the original return address
+	ldr	x0, [sp], #16
+	ret
+END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
new file mode 100644
index 000000000000..a559ab86999b
--- /dev/null
+++ b/arch/arm64/kernel/ftrace.c
@@ -0,0 +1,64 @@
+/*
+ * arch/arm64/kernel/ftrace.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/swab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * function_graph tracer expects ftrace_return_to_handler() to be called
+ * on the way back to parent. For this purpose, this function is called
+ * in _mcount() or ftrace_caller() to replace return address (*parent) on
+ * the call stack to return_to_handler.
+ *
+ * Note that @frame_pointer is used only for sanity check later.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+	unsigned long old;
+	struct ftrace_graph_ent trace;
+	int err;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Note:
+	 * No protection against faulting at *parent, which may be seen
+	 * on other archs. It's unlikely on AArch64.
+	 */
+	old = *parent;
+	*parent = return_hooker;
+
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		*parent = old;
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+				       frame_pointer);
+	if (err == -EBUSY) {
+		*parent = old;
+		return;
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
--
cgit v1.2.3
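Once this lands, the tracer is driven entirely through the tracefs files mentioned in the log. A user-space sketch that toggles the function tracer; it assumes tracefs/debugfs is mounted at /sys/kernel/debug/tracing and the kernel was built with CONFIG_FUNCTION_TRACER:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define TRACING "/sys/kernel/debug/tracing/"  /* adjust for your system */

    static int write_str(const char *file, const char *s)
    {
            char path[256];
            int fd;

            snprintf(path, sizeof(path), TRACING "%s", file);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            write(fd, s, strlen(s));
            close(fd);
            return 0;
    }

    int main(void)
    {
            /* Select the tracer this series implements, then enable it. */
            write_str("current_tracer", "function");
            write_str("tracing_on", "1");
            sleep(1);                       /* let some events accumulate */
            write_str("tracing_on", "0");
            puts("see " TRACING "trace for the captured call log");
            return 0;
    }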
From e4d7f7ff356d08c549db966bd0fcc39578ad7c02 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 18:54:34 +0900
Subject: arm64: ftrace: Add dynamic ftrace support

This patch allows "dynamic ftrace" if CONFIG_DYNAMIC_FTRACE is enabled.
Here we can turn tracing on and off dynamically on a per-function basis.

On arm64, this is done by patching the single branch instruction to
_mcount() inserted by gcc's -pg option. The branch is initially replaced
with a NOP at kernel start-up; later on, the NOP is turned into a branch
to ftrace_caller() when tracing is enabled, or back into a NOP when it is
disabled. Please note that ftrace_caller() is a counterpart of _mcount()
in the case of 'static' ftrace.

More details on architecture-specific requirements are described in
Documentation/trace/ftrace-design.txt.

Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit 95d7e5533398803e3ced340771cb3461c9443407)
Signed-off-by: Alex Shi

Conflicts:
	arch/arm64/Kconfig
---
 arch/arm64/Kconfig               |   1 +
 arch/arm64/include/asm/ftrace.h  |  15 ++++++
 arch/arm64/kernel/entry-ftrace.S |  43 +++++++++++++++
 arch/arm64/kernel/ftrace.c       | 112 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 171 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b3fef6f8343..9ddb5a6790be 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -34,6 +34,7 @@ config ARM64
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER

diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 58ea5951b198..ed5c448ece99 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -18,6 +18,21 @@

 #ifndef __ASSEMBLY__
 extern void _mcount(unsigned long);
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for arm64 */
+};
+
+extern unsigned long ftrace_graph_call;
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/*
+	 * addr is the address of the mcount call instruction.
+	 * recordmcount does the necessary offset calculation.
+	 */
+	return addr;
+}
 #endif /* __ASSEMBLY__ */

 #endif /* __ASM_FTRACE_H */

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index b2d8c4559cd8..b051871f2965 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -86,6 +86,7 @@
 	add	\reg, \reg, #8
 	.endm

+#ifndef CONFIG_DYNAMIC_FTRACE
 /*
  * void _mcount(unsigned long return_address)
  * @return_address: return address to instrumented function
@@ -134,6 +135,48 @@ skip_ftrace_call:
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 ENDPROC(_mcount)

+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * _mcount() is used to build the kernel with -pg option, but all the branch
+ * instructions to _mcount() are replaced with NOPs initially at kernel start
+ * up, and later on, a NOP is turned into a branch to ftrace_caller() when
+ * enabled, or back into a NOP when disabled, on a per-function basis.
+ */
+ENTRY(_mcount)
+	ret
+ENDPROC(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function is a counterpart of _mcount() in 'static' ftrace, and
+ * makes calls to:
+ *     - tracer function to probe instrumented function's entry,
+ *     - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(ftrace_caller)
+	mcount_enter
+
+	mcount_get_pc0	x0		//     function's pc
+	mcount_get_lr	x1		//     function's lr
+
+	.global ftrace_call
+ftrace_call:				// tracer(pc, lr);
+	nop				// This will be replaced with "bl xxx"
+					// where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.global ftrace_graph_call
+ftrace_graph_call:			// ftrace_graph_caller();
+	nop				// If enabled, this will be replaced
+					// with "b ftrace_graph_caller"
+#endif
+
+	mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 ENTRY(ftrace_stub)
 	ret
 ENDPROC(ftrace_stub)

diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index a559ab86999b..2e61b956e7f6 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -17,6 +17,87 @@
 #include
 #include

+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, a replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+			      bool validate)
+{
+	u32 replaced;
+
+	/*
+	 * Note:
+	 * Due to modules and __init, code can disappear and change,
+	 * we need to protect against faulting as well as code changing.
+	 * We do this by aarch64_insn_*() which use the probe_kernel_*().
+	 *
+	 * No lock is held here because all the modifications are run
+	 * through stop_machine().
+	 */
+	if (validate) {
+		if (aarch64_insn_read((void *)pc, &replaced))
+			return -EFAULT;
+
+		if (replaced != old)
+			return -EINVAL;
+	}
+	if (aarch64_insn_patch_text_nosync((void *)pc, new))
+		return -EPERM;
+
+	return 0;
+}
+
+/*
+ * Replace tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long pc;
+	u32 new;
+
+	pc = (unsigned long)&ftrace_call;
+	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+	return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_nop();
+	new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Turn off the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_branch_imm(pc, addr, true);
+	new = aarch64_insn_gen_nop();
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * function_graph tracer expects ftrace_return_to_handler() to be called
@@ -61,4 +142,35 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		return;
 	}
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+	unsigned long pc = (unsigned long)&ftrace_graph_call;
+	u32 branch, nop;
+
+	branch = aarch64_insn_gen_branch_imm(pc,
+			(unsigned long)ftrace_graph_caller, false);
+	nop = aarch64_insn_gen_nop();
+
+	if (enable)
+		return ftrace_modify_code(pc, nop, branch, true);
+	else
+		return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
--
cgit v1.2.3
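What dynamic ftrace buys is selective patching: only the call sites you name get their NOP rewritten into a branch to ftrace_caller(). A user-space sketch of that workflow via set_ftrace_filter; the mount point and the filtered function name (vfs_read) are assumptions about the running system:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    #define TRACING "/sys/kernel/debug/tracing/"  /* adjust for your system */

    static void write_file(const char *path, const char *s)
    {
            int fd = open(path, O_WRONLY);

            if (fd >= 0) {
                    write(fd, s, strlen(s));
                    close(fd);
            }
    }

    int main(void)
    {
            /* Restrict tracing to one function: the kernel patches just
             * that site via ftrace_make_call()/ftrace_make_nop(). */
            write_file(TRACING "set_ftrace_filter", "vfs_read");
            write_file(TRACING "current_tracer", "function");
            return 0;
    }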
From f3350a7ce92c1538e57c96c9a2bc2a842d899cd8 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 18:54:35 +0900
Subject: arm64: ftrace: Add CALLER_ADDRx macros

CALLER_ADDRx returns the caller's address at the specified level in the
call stack. These macros are used by several tracers, such as irqsoff
and preemptoff. Strangely, however, they are referred to even without
FTRACE.

Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit ea76465b82b819230ab16ab8ab1b460c09ed5b3b)
Signed-off-by: Alex Shi

Conflicts:
	arch/arm64/kernel/Makefile
---
 arch/arm64/include/asm/ftrace.h    |  5 +++-
 arch/arm64/kernel/Makefile         |  3 ++-
 arch/arm64/kernel/return_address.c | 55 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 61 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/kernel/return_address.c

diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index ed5c448ece99..41e8670db20f 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -18,6 +18,7 @@

 #ifndef __ASSEMBLY__
 extern void _mcount(unsigned long);
+extern void *return_address(unsigned int);

 struct dyn_arch_ftrace {
 	/* No extra data needed for arm64 */
@@ -33,6 +34,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	 */
 	return addr;
 }
-#endif /* __ASSEMBLY__ */
+
+#define ftrace_return_address(n) return_address(n)
+#endif /* ifndef __ASSEMBLY__ */

 #endif /* __ASM_FTRACE_H */

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index ac67fd0e9af1..b5bfa7fa80d5 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -7,12 +7,13 @@ AFLAGS_head.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)

 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_return_address.o = -pg

 # Object file lists.
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o cpu_ops.o insn.o
+			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o

 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o		\
					   sys_compat.o

diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
new file mode 100644
index 000000000000..89102a6ffad5
--- /dev/null
+++ b/arch/arm64/kernel/return_address.c
@@ -0,0 +1,55 @@
+/*
+ * arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/ftrace.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+	unsigned int level;
+	void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+	struct return_address_data *data = d;
+
+	if (!data->level) {
+		data->addr = (void *)frame->pc;
+		return 1;
+	} else {
+		--data->level;
+		return 0;
+	}
+}
+
+void *return_address(unsigned int level)
+{
+	struct return_address_data data;
+	struct stackframe frame;
+	register unsigned long current_sp asm ("sp");
+
+	data.level = level + 2;
+	data.addr = NULL;
+
+	frame.fp = (unsigned long)__builtin_frame_address(0);
+	frame.sp = current_sp;
+	frame.pc = (unsigned long)return_address; /* dummy */
+
+	walk_stackframe(&frame, save_return_addr, &data);
+
+	if (!data.level)
+		return data.addr;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL_GPL(return_address);
--
cgit v1.2.3
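The "level + 2" in return_address() accounts for the frames the walker itself adds before the caller-visible levels begin. The same bookkeeping shows up in a plain user-space analogue built on glibc's backtrace(3); link with -rdynamic to get symbol names:

    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* User-space analogue of return_address(): this helper's own frame
     * sits at the top of the capture, so "level n" for the caller means
     * skipping the frames we ourselves add. */
    static void *return_address_demo(unsigned int level)
    {
            void *frames[32];
            int n = backtrace(frames, 32);

            /* frame 0 is this function; frame 1 is our caller */
            return (level + 1 < (unsigned int)n) ? frames[level + 1] : NULL;
    }

    int main(void)
    {
            void *addr = return_address_demo(0);    /* expect main's frame */
            char **sym = backtrace_symbols(&addr, 1);

            if (sym) {
                    printf("level 0 caller: %s\n", sym[0]);
                    free(sym);
            }
            return 0;
    }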
From 302268d39b8109e3790b3a45b06e67bf46bcc744 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Wed, 30 Apr 2014 18:54:36 +0900
Subject: arm64: ftrace: Add system call tracepoint

This patch allows system call entry and exit to be traced as ftrace
events, i.e. sys_enter_*/sys_exit_*, if CONFIG_FTRACE_SYSCALLS is
enabled. Those events appear, and can be controlled, under
${sysfs}/tracing/events/syscalls/.

Please note that we can't trace compat system calls here because AArch32
mode does not share the same syscall table with AArch64. Just define
ARCH_TRACE_IGNORE_COMPAT_SYSCALLS in order to avoid unexpected results
(bogus syscalls reported or even hang-ups).

Acked-by: Will Deacon
Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit 1bda9b270a49edbe30c77f1ba160e6535a568d60)
Signed-off-by: Alex Shi
---
 arch/arm64/Kconfig               |  1 +
 arch/arm64/include/asm/ftrace.h  | 18 ++++++++++++++++++
 arch/arm64/include/asm/syscall.h |  1 +
 arch/arm64/include/asm/unistd.h  |  2 ++
 arch/arm64/kernel/ptrace.c       |  9 +++++++++
 5 files changed, 31 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ddb5a6790be..4ce0836e5c70 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -43,6 +43,7 @@ config ARM64
 	select HAVE_MEMBLOCK
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
+	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM

diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 41e8670db20f..c5534facf941 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -17,6 +17,8 @@
 #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

 #ifndef __ASSEMBLY__
+#include <linux/compat.h>
+
 extern void _mcount(unsigned long);
 extern void *return_address(unsigned int);

@@ -36,6 +38,22 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }

 #define ftrace_return_address(n) return_address(n)
+
+/*
+ * Because AArch32 mode does not share the same syscall table with AArch64,
+ * tracing compat syscalls may result in reporting bogus syscalls or even
+ * hang-up, so just do not trace them.
+ * See kernel/trace/trace_syscalls.c
+ *
+ * x86 code says:
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+	return is_compat_task();
+}
 #endif /* ifndef __ASSEMBLY__ */

 #endif /* __ASM_FTRACE_H */

diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 70ba9d4ee978..383771eb0b87 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -18,6 +18,7 @@

 #include

+extern const void *sys_call_table[];

 static inline int syscall_get_nr(struct task_struct *task,
				 struct pt_regs *regs)

diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 82ce217e94cf..c335479c2638 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -28,3 +28,5 @@
 #endif
 #define __ARCH_WANT_SYS_CLONE
 #include
+
+#define NR_syscalls (__NR_syscalls)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 4b58e812cf67..0bf195533088 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -42,6 +42,9 @@
 #include
 #include

+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 /*
  * TODO: does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
  */
@@ -1091,11 +1094,17 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_enter(regs, regs->syscallno);
+
 	return regs->syscallno;
 }

 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_exit(regs, regs_return_value(regs));
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 }
--
cgit v1.2.3
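With HAVE_SYSCALL_TRACEPOINTS selected, each syscall gets its own enable file under events/syscalls/. A user-space sketch that switches one of them on; the mount point and the choice of sys_enter_openat are assumptions about the running system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define EVT "/sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable"

    int main(void)
    {
            int fd = open(EVT, O_WRONLY);

            if (fd < 0) {
                    perror("open");  /* not mounted, or CONFIG_FTRACE_SYSCALLS off */
                    return 1;
            }
            write(fd, "1", 1);       /* start emitting sys_enter_openat events */
            close(fd);
            puts("events now visible in .../tracing/trace_pipe");
            return 0;
    }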
From f8c22758249bcb8f144aec27414a1a2f17ea75e0 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Thu, 29 May 2014 16:02:15 +0100
Subject: arm64: Add __ASSEMBLY__ guards to insn.h

Signed-off-by: Mark Brown
(cherry picked from LSK commit d10fbc12d0c4c6e872de2a342b88b896e430cea3)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/insn.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index c44ad39ed310..62e7b8bcd2dc 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -16,11 +16,14 @@
  */
 #ifndef __ASM_INSN_H
 #define __ASM_INSN_H
+
 #include <linux/types.h>

 /* A64 instructions are always 32 bits. */
 #define AARCH64_INSN_SIZE	4

+#ifndef __ASSEMBLY__
+
 /*
  * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  * Section C3.1 "A64 instruction index by encoding":
@@ -105,4 +108,6 @@ int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);

+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_INSN_H */
--
cgit v1.2.3

From 694fe636bb23b9fe02e5ff3143ae46607c6a46af Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro
Date: Fri, 30 May 2014 11:23:41 +0900
Subject: arm64: ftrace: (bugfix) sync with ftrace interface change

ftrace_init() fails because ftrace_dyn_arch_init() does not initialize
its argument to null. This bug shows up only when arm64 ftrace is
back-ported, as the ftrace_dyn_arch_init() interface was changed in the
3.15 merge window.

Signed-off-by: AKASHI Takahiro
Signed-off-by: Mark Brown
(cherry picked from LSK commit 0cc5286fc3ca12ec0d388e4d8c08a66b40d52233)
Signed-off-by: Alex Shi
---
 arch/arm64/kernel/ftrace.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 2e61b956e7f6..649890a3ac4e 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -94,6 +94,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,

 int __init ftrace_dyn_arch_init(void *data)
 {
+	*(unsigned long *)data = 0;
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
--
cgit v1.2.3
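The guard pattern from the insn.h patch is worth keeping at hand for any header shared between C and assembly: constants stay visible to both, while prototypes hide behind __ASSEMBLY__, which kbuild defines when assembling .S files. A generic sketch with made-up names:

    /* example.h -- a header shared between C and assembly files.
     * Sketch of the guard pattern the patch applies to insn.h. */
    #ifndef __EXAMPLE_H
    #define __EXAMPLE_H

    /* Plain constants are safe for both C and .S consumers. */
    #define EXAMPLE_INSN_SIZE	4

    #ifndef __ASSEMBLY__
    /* Anything the assembler cannot parse stays behind the guard. */
    int example_patch_text(void *addr, unsigned int insn);
    #endif /* __ASSEMBLY__ */

    #endif /* __EXAMPLE_H */

This is what lets entry-ftrace.S include asm/insn.h for AARCH64_INSN_SIZE without tripping over the C declarations.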