From af085d9084b48530153f51e6cad19fd0b1a13ed7 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 13 Feb 2017 19:42:28 -0600
Subject: stacktrace/x86: add function for detecting reliable stack traces

For live patching and possibly other use cases, a stack trace is only
useful if it can be assured that it's completely reliable.  Add a new
save_stack_trace_tsk_reliable() function to achieve that.

Note that if the target task isn't the current task, and the target task
is allowed to run, then it could be writing the stack while the unwinder
is reading it, resulting in possible corruption.  So the caller of
save_stack_trace_tsk_reliable() must ensure that the task is either
'current' or inactive.

save_stack_trace_tsk_reliable() relies on the x86 unwinder's detection of
pt_regs on the stack.  If the pt_regs are not user-mode registers from a
syscall, then they indicate an in-kernel interrupt or exception (e.g.
preemption or a page fault), in which case the stack is considered
unreliable due to the nature of frame pointers.

It also relies on the x86 unwinder's detection of other issues, such as:

- corrupted stack data
- stack grows the wrong way
- stack walk doesn't reach the bottom
- user didn't provide a large enough entries array

Such issues are reported by checking unwind_error() and !unwind_done().

Also add CONFIG_HAVE_RELIABLE_STACKTRACE so arch-independent code can
determine at build time whether the function is implemented.

Signed-off-by: Josh Poimboeuf
Reviewed-by: Miroslav Benes
Acked-by: Ingo Molnar # for the x86 changes
Signed-off-by: Jiri Kosina
---
 include/linux/stacktrace.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489a46b6..4205f71a5f0e 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -18,6 +18,8 @@ extern void save_stack_trace_regs(struct pt_regs *regs,
                                   struct stack_trace *trace);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
                                  struct stack_trace *trace);
+extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+                                         struct stack_trace *trace);
 
 extern void print_stack_trace(struct stack_trace *trace, int spaces);
 extern int snprint_stack_trace(char *buf, size_t size,
@@ -29,12 +31,13 @@ extern void save_stack_trace_user(struct stack_trace *trace);
 # define save_stack_trace_user(trace)                   do { } while (0)
 #endif
 
-#else
+#else /* !CONFIG_STACKTRACE */
 # define save_stack_trace(trace)                        do { } while (0)
 # define save_stack_trace_tsk(tsk, trace)               do { } while (0)
 # define save_stack_trace_user(trace)                   do { } while (0)
 # define print_stack_trace(trace, spaces)               do { } while (0)
 # define snprint_stack_trace(buf, size, trace, spaces)  do { } while (0)
-#endif
+# define save_stack_trace_tsk_reliable(tsk, trace)      ({ -ENOSYS; })
+#endif /* CONFIG_STACKTRACE */
 
-#endif
+#endif /* __LINUX_STACKTRACE_H */
--
cgit v1.2.3
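As a quick illustration of the intended calling convention, a consistency-model
user might drive the new interface roughly as follows.  The wrapper name, the
buffer size and the static buffer are illustrative assumptions for this sketch,
not part of the patch:

#include <linux/sched.h>
#include <linux/stacktrace.h>

#define EXAMPLE_MAX_STACK_ENTRIES 100

/* Hypothetical caller: returns 0 if 'task' has a fully reliable stack. */
static int example_check_task_stack(struct task_struct *task)
{
        /* static: kept off the kernel stack; assumes a single caller at a time */
        static unsigned long entries[EXAMPLE_MAX_STACK_ENTRIES];
        struct stack_trace trace = {
                .entries     = entries,
                .max_entries = EXAMPLE_MAX_STACK_ENTRIES,
        };
        int ret;

        /* The caller must guarantee 'task' is 'current' or inactive. */
        ret = save_stack_trace_tsk_reliable(task, &trace);
        if (ret)
                return ret;     /* -ENOSYS, or no reliable trace available */

        /* trace.entries[0..trace.nr_entries-1] can now be trusted. */
        return 0;
}

A nonzero return simply means "try again later or fall back to another
mechanism"; only a zero return lets the caller rely on the entries array.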
From 46c5a0113f843be5c55b1c40dd486538891156d4 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 13 Feb 2017 19:42:30 -0600
Subject: livepatch: create temporary klp_update_patch_state() stub

Create temporary stubs for klp_update_patch_state() so we can add
TIF_PATCH_PENDING to different architectures in separate patches without
breaking build bisectability.

Signed-off-by: Josh Poimboeuf
Reviewed-by: Petr Mladek
Signed-off-by: Jiri Kosina
---
 include/linux/livepatch.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9072f04db616..5cc20e588a22 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -123,10 +123,13 @@ void arch_klp_init_object_loaded(struct klp_patch *patch,
 int klp_module_coming(struct module *mod);
 void klp_module_going(struct module *mod);
 
+void klp_update_patch_state(struct task_struct *task);
+
 #else /* !CONFIG_LIVEPATCH */
 
 static inline int klp_module_coming(struct module *mod) { return 0; }
-static inline void klp_module_going(struct module *mod) { }
+static inline void klp_module_going(struct module *mod) {}
+static inline void klp_update_patch_state(struct task_struct *task) {}
 
 #endif /* CONFIG_LIVEPATCH */
--
cgit v1.2.3


From 0dade9f374f1c15f9b43ab01ab75a3b459bba5f6 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 13 Feb 2017 19:42:35 -0600
Subject: livepatch: separate enabled and patched states

Once we have a consistency model, patches and their objects will be
enabled and disabled at different times.  For example, when a patch is
disabled, its loaded objects' funcs can remain registered with ftrace
indefinitely until the unpatching operation is complete and they're no
longer in use.

It's less confusing if we give them different names: patches can be
enabled or disabled; objects (and their funcs) can be patched or
unpatched:

- Enabled means that a patch is logically enabled (but not necessarily
  fully applied).

- Patched means that an object's funcs are registered with ftrace and
  added to the klp_ops func stack.

Also, since these states are binary, represent them with booleans
instead of ints.
Signed-off-by: Josh Poimboeuf
Acked-by: Miroslav Benes
Reviewed-by: Petr Mladek
Reviewed-by: Kamalesh Babulal
Signed-off-by: Jiri Kosina
---
 include/linux/livepatch.h | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 5cc20e588a22..9787a63b57ac 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -28,11 +28,6 @@
 #include 
 
-enum klp_state {
-        KLP_DISABLED,
-        KLP_ENABLED
-};
-
 /**
  * struct klp_func - function structure for live patching
  * @old_name: name of the function to be patched
@@ -41,8 +36,8 @@ enum klp_state {
  *            can be found (optional)
  * @old_addr: the address of the function being patched
  * @kobj: kobject for sysfs resources
- * @state: tracks function-level patch application state
  * @stack_node: list node for klp_ops func_stack list
+ * @patched: the func has been added to the klp_ops list
  */
 struct klp_func {
        /* external */
@@ -60,8 +55,8 @@ struct klp_func {
        /* internal */
        unsigned long old_addr;
        struct kobject kobj;
-       enum klp_state state;
        struct list_head stack_node;
+       bool patched;
 };
 
 /**
@@ -71,7 +66,7 @@ struct klp_func {
  * @kobj: kobject for sysfs resources
  * @mod: kernel module associated with the patched object
  *       (NULL for vmlinux)
- * @state: tracks object-level patch application state
+ * @patched: the object's funcs have been added to the klp_ops list
  */
 struct klp_object {
        /* external */
@@ -81,7 +76,7 @@ struct klp_object {
        /* internal */
        struct kobject kobj;
        struct module *mod;
-       enum klp_state state;
+       bool patched;
 };
 
 /**
@@ -90,7 +85,7 @@ struct klp_object {
  * @objs: object entries for kernel objects to be patched
  * @list: list node for global list of registered patches
  * @kobj: kobject for sysfs resources
- * @state: tracks patch-level application state
+ * @enabled: the patch is enabled (but operation may be incomplete)
  */
 struct klp_patch {
        /* external */
@@ -100,7 +95,7 @@ struct klp_patch {
        /* internal */
        struct list_head list;
        struct kobject kobj;
-       enum klp_state state;
+       bool enabled;
 };
 
 #define klp_for_each_object(patch, obj) \
--
cgit v1.2.3
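For orientation, these structures are filled in by patch modules roughly as in
the sketch below, modeled on samples/livepatch/livepatch-sample.c; the patched
function and the klp_register_patch()/klp_enable_patch() calls come from the
pre-existing livepatch API and are not shown in the hunks above:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Illustrative replacement for the in-kernel cmdline_proc_show(). */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", "this has been live patched");
        return 0;
}

static struct klp_func funcs[] = {
        {
                .old_name = "cmdline_proc_show",
                .new_func = livepatch_cmdline_proc_show,
        }, { }
};

static struct klp_object objs[] = {
        {
                /* .name left NULL: the patched object is vmlinux */
                .funcs = funcs,
        }, { }
};

static struct klp_patch patch = {
        .mod  = THIS_MODULE,
        .objs = objs,
};

static int livepatch_example_init(void)
{
        int ret;

        /* Register, then flip 'enabled'; the core sets 'patched' per object. */
        ret = klp_register_patch(&patch);
        if (ret)
                return ret;
        ret = klp_enable_patch(&patch);
        if (ret) {
                WARN_ON(klp_unregister_patch(&patch));
                return ret;
        }
        return 0;
}

static void livepatch_example_exit(void)
{
        WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_example_init);
module_exit(livepatch_example_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");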
From f5e547f4ac785c65a39211f0b8e4ffc4fe09112d Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 13 Feb 2017 19:42:39 -0600
Subject: livepatch: store function sizes

For the consistency model we'll need to know the sizes of the old and
new functions to determine if they're on the stacks of any tasks.

Signed-off-by: Josh Poimboeuf
Acked-by: Miroslav Benes
Reviewed-by: Petr Mladek
Reviewed-by: Kamalesh Babulal
Signed-off-by: Jiri Kosina
---
 include/linux/livepatch.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include')

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9787a63b57ac..6602b34bed2b 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -37,6 +37,8 @@
  * @old_addr: the address of the function being patched
  * @kobj: kobject for sysfs resources
  * @stack_node: list node for klp_ops func_stack list
+ * @old_size: size of the old function
+ * @new_size: size of the new function
  * @patched: the func has been added to the klp_ops list
  */
 struct klp_func {
@@ -56,6 +58,7 @@
        unsigned long old_addr;
        struct kobject kobj;
        struct list_head stack_node;
+       unsigned long old_size, new_size;
        bool patched;
 };
--
cgit v1.2.3


From d83a7cb375eec21f04c83542395d08b2f6641da2 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 13 Feb 2017 19:42:40 -0600
Subject: livepatch: change to a per-task consistency model

Change livepatch to use a basic per-task consistency model.  This is the
foundation which will eventually enable us to patch those ~10% of
security patches which change function or data semantics.  This is the
biggest remaining piece needed to make livepatch more generally useful.

This code stems from the design proposal made by Vojtech [1] in November
2014.  It's a hybrid of kGraft and kpatch: it uses kGraft's per-task
consistency and syscall barrier switching combined with kpatch's stack
trace switching.  There are also a number of fallback options which make
it quite flexible.

Patches are applied on a per-task basis, when the task is deemed safe to
switch over.  When a patch is enabled, livepatch enters into a transition
state where tasks are converging to the patched state.  Usually this
transition state can complete in a few seconds.  The same sequence occurs
when a patch is disabled, except the tasks converge from the patched
state to the unpatched state.

An interrupt handler inherits the patched state of the task it
interrupts.  The same is true for forked tasks: the child inherits the
patched state of the parent.

Livepatch uses several complementary approaches to determine when it's
safe to patch tasks:

1. The first and most effective approach is stack checking of sleeping
   tasks.  If no affected functions are on the stack of a given task,
   the task is patched.  In most cases this will patch most or all of
   the tasks on the first try.  Otherwise it'll keep trying
   periodically.  This option is only available if the architecture has
   reliable stacks (HAVE_RELIABLE_STACKTRACE).

2. The second approach, if needed, is kernel exit switching.  A task is
   switched when it returns to user space from a system call, a user
   space IRQ, or a signal.  It's useful in the following cases:

   a) Patching I/O-bound user tasks which are sleeping on an affected
      function.  In this case you have to send SIGSTOP and SIGCONT to
      force it to exit the kernel and be patched.

   b) Patching CPU-bound user tasks.  If the task is highly CPU-bound
      then it will get patched the next time it gets interrupted by an
      IRQ.

   c) In the future it could be useful for applying patches for
      architectures which don't yet have HAVE_RELIABLE_STACKTRACE.  In
      this case you would have to signal most of the tasks on the
      system.  However this isn't supported yet because there's
      currently no way to patch kthreads without
      HAVE_RELIABLE_STACKTRACE.
3. For idle "swapper" tasks, since they don't ever exit the kernel, they
   instead have a klp_update_patch_state() call in the idle loop which
   allows them to be patched before the CPU enters the idle state.

   (Note there's not yet such an approach for kthreads.)
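A rough sketch of how approach 1 can be built from the pieces added earlier in
this series (the reliable stack trace plus old_size/new_size); the helper names
and the simplification of rejecting both the old and the new function ranges
are assumptions for illustration, not the exact livepatch code:

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/livepatch.h>

/*
 * Sketch: true if 'addr' lies inside the old or new version of a patched
 * function.  A task with such an address on its stack is not yet safe to
 * switch.
 */
static bool example_func_on_stack(struct klp_func *func, unsigned long addr)
{
        unsigned long old_start = func->old_addr;
        unsigned long old_end   = old_start + func->old_size;
        unsigned long new_start = (unsigned long)func->new_func;
        unsigned long new_end   = new_start + func->new_size;

        return (addr >= old_start && addr < old_end) ||
               (addr >= new_start && addr < new_end);
}

/* Sketch: 0 if 'task' can be switched now, nonzero to retry later. */
static int example_check_task(struct task_struct *task, struct klp_patch *patch,
                              unsigned long *entries, unsigned int max)
{
        struct stack_trace trace = {
                .entries     = entries,
                .max_entries = max,
        };
        struct klp_object *obj;
        struct klp_func *func;
        unsigned int i;
        int ret;

        ret = save_stack_trace_tsk_reliable(task, &trace);
        if (ret)
                return ret;             /* no reliable trace, try again later */

        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        for (i = 0; i < trace.nr_entries; i++)
                                if (example_func_on_stack(func, trace.entries[i]))
                                        return -EBUSY;

        return 0;       /* no affected function on this task's stack */
}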
All the above approaches may be skipped by setting the 'immediate' flag
in the 'klp_patch' struct, which will disable per-task consistency and
patch all tasks immediately.  This can be useful if the patch doesn't
change any function or data semantics.  Note that, even with this flag
set, it's possible that some tasks may still be running with an old
version of the function, until that function returns.

There's also an 'immediate' flag in the 'klp_func' struct which allows
you to specify that certain functions in the patch can be applied
without per-task consistency.  This might be useful if you want to patch
a common function like schedule(), and the function change doesn't need
consistency but the rest of the patch does.

For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user
must set patch->immediate which causes all tasks to be patched
immediately.  This option should be used with care, only when the patch
doesn't change any function or data semantics.

In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE
may be allowed to use per-task consistency if we can come up with
another way to patch kthreads.

The /sys/kernel/livepatch/<patch>/transition file shows whether a patch
is in transition.  Only a single patch (the topmost patch on the stack)
can be in transition at a given time.  A patch can remain in transition
indefinitely, if any of the tasks are stuck in the initial patch state.

A transition can be reversed and effectively canceled by writing the
opposite value to the /sys/kernel/livepatch/<patch>/enabled file while
the transition is in progress.  Then all the tasks will attempt to
converge back to the original patch state.

[1] https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz

Signed-off-by: Josh Poimboeuf
Acked-by: Miroslav Benes
Acked-by: Ingo Molnar # for the scheduler changes
Signed-off-by: Jiri Kosina
---
 include/linux/init_task.h |  9 +++++++++
 include/linux/livepatch.h | 42 +++++++++++++++++++++++++++++++++++++++++-
 include/linux/sched.h     |  3 +++
 3 files changed, 53 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 91d9049f0039..5a791055b176 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include <linux/livepatch.h>
 #include 
 #include 
@@ -202,6 +203,13 @@ extern struct cred init_cred;
 # define INIT_KASAN(tsk)
 #endif
 
+#ifdef CONFIG_LIVEPATCH
+# define INIT_LIVEPATCH(tsk)                                    \
+       .patch_state = KLP_UNDEFINED,
+#else
+# define INIT_LIVEPATCH(tsk)
+#endif
+
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 # define INIT_TASK_TI(tsk)                      \
        .thread_info = INIT_THREAD_INFO(tsk),   \
@@ -288,6 +296,7 @@ extern struct cred init_cred;
        INIT_VTIME(tsk)                                         \
        INIT_NUMA_BALANCING(tsk)                                \
        INIT_KASAN(tsk)                                         \
+       INIT_LIVEPATCH(tsk)                                     \
 }

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 6602b34bed2b..ed90ad1605c1 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -28,18 +28,40 @@
 #include 
 
+/* task patch states */
+#define KLP_UNDEFINED   -1
+#define KLP_UNPATCHED    0
+#define KLP_PATCHED      1
+
 /**
  * struct klp_func - function structure for live patching
  * @old_name: name of the function to be patched
  * @new_func: pointer to the patched function code
  * @old_sympos: a hint indicating which symbol position the old function
  *              can be found (optional)
+ * @immediate: patch the func immediately, bypassing safety mechanisms
  * @old_addr: the address of the function being patched
  * @kobj: kobject for sysfs resources
  * @stack_node: list node for klp_ops func_stack list
  * @old_size: size of the old function
  * @new_size: size of the new function
  * @patched: the func has been added to the klp_ops list
+ * @transition: the func is currently being applied or reverted
+ *
+ * The patched and transition variables define the func's patching state.  When
+ * patching, a func is always in one of the following states:
+ *
+ *   patched=0 transition=0: unpatched
+ *   patched=0 transition=1: unpatched, temporary starting state
+ *   patched=1 transition=1: patched, may be visible to some tasks
+ *   patched=1 transition=0: patched, visible to all tasks
+ *
+ * And when unpatching, it goes in the reverse order:
+ *
+ *   patched=1 transition=0: patched, visible to all tasks
+ *   patched=1 transition=1: patched, may be visible to some tasks
+ *   patched=0 transition=1: unpatched, temporary ending state
+ *   patched=0 transition=0: unpatched
  */
 struct klp_func {
        /* external */
@@ -53,6 +75,7 @@ struct klp_func {
         * in kallsyms for the given object is used.
         */
        unsigned long old_sympos;
+       bool immediate;
 
        /* internal */
        unsigned long old_addr;
        struct kobject kobj;
        struct list_head stack_node;
        unsigned long old_size, new_size;
        bool patched;
+       bool transition;
 };
 
 /**
@@ -68,7 +92,7 @@ struct klp_func {
  * @funcs: function entries for functions to be patched in the object
  * @kobj: kobject for sysfs resources
  * @mod: kernel module associated with the patched object
- *        (NULL for vmlinux)
+ *       (NULL for vmlinux)
  * @patched: the object's funcs have been added to the klp_ops list
  */
 struct klp_object {
@@ -86,6 +110,7 @@ struct klp_object {
  * struct klp_patch - patch structure for live patching
  * @mod: reference to the live patch module
  * @objs: object entries for kernel objects to be patched
+ * @immediate: patch all funcs immediately, bypassing safety mechanisms
  * @list: list node for global list of registered patches
  * @kobj: kobject for sysfs resources
  * @enabled: the patch is enabled (but operation may be incomplete)
@@ -94,6 +119,7 @@ struct klp_patch {
        /* external */
        struct module *mod;
        struct klp_object *objs;
+       bool immediate;
 
        /* internal */
        struct list_head list;
@@ -121,13 +147,27 @@ void arch_klp_init_object_loaded(struct klp_patch *patch,
 int klp_module_coming(struct module *mod);
 void klp_module_going(struct module *mod);
 
+void klp_copy_process(struct task_struct *child);
 void klp_update_patch_state(struct task_struct *task);
 
+static inline bool klp_patch_pending(struct task_struct *task)
+{
+       return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
+}
+
+static inline bool klp_have_reliable_stack(void)
+{
+       return IS_ENABLED(CONFIG_STACKTRACE) &&
+              IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
+}
+
 #else /* !CONFIG_LIVEPATCH */
 
 static inline int klp_module_coming(struct module *mod) { return 0; }
 static inline void klp_module_going(struct module *mod) {}
+static inline bool klp_patch_pending(struct task_struct *task) { return false; }
 static inline void klp_update_patch_state(struct task_struct *task) {}
+static inline void klp_copy_process(struct task_struct *child) {}
 
 #endif /* CONFIG_LIVEPATCH */

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d67eee84fd43..e11032010318 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1037,6 +1037,9 @@ struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
        /* A live task holds one reference: */
        atomic_t                        stack_refcount;
+#endif
+#ifdef CONFIG_LIVEPATCH
+       int patch_state;
 #endif
        /* CPU-specific state of this task: */
        struct thread_struct            thread;
--
cgit v1.2.3
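A safe switching point built on the new helpers might look roughly like the
sketch below; the wrapper function is hypothetical, while the underlying idea
(check TIF_PATCH_PENDING via klp_patch_pending(), then converge the task) is
what the idle loop and the architectures' exit-to-user-space paths do per the
commit message above:

#include <linux/sched.h>
#include <linux/livepatch.h>

/*
 * Hypothetical safe switching point.  TIF_PATCH_PENDING stays set while
 * 'current' still needs to cross over to the target patch state; the call
 * below performs the switch and clears the flag.
 */
static void example_safe_switch_point(void)
{
        if (unlikely(klp_patch_pending(current)))
                klp_update_patch_state(current);
}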
From 3ec24776bfd09668079df7dca0c0136d80820ab4 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 6 Mar 2017 11:20:29 -0600
Subject: livepatch: allow removal of a disabled patch

Currently we do not allow a patch module to unload, since there is no
method to determine if a task is still running in the patched code.

The consistency model gives us a way, because when the unpatching
finishes we know that all tasks were marked as safe to call an original
function.  Thus every new call to the function calls the original code
and at the same time no task can be somewhere in the patched code,
because it had to leave that code to be marked as safe.  We can safely
let the patch module go after that.

Completion is used for synchronization between module removal and sysfs
infrastructure in a similar way to commit 942e443127e9 ("module: Fix
mod->mkobj.kobj potentially freed too early").

Note that we still do not allow the removal for the immediate model,
that is, no consistency model.  The module refcount may increase in this
case if somebody disables and enables the patch several times.  This
should not cause any harm.

With this change a call to try_module_get() is moved to
__klp_enable_patch from klp_register_patch to make module reference
counting symmetric (module_put() is in a patch disable path) and to
allow taking a new reference to a disabled module when it is being
enabled.

Finally, we need to be very careful about possible races between
klp_unregister_patch(), kobject_put() functions and operations on the
related sysfs files.  kobject_put(&patch->kobj) must be called without
klp_mutex.  Otherwise, it might be blocked by enabled_store() that needs
the mutex as well.  In addition, enabled_store() must check if the patch
was not unregistered in the meantime.  There is no need to do the same
for other kobject_put() callsites at the moment.  Their sysfs operations
neither take the lock nor access any data that might be freed in the
meantime.

There was an attempt to use kobjects the right way and prevent these
races by design.  But it made the patch definition more complicated and
opened another can of worms.  See
https://lkml.kernel.org/r/1464018848-4303-1-git-send-email-pmladek@suse.com

[Thanks to Petr Mladek for improving the commit message.]

Signed-off-by: Miroslav Benes
Signed-off-by: Josh Poimboeuf
Reviewed-by: Petr Mladek
Acked-by: Miroslav Benes
Signed-off-by: Jiri Kosina
---
 include/linux/livepatch.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include')

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index ed90ad1605c1..194991ef9347 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -23,6 +23,7 @@
 #include 
 #include 
+#include <linux/completion.h>
 
 #if IS_ENABLED(CONFIG_LIVEPATCH)
 
@@ -114,6 +115,7 @@ struct klp_object {
  * @list: list node for global list of registered patches
  * @kobj: kobject for sysfs resources
  * @enabled: the patch is enabled (but operation may be incomplete)
+ * @finish: for waiting till it is safe to remove the patch module
  */
 struct klp_patch {
        /* external */
@@ -125,6 +127,7 @@ struct klp_patch {
        struct list_head list;
        struct kobject kobj;
        bool enabled;
+       struct completion finish;
 };
--
cgit v1.2.3
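The completion-based synchronization described above follows the pattern of
commit 942e443127e9.  A rough sketch of the ordering only, with illustrative
function names rather than the actual livepatch core code:

#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/livepatch.h>

/*
 * Sketch of the teardown ordering.  Registration is assumed to have done
 * init_completion(&patch->finish).  The kobject release callback fires once
 * the last sysfs reference is gone, and the unregister path waits on
 * patch->finish before the patch module may go away.
 */
static void example_patch_kobj_release(struct kobject *kobj)
{
        struct klp_patch *patch = container_of(kobj, struct klp_patch, kobj);

        complete(&patch->finish);
}

static void example_unregister_patch(struct klp_patch *patch)
{
        /* Drop the sysfs reference outside klp_mutex (see the race note above). */
        kobject_put(&patch->kobj);

        /* Only after the release callback has run is module removal safe. */
        wait_for_completion(&patch->finish);
}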