author		Masami Hiramatsu <mhiramat@kernel.org>		2019-12-02 16:32:13 +0900
committer	Anders Roxell <anders.roxell@linaro.org>	2019-12-02 11:21:14 +0100
commit		85c01910621c61e1e50b1d9e7dc46b2a7374b754 (patch)
tree		6a4b41bb230409b0e3017d410eb575654cd251e2
parent		1cf4d5a903a124e4e2ef205aaf20fb044f54429a (diff)
kprobes: Lock rcu_read_lock() while searching kprobe (frag-next-20191202)
Anders reported that lockdep warns about suspicious RCU list usage in
register_kprobe() (detected by CONFIG_PROVE_RCU_LIST). This is because
get_kprobe() accesses kprobe_table[] via hlist_for_each_entry_rcu()
without holding rcu_read_lock().
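For context, the lookup in question is essentially the following
(simplified sketch of kernel/kprobes.c, unrelated details omitted;
not the verbatim source):

    struct kprobe *get_kprobe(void *addr)
    {
            struct hlist_head *head;
            struct kprobe *p;

            head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
            /* RCU hlist walk with neither rcu_read_lock() held nor a
             * lockdep condition passed: CONFIG_PROVE_RCU_LIST warns here */
            hlist_for_each_entry_rcu(p, head, hlist) {
                    if (p->addr == addr)
                            return p;
            }

            return NULL;
    }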
When get_kprobe() is called from the breakpoint handler context, it
runs with preemption disabled, so this is not a problem. In the other
cases, we hold kprobe_mutex instead of rcu_read_lock(), which
guarantees that kprobe_table[] is not updated underneath us. So the
current code is safe in practice, but still incorrect from the RCU
point of view.
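To illustrate, the two call contexts look roughly like this (sketch
only; lookup_from_handler()/lookup_under_mutex() are hypothetical
names, only the locking pattern matters):

    /* (a) breakpoint handler path: preemption is already disabled,
     *     which acts as an RCU read-side critical section here, so
     *     the hlist walk cannot race with a removal */
    static struct kprobe *lookup_from_handler(void *addr)
    {
            WARN_ON_ONCE(preemptible());
            return get_kprobe(addr);
    }

    /* (b) (un)registration path: kprobe_mutex serializes all writers
     *     of kprobe_table[], so entries cannot be unlinked under us */
    static struct kprobe *lookup_under_mutex(void *addr)
    {
            lockdep_assert_held(&kprobe_mutex);
            return get_kprobe(addr);
    }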
Let's take rcu_read_lock() around get_kprobe() and assert that
kprobe_mutex is held at those call sites.
Note that rcu_read_unlock() can safely be called right after accessing
the list, because the found kprobe cannot go away before kprobe_mutex
is released. A caller that does not hold kprobe_mutex must instead
keep rcu_read_lock() held until it has finished operating on that
kprobe.
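Concretely, the pattern applied at the mutex-protected call sites in
the diff below is (sketch; 'p' and 'addr' are placeholders):

    lockdep_assert_held(&kprobe_mutex);
    rcu_read_lock();
    p = get_kprobe(addr);
    rcu_read_unlock();  /* safe: kprobe_mutex keeps p alive */
    /* p may still be used here, up until kprobe_mutex is dropped */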
Reported-by: Anders Roxell <anders.roxell@linaro.org>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
-rw-r--r--	kernel/kprobes.c	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5a1fa9d346e8..11f831d473d6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,6 +319,7 @@ static inline void reset_kprobe_instance(void)
  *	- under the kprobe_mutex - during kprobe_[un]register()
  *				OR
  *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
+ * In both cases, caller must disable preempt (or acquire rcu_read_lock)
  */
 struct kprobe *get_kprobe(void *addr)
 {
@@ -435,6 +436,7 @@ static int kprobe_queued(struct kprobe *p)
 /*
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
+ * This must be called with locking kprobe_mutex.
  */
 static struct kprobe *get_optimized_kprobe(unsigned long addr)
 {
@@ -442,9 +444,12 @@ static struct kprobe *get_optimized_kprobe(unsigned long addr)
 	struct kprobe *p = NULL;
 	struct optimized_kprobe *op;
 
+	lockdep_assert_held(&kprobe_mutex);
+	rcu_read_lock();
 	/* Don't check i == 0, since that is a breakpoint case. */
 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
 		p = get_kprobe((void *)(addr - i));
+	rcu_read_unlock(); /* We are safe because kprobe_mutex is held */
 
 	if (p && kprobe_optready(p)) {
 		op = container_of(p, struct optimized_kprobe, kp);
@@ -1478,18 +1483,21 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 {
 	struct kprobe *ap, *list_p;
 
+	lockdep_assert_held(&kprobe_mutex);
+	rcu_read_lock();
 	ap = get_kprobe(p->addr);
 	if (unlikely(!ap))
-		return NULL;
+		goto out;
 
 	if (p != ap) {
 		list_for_each_entry_rcu(list_p, &ap->list, list)
 			if (list_p == p)
 			/* kprobe p is a valid probe */
-				goto valid;
-		return NULL;
+				goto out;
+		ap = NULL;
 	}
-valid:
+out:
+	rcu_read_unlock(); /* We are safe because kprobe_mutex is held */
 	return ap;
 }
 
@@ -1602,7 +1610,9 @@ int register_kprobe(struct kprobe *p)
 
 	mutex_lock(&kprobe_mutex);
+	rcu_read_lock();
 	old_p = get_kprobe(p->addr);
+	rcu_read_unlock(); /* We are safe because kprobe_mutex is held */
 	if (old_p) {
 		/* Since this may unoptimize old_p, locking text_mutex. */
 		ret = register_aggr_kprobe(old_p, p);