author     Linus Torvalds <torvalds@linux-foundation.org>   2013-06-07 18:46:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-06-07 18:46:51 -0700
commit     14d0ee051753f30e23033bdb153110f00b03c8ce (patch)
tree       88a19ee630628e8904b96b586521d3f3755c14b8 /kernel
parent     ea7f665612fcc73da6b7698f468cd5fc03a30d47 (diff)
parent     f17a5194859a82afe4164e938b92035b86c55794 (diff)
Merge tag 'trace-fixes-v3.10-rc3-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
 "This contains 4 fixes.

  The first two fix the case where enabling function tracing with full
  RCU debugging enabled causes a live lock of the system.  This is due
  to the debug checks added to rcu_dereference_raw(), which is used by
  the function tracer.  These checks are themselves traced by the
  function tracer, and they add enough overhead that servicing one
  interrupt can take longer than the interval before the next interrupt
  is triggered, causing a live lock from the timer interrupt.  Talking
  this over with Paul McKenney, we came up with a fix that adds a new
  rcu_dereference_raw_notrace() that does not perform these added
  checks, and lets the function tracer use that.

  The third commit fixes a compile failure when branch tracing is
  enabled, due to the conversion of trace_test_buffer(), which the
  branch tracer's selftest wasn't converted for.

  The fourth patch fixes a bug caught by the RCU lockdep code, where an
  rcu_read_lock() is performed while RCU is disabled (either going to
  or from idle, or user space).  This happened in the irqsoff tracer,
  as it calls task_uid().  The fix is to use current_uid() where
  possible, which does not use RCU locking.  Luckily, that path is
  always the one taken when irqsoff calls this code."

* tag 'trace-fixes-v3.10-rc3-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Use current_uid() for critical time tracing
  tracing: Fix bad parameter passed in branch selftest
  ftrace: Use the rcu _notrace variants for rcu_dereference_raw() and friends
  rcu: Add _notrace variation of rcu_dereference_raw() and hlist_for_each_entry_rcu()
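As background for the first two fixes, here is a small standalone userspace
sketch of the idea.  All names are illustrative stand-ins, not the kernel's
actual definitions: the checked dereference makes a function call into debug
code, which the function tracer would itself trace and recurse into, while
the _notrace variant is a plain load with no traceable calls.

/* Illustrative stand-ins only; not the kernel's real macros. */
#include <stdio.h>

/* Stand-in for the lockdep/RCU debug check: a real function call
 * that the function tracer would trace (and thus recurse into). */
static void fake_rcu_lockdep_check(void)
{
        fprintf(stderr, "debug check ran (traceable call)\n");
}

/* Checked variant: the raw load plus a traceable debug call. */
#define fake_rcu_dereference_raw(p) \
        ({ fake_rcu_lockdep_check(); *(volatile __typeof__(p) *)&(p); })

/* _notrace variant: the raw load only, safe to use from within
 * the function tracer itself. */
#define fake_rcu_dereference_raw_notrace(p) \
        (*(volatile __typeof__(p) *)&(p))

int main(void)
{
        int val = 42;
        int *gp = &val;         /* stands in for an RCU-protected pointer */

        int *a = fake_rcu_dereference_raw(gp);          /* extra traceable call */
        int *b = fake_rcu_dereference_raw_notrace(gp);  /* plain load only */

        printf("%d %d\n", *a, *b);
        return 0;
}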
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ftrace.c          | 18 +++++++++---------
-rw-r--r--  kernel/trace/trace.c           | 10 +++++++++-
-rw-r--r--  kernel/trace/trace_selftest.c  |  2 +-
3 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b97..6c508ff33c6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)                 \
-        op = rcu_dereference_raw(list);                 \
+        op = rcu_dereference_raw_notrace(list);         \
         do

 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)                            \
-        while (likely(op = rcu_dereference_raw((op)->next)) &&  \
+        while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
                unlikely((op) != &ftrace_list_end))

 static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
         if (hlist_empty(hhd))
                 return NULL;

-        hlist_for_each_entry_rcu(rec, hhd, node) {
+        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                 if (rec->ip == ip)
                         return rec;
         }
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
         hhd = &hash->buckets[key];

-        hlist_for_each_entry_rcu(entry, hhd, hlist) {
+        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                 if (entry->ip == ip)
                         return entry;
         }
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
         struct ftrace_hash *notrace_hash;
         int ret;

-        filter_hash = rcu_dereference_raw(ops->filter_hash);
-        notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+        filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+        notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

         if ((ftrace_hash_empty(filter_hash) ||
              ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
          * on the hash. rcu_read_lock is too dangerous here.
          */
         preempt_disable_notrace();
-        hlist_for_each_entry_rcu(entry, hhd, node) {
+        hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
                 if (entry->ip == ip)
                         entry->ops->func(ip, parent_ip, &entry->data);
         }
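The two macros patched above are meant to be used as a bracketing pair
around a loop body.  A hypothetical caller, loosely modeled on ftrace's
internal list walk (the real callback signature and list name may differ),
would look like:

do_for_each_ftrace_op(op, ftrace_ops_list) {
        /* every step of the walk now goes through the _notrace variant,
         * so the walk itself generates no traceable debug calls */
        if (ftrace_ops_test(op, ip))
                op->func(ip, parent_ip);
} while_for_each_ftrace_op(op);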
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4d79485b323..1a41023a1f8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -843,7 +843,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
         max_data->pid = tsk->pid;
-        max_data->uid = task_uid(tsk);
+        /*
+         * If tsk == current, then use current_uid(), as that does not use
+         * RCU. The irq tracer can be called out of RCU scope.
+         */
+        if (tsk == current)
+                max_data->uid = current_uid();
+        else
+                max_data->uid = task_uid(tsk);
+
         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
         max_data->policy = tsk->policy;
         max_data->rt_priority = tsk->rt_priority;
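Why the tsk == current case can skip RCU: a task's own credentials are only
changed by that task itself, so current can read them directly, while another
task's credentials may be swapped concurrently and must be dereferenced
inside an RCU read-side section.  A simplified sketch of the two access
patterns (the real helpers live in include/linux/cred.h; details elided):

#include <linux/cred.h>
#include <linux/sched.h>

/* Arbitrary task: its creds can be changed out from under us, so the
 * dereference must sit inside an RCU read-side critical section. */
static kuid_t sketch_task_uid(struct task_struct *tsk)
{
        kuid_t uid;

        rcu_read_lock();
        uid = __task_cred(tsk)->uid;
        rcu_read_unlock();
        return uid;
}

/* Current task: its own creds only change from within the task, so no
 * rcu_read_lock() is needed -- which is what makes this safe in places
 * where RCU is not watching (going to/from idle or user space). */
static kuid_t sketch_current_uid(void)
{
        return current_cred()->uid;
}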
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967..2901e3b8859 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
         /* stop the tracing. */
         tracing_stop();
         /* check the trace buffer */
-        ret = trace_test_buffer(tr, &count);
+        ret = trace_test_buffer(&tr->trace_buffer, &count);
         trace->reset(tr);
         tracing_start();