path: root/kernel/context_tracking.c
author      Linus Torvalds <torvalds@linux-foundation.org>   2013-06-20 08:18:35 -1000
committer   Linus Torvalds <torvalds@linux-foundation.org>   2013-06-20 08:18:35 -1000
commit      a3d5c3460a86f52ea435b3fb98be112bd18faabc (patch)
tree        48a32968b569af0e0f0af1def3effa0770710fea /kernel/context_tracking.c
parent      86c76676cfdbf283f6131d5a2783bed3f3d490ea (diff)
parent      29bb9e5a75684106a37593ad75ec75ff8312731b (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two smaller fixes - plus a context tracking tracing fix that is a bit bigger"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tracing/context-tracking: Add preempt_schedule_context() for tracing
  sched: Fix clear NOHZ_BALANCE_KICK
  sched/x86: Construct all sibling maps if smt
Diffstat (limited to 'kernel/context_tracking.c')
-rw-r--r--   kernel/context_tracking.c   40
1 file changed, 40 insertions, 0 deletions
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 85bdde1137e..383f8231e43 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -70,6 +70,46 @@ void user_enter(void)
local_irq_restore(flags);
}
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+ struct thread_info *ti = current_thread_info();
+ enum ctx_state prev_ctx;
+
+ if (likely(ti->preempt_count || irqs_disabled()))
+ return;
+
+ /*
+ * Need to disable preemption in case user_exit() is traced
+ * and the tracer calls preempt_enable_notrace() causing
+ * an infinite recursion.
+ */
+ preempt_disable_notrace();
+ prev_ctx = exception_enter();
+ preempt_enable_no_resched_notrace();
+
+ preempt_schedule();
+
+ preempt_disable_notrace();
+ exception_exit(prev_ctx);
+ preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
/**
* user_exit - Inform the context tracking that the CPU is
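The kerneldoc in the hunk above states that preempt_enable_notrace() is meant to call this new helper instead of preempt_schedule(), so that user context is exited before the scheduler runs. As a rough, illustrative sketch only (the helper macro name preempt_check_resched_context() and the exact preempt.h wiring are assumptions, not part of this diff), the tracing-side dispatch could look like:

#ifdef CONFIG_CONTEXT_TRACKING
void preempt_schedule_context(void);

/* Reschedule through the context-tracking-aware entry point. */
#define preempt_check_resched_context()				\
do {								\
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))	\
		preempt_schedule_context();			\
} while (0)
#else
/* Without context tracking, the plain resched check is enough. */
#define preempt_check_resched_context() preempt_check_resched()
#endif

/*
 * Sketch of a notrace preempt_enable(): drop the preempt count
 * without recursing into the tracer, then reschedule via the
 * context-tracking-aware helper above.
 */
#define preempt_enable_notrace()				\
do {								\
	preempt_enable_no_resched_notrace();			\
	barrier();						\
	preempt_check_resched_context();			\
} while (0)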