author    Thomas Gleixner <tglx@linutronix.de>            2012-10-04 14:20:47 +0100
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-01-01 00:00:00 +0100
commit    0c27d50930361940c82214a3c85f3c067cfa43f1 (patch)
tree      e930f0d2f81013f4aed960b36e12769657101366 /include/linux/preempt_mask.h
parent    dacad4975c4149867edecd8ff013d83f0d0d7239 (diff)
softirq: Split softirq locks
The 3.x RT series removed the split softirq implementation in favour of pushing softirq processing into the context of the thread which raised it. This, however, prevents us from handling the various softirqs at different priorities. Now, instead of reintroducing the split softirq threads, we split the locks which serialize the softirq processing.

If a softirq is raised in the context of a thread while the thread is in a bh disabled region, then the softirq is noted in a per thread field. If the softirq is raised from hard interrupt context, then the bit is set in the flag field of ksoftirqd and ksoftirqd is invoked.

When a thread leaves a bh disabled region, it tries to execute the softirqs which have been raised in its own context. It acquires the per softirq / per cpu lock for the softirq and then checks whether the softirq is still pending in the per cpu local_softirq_pending() field. If yes, it runs the softirq. If no, then some other task executed it already. This allows for zero config softirq elevation in the context of user space tasks or interrupt threads.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
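To make the described flow concrete, here is a minimal kernel-style C sketch of running the thread's own softirqs when it leaves a bh disabled region. Only local_softirq_pending() and NR_SOFTIRQS come from the material above; softirq_runner_locks, run_thread_softirqs and do_single_softirq are invented names, and the real patch would presumably use RT's sleeping local locks rather than the raw spinlocks shown here.

/*
 * Sketch only -- illustrates the algorithm described in the commit
 * message, not the actual patch code.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* One serializing lock per softirq vector, per CPU (illustrative). */
static DEFINE_PER_CPU(spinlock_t, softirq_runner_locks[NR_SOFTIRQS]);

static void do_single_softirq(int nr);	/* hypothetical helper */

/*
 * Called when a thread leaves a bh disabled region: run the softirqs
 * which were noted on this thread while bottom halves were disabled.
 */
static void run_thread_softirqs(unsigned long pending)
{
	int nr;

	for_each_set_bit(nr, &pending, NR_SOFTIRQS) {
		spinlock_t *lock = this_cpu_ptr(&softirq_runner_locks[nr]);

		spin_lock(lock);
		/*
		 * Recheck under the lock: some other task may already
		 * have executed this softirq while we were waiting.
		 */
		if (local_softirq_pending() & (1UL << nr))
			do_single_softirq(nr);
		spin_unlock(lock);
	}
}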
Diffstat (limited to 'include/linux/preempt_mask.h')
 include/linux/preempt_mask.h | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index dbeec4d4a3be..c7e373dc7314 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -44,16 +44,26 @@
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET (1UL << NMI_SHIFT)
 
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif
 
 #define PREEMPT_ACTIVE_BITS 1
 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
 
 #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
 #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 				 | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() (0UL)
+extern int in_serving_softirq(void);
+#endif
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@@ -64,7 +74,6 @@
 #define in_irq() (hardirq_count())
 #define in_softirq() (softirq_count())
 #define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
 
 /*
  * Are we in NMI context?
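With CONFIG_PREEMPT_RT_FULL the bh disable level no longer lives in preempt_count() (softirq_count() is hardwired to 0UL), so in_serving_softirq() can no longer be derived from the count and becomes a real function, declared extern above and defined elsewhere in the patch. A minimal sketch of one way such a function could be backed by per-task state follows; PF_IN_SOFTIRQ is an assumed flag name for illustration and is not taken from this diff.

/*
 * Sketch, not the patch's code: with softirq_count() forced to 0 on
 * RT, "is this task currently serving a softirq?" has to be tracked
 * on the task itself.  PF_IN_SOFTIRQ is an assumed per-task flag.
 */
#include <linux/sched.h>
#include <linux/export.h>

int in_serving_softirq(void)
{
	return current->flags & PF_IN_SOFTIRQ;
}
EXPORT_SYMBOL(in_serving_softirq);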