author		Thomas Gleixner <tglx@linutronix.de>		2012-10-28 11:18:08 +0100
committer	Steven Rostedt <rostedt@rostedt.homelinux.com>	2012-12-20 15:20:31 -0500
commit		95d9f5e6f28a7d3decdb55b9d92f7efdd7140aea (patch)
tree		23897d29efcff1a6f077e6f4905217f71bf27f51
parent		137ac49549fe9572243917d678c3ff35c2d333e5 (diff)
net: netfilter: Serialize xt_write_recseq sections on RT
The netfilter code relies only on the implicit semantics of local_bh_disable() for serializing xt_write_recseq sections. RT breaks that and needs explicit serialization here.

Reported-by: Peter LaDow <petela@gocougs.wsu.edu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
-rw-r--r--	include/linux/locallock.h		4
-rw-r--r--	include/linux/netfilter/x_tables.h	7
-rw-r--r--	net/netfilter/core.c			6
3 files changed, 17 insertions, 0 deletions
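
For context, xt_write_recseq_begin()/xt_write_recseq_end() bracket the per-CPU
counter updates done while a ruleset is traversed. A minimal sketch of a typical
writer, modeled on ipt_do_table() and not part of this patch: on mainline the
surrounding local_bh_disable() alone keeps other writers off the CPU, while on
RT the local_lock(xt_write_lock) taken inside xt_write_recseq_begin() provides
that exclusion.

	/*
	 * Illustrative sketch only; everything except the xt_* helpers
	 * and the bh/lock calls is a placeholder.
	 */
	unsigned int addend;

	local_bh_disable();
	addend = xt_write_recseq_begin();	/* on RT this also takes xt_write_lock */

	/* ... walk the rules, update per-CPU counters ... */

	xt_write_recseq_end(addend);		/* on RT this also drops xt_write_lock */
	local_bh_enable();
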
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
index f1804a38dd44..a5eea5df7cc2 100644
--- a/include/linux/locallock.h
+++ b/include/linux/locallock.h
@@ -25,6 +25,9 @@ struct local_irq_lock {
 	DEFINE_PER_CPU(struct local_irq_lock, lvar) = {			\
 		.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }

+#define DECLARE_LOCAL_IRQ_LOCK(lvar)					\
+	DECLARE_PER_CPU(struct local_irq_lock, lvar)
+
 #define local_irq_lock_init(lvar)					\
 	do {								\
 		int __cpu;						\
@@ -220,6 +223,7 @@ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
 #else /* PREEMPT_RT_BASE */

 #define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)		extern __typeof__(const int) lvar

 static inline void local_irq_lock_init(int lvar) { }
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 8d674a786744..3f2bc5755a96 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -186,6 +186,7 @@ struct xt_counters_info {
 #ifdef __KERNEL__

 #include <linux/netdevice.h>
+#include <linux/locallock.h>

 /**
  * struct xt_action_param - parameters for matches/targets
@@ -466,6 +467,8 @@ extern void xt_free_table_info(struct xt_table_info *info);
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);

+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
@@ -480,6 +483,9 @@ static inline unsigned int xt_write_recseq_begin(void)
 {
 	unsigned int addend;

+	/* RT protection */
+	local_lock(xt_write_lock);
+
 	/*
 	 * Low order bit of sequence is set if we already
 	 * called xt_write_recseq_begin().
@@ -510,6 +516,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
 	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
 	smp_wmb();
 	__this_cpu_add(xt_recseq.sequence, addend);
+	local_unlock(xt_write_lock);
 }

 /*
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e051332e..151061b2e474 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -20,11 +20,17 @@
 #include <linux/proc_fs.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/locallock.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>

 #include "nf_internals.h"

+#ifdef CONFIG_PREEMPT_RT_BASE
+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
+EXPORT_PER_CPU_SYMBOL(xt_write_lock);
+#endif
+
 static DEFINE_MUTEX(afinfo_mutex);

 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
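
The per-CPU lock defined here pairs with the DECLARE_LOCAL_IRQ_LOCK() added to
x_tables.h above; the definition and EXPORT_PER_CPU_SYMBOL() are guarded by
CONFIG_PREEMPT_RT_BASE because only the RT variant of the lock is a real
per-CPU object. A rough sketch of the intended define-once/declare-in-the-header
pattern (illustrative only, not additional patch content):

	/* In exactly one translation unit (here net/netfilter/core.c): */
	DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);

	/* In the shared header (include/linux/netfilter/x_tables.h): */
	DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);

	/* Writers then serialize their per-CPU section with: */
	local_lock(xt_write_lock);
	/* ... per-CPU critical section ... */
	local_unlock(xt_write_lock);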