diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2012-03-01 16:16:02 +0100 |
---|---|---|
committer | Steven Rostedt <rostedt@rostedt.homelinux.com> | 2012-10-26 14:31:51 -0400 |
commit | 6918bd731188d8ce8ff4edfb9989e3a796503887 (patch) | |
tree | 5ae72f21f1bdc685739891c457b12073fdb3daa0 | |
parent | 3d16ce4a7e8175e17cab340801a28271246168b3 (diff) |
net: u64_stat: Protect seqcount
On RT we must prevent the writer from being preempted inside the write
section. Otherwise a preempting reader might spin forever.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | include/linux/u64_stats_sync.h | 2 |
1 file changed, 2 insertions, 0 deletions
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 8da8c4e87da3..b39549fb3c3f 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -70,6 +70,7 @@ struct u64_stats_sync {
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	preempt_disable_rt();
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
@@ -78,6 +79,7 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
+	preempt_enable_rt();
 #endif
 }