author    Uwe Kleine-König <u.kleine-koenig@pengutronix.de>  2013-07-09 00:26:32 +0200
committer Steven Rostedt <rostedt@rostedt.homelinux.com>     2014-03-24 18:27:10 -0400
commit    36dedc2a1c6d217d6616ae694bb439632e8a70c9 (patch)
tree      b71363ad52ec66f35fe932d248aa94d383ad9fd8
parent    4cad52a9ab3ba5e9ec5d4300b126439f8b934787 (diff)
list_bl.h: fix it for !SMP && !DEBUG_SPINLOCK
The patch "list_bl.h: make list head locking RT safe" introduced an unconditional __set_bit(0, (unsigned long *)b) in hlist_bl_lock(struct hlist_bl_head *b). This clobbers the value of b->first. When the value of b->first is retrieved using hlist_bl_first(), the clobbering is undone with (unsigned long)h->first & ~LIST_BL_LOCKMASK, which only works if LIST_BL_LOCKMASK is one. But LIST_BL_LOCKMASK is only one if at least one of CONFIG_SMP and CONFIG_DEBUG_SPINLOCK is defined. Without either of them, the value returned by hlist_bl_first() has bit zero set, which likely results in a crash.

So only do the clobbering in the cases where LIST_BL_LOCKMASK is one. An alternative would be to always define LIST_BL_LOCKMASK to one with CONFIG_PREEMPT_RT_BASE.

Cc: stable-rt@vger.kernel.org
Acked-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
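For context, the masking the commit message refers to looks roughly like this in mainline include/linux/list_bl.h (a paraphrased sketch, not part of this patch):

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif

static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
	/* With LIST_BL_LOCKMASK == 0 this mask is a no-op, so a bit set by
	 * __set_bit(0, ...) in hlist_bl_lock() would leak into the returned
	 * pointer and corrupt it. */
	return (struct hlist_bl_node *)
		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}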
-rw-r--r--  include/linux/list_bl.h | 4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ddfd46a88f93..becd7a61263f 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -131,8 +131,10 @@ static inline void hlist_bl_lock(struct hlist_bl_head *b)
 	bit_spin_lock(0, (unsigned long *)b);
 #else
 	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	__set_bit(0, (unsigned long *)b);
 #endif
+#endif
 }

 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
@@ -140,7 +142,9 @@ static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 #ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
 #else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	__clear_bit(0, (unsigned long *)b);
+#endif
 	raw_spin_unlock(&b->lock);
 #endif
 }
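With the patch applied, the CONFIG_PREEMPT_RT_BASE branches effectively read as follows (a sketch with the outer #ifndef collapsed; the bit_spin_lock() path is unchanged):

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	raw_spin_lock(&b->lock);
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Only mirror the lock bit when hlist_bl_first() will mask it out. */
	__set_bit(0, (unsigned long *)b);
#endif
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	__clear_bit(0, (unsigned long *)b);
#endif
	raw_spin_unlock(&b->lock);
}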