author: Mikulas Patocka <mpatocka@redhat.com>	2013-09-13 17:42:24 -0400
committer: Mike Snitzer <snitzer@redhat.com>	2013-09-18 14:41:06 -0400
commit: bbf3f8cbdc139860a14c4fc2bb25432427045aaa
tree: 49c29342acc975d30d5d7399c9ba2ceacd718a01 /drivers/md
parent: cc9d3c382bc1674884c2e5e468d51230a9503dee
dm stats: fix possible counter corruption on 32-bit systems
There was a deliberate race condition in dm_stat_for_entry() to avoid the overhead of disabling and enabling interrupts. The race could result in some events not being counted on 64-bit architectures.

However, on 32-bit architectures, operations on long long variables are not atomic, so the race condition could cause the counter to jump by 2^32. Such jumps could be disruptive, so we need to do proper locking on 32-bit architectures.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: Alasdair G. Kergon <agk@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
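To make the failure mode concrete, here is a minimal user-space sketch (not part of the patch) that replays the harmful interleaving deterministically. The union layout assumes a little-endian 32-bit target, and storing the high word first is one ordering a compiler may legally pick for a non-atomic 64-bit store; both are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <stdint.h>

/*
 * A 64-bit counter seen as two 32-bit halves, the way a 32-bit CPU
 * has to store it (layout assumes little endian).
 */
union counter {
	uint64_t whole;
	struct { uint32_t lo, hi; } half;
};

int main(void)
{
	union counter c = { .whole = 0xFFFFFFFFULL };	/* 2^32 - 1 */

	/*
	 * Task context executes "c.whole++". The result, 0x100000000,
	 * carries into the high word; suppose the compiler stores the
	 * high half first:
	 */
	c.half.hi = 1;			/* counter now reads 0x1FFFFFFFF */

	/*
	 * An interrupt fires between the two stores and performs its
	 * own increment on the torn value:
	 */
	c.whole += 1;			/* 0x1FFFFFFFF -> 0x200000000 */

	/* Task context resumes and stores its now-stale low half: */
	c.half.lo = 0;			/* final value: 0x200000000 */

	/* Two increments from 2^32 - 1 should give 0x100000001: */
	printf("expected 0x100000001, got 0x%llx\n",
	       (unsigned long long)c.whole);
	return 0;
}

Blocking interrupts around the update, as the patch does when BITS_PER_LONG == 32, prevents the interrupt's increment from landing between the two stores.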
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-stats.c	23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d6..3d404c1371e 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
 	struct dm_stat_percpu *p;
 
 	/*
-	 * For strict correctness we should use local_irq_disable/enable
+	 * For strict correctness we should use local_irq_save/restore
 	 * instead of preempt_disable/enable.
 	 *
-	 * This is racy if the driver finishes bios from non-interrupt
-	 * context as well as from interrupt context or from more different
-	 * interrupts.
+	 * preempt_disable/enable is racy if the driver finishes bios
+	 * from non-interrupt context as well as from interrupt context
+	 * or from more different interrupts.
 	 *
-	 * However, the race only results in not counting some events,
-	 * so it is acceptable.
+	 * On 64-bit architectures the race only results in not counting some
+	 * events, so it is acceptable. On 32-bit architectures the race could
+	 * cause the counter going off by 2^32, so we need to do proper locking
+	 * there.
 	 *
 	 * part_stat_lock()/part_stat_unlock() have this race too.
 	 */
+#if BITS_PER_LONG == 32
+	unsigned long flags;
+	local_irq_save(flags);
+#else
 	preempt_disable();
+#endif
 	p = &s->stat_percpu[smp_processor_id()][entry];
 
 	if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
 		p->ticks[idx] += duration;
 	}
 
+#if BITS_PER_LONG == 32
+	local_irq_restore(flags);
+#else
 	preempt_enable();
+#endif
 }
 
 static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
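For contrast, the race that remains acceptable on 64-bit architectures can be replayed the same way (again a user-space sketch, not kernel code): an aligned 64-bit store is a single operation there, so a racing interrupt can at worst make one increment overwrite another, losing an event without corrupting the value.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t counter = 1000;

	/* Task context begins "counter++": it has loaded the old value
	 * but not yet stored the result. */
	uint64_t task_val = counter;	/* reads 1000 */

	/* An interrupt increments the counter in between; on 64-bit
	 * this is one full-width store, never a torn pair: */
	counter += 1;			/* counter = 1001 */

	/* Task context stores its stale result, overwriting the
	 * interrupt's update: */
	counter = task_val + 1;		/* counter = 1001, not 1002 */

	/* One event is lost, but the value stays plausible, which is
	 * why preempt_disable() remains sufficient on 64-bit. */
	printf("expected 1002, got %llu\n",
	       (unsigned long long)counter);
	return 0;
}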