author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2014-04-09 10:37:23 +0200
committer	Steven Rostedt <rostedt@goodmis.org>	2014-10-31 10:54:34 -0400
commit	f68c65b38444dc623458e10b002e71c56354a162 (patch)
tree	53e9c679d1c182de94f331adda05c5c4ab757e67 /block
parent	a71c9a2839d7a3ae148874c42ba1355d4fba9eb9 (diff)
block: mq: use cpu_light()
There is a might-sleep splat because get_cpu() disables preemption and we grab a lock later on. As a workaround we use get_cpu_light() and an additional lock so that two tasks cannot take the same ctx. There is already a lock member in the ctx, but some functions just do ++ on a member; that works with irqs off, while on RT we need the extra lock.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
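For illustration, a minimal sketch of the pattern the changelog describes. It assumes an RT-patched tree where get_cpu_light()/put_cpu_light() are available (roughly migrate_disable()/migrate_enable() around smp_processor_id()); struct demo_ctx and demo_account() are hypothetical stand-ins for blk_mq_ctx and its users, not code from this patch:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/spinlock.h>

	/* Hypothetical stand-in for struct blk_mq_ctx; the real struct
	 * gains the cpu_lock member in the blk-mq.h hunk below.
	 * cpu_lock is assumed to be set up with spin_lock_init() at
	 * init time, as blk_mq_init_cpu_queues() does in the patch. */
	struct demo_ctx {
		spinlock_t	cpu_lock;
		unsigned int	nr_queued;
	};

	static DEFINE_PER_CPU(struct demo_ctx, demo_ctx);

	static void demo_account(void)
	{
		struct demo_ctx *ctx;
		int cpu;

		/*
		 * Mainline uses get_cpu(), i.e. preempt_disable() +
		 * smp_processor_id(). On PREEMPT_RT a spinlock_t is a
		 * sleeping rt_mutex, so taking one with preemption
		 * disabled triggers the might-sleep splat.
		 *
		 * get_cpu_light() only disables migration: the task
		 * stays on this CPU but remains preemptible, so another
		 * task on the same CPU can reach the same ctx. The
		 * per-ctx cpu_lock restores the exclusion that
		 * preempt_disable() used to provide.
		 */
		cpu = get_cpu_light();
		ctx = &per_cpu(demo_ctx, cpu);

		spin_lock(&ctx->cpu_lock);	/* sleeping lock: fine, preemption stays on */
		ctx->nr_queued++;		/* the plain "++ on the member" case */
		spin_unlock(&ctx->cpu_lock);

		put_cpu_light();
	}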
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	20
-rw-r--r--	block/blk-mq.h	1
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 883f72089015..40e80d8e51f3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,7 +30,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
- return per_cpu_ptr(q->queue_ctx, cpu);
+ struct blk_mq_ctx *ctx;
+
+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
+ spin_lock(&ctx->cpu_lock);
+ return ctx;
}
/*
@@ -41,12 +45,18 @@ static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
*/
static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
+ return __blk_mq_get_ctx(q, get_cpu_light());
+}
+
+static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+ spin_unlock(&ctx->cpu_lock);
}
static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- put_cpu();
+ __blk_mq_put_ctx(ctx);
+ put_cpu_light();
}
/*
@@ -897,7 +907,9 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (list_empty(&plug->mq_list))
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ spin_unlock(&ctx->cpu_lock);
blk_flush_plug_list(plug, false);
+ spin_lock(&ctx->cpu_lock);
trace_block_plug(q);
}
list_add_tail(&rq->queuelist, &plug->mq_list);
@@ -973,6 +985,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
clear_bit(ctx->index_hw, hctx->ctx_map);
}
spin_unlock(&ctx->lock);
+ __blk_mq_put_ctx(ctx);
if (list_empty(&tmp))
return;
@@ -1212,6 +1225,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
+ spin_lock_init(&__ctx->cpu_lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 72beba1f9d55..110e5e1f1f22 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,7 @@ struct blk_mq_ctx {
struct list_head rq_list;
} ____cacheline_aligned_in_smp;
+ spinlock_t cpu_lock;
unsigned int cpu;
unsigned int index_hw;
unsigned int ipi_redirect;
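After the patch, blk_mq_get_ctx() returns with ctx->cpu_lock held and migration disabled, and blk_mq_put_ctx() undoes both. A sketch of the resulting caller shape; example_insert_rq() is illustrative only, not a function from the patch:

	/* Illustrative caller: blk_mq_get_ctx() now pins the CPU via
	 * get_cpu_light() and takes ctx->cpu_lock; blk_mq_put_ctx()
	 * releases both. Code that can re-enter ctx acquisition on the
	 * same CPU, such as blk_flush_plug_list() in the
	 * blk_mq_make_request() hunk above, must drop ctx->cpu_lock
	 * around the call to avoid self-deadlock. */
	static void example_insert_rq(struct request_queue *q, struct request *rq)
	{
		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* lock + pin CPU */

		spin_lock(&ctx->lock);				/* the pre-existing ctx lock */
		list_add_tail(&rq->queuelist, &ctx->rq_list);
		spin_unlock(&ctx->lock);

		blk_mq_put_ctx(ctx);				/* unlock + unpin */
	}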