author     Tejun Heo <tj@kernel.org>        2012-06-04 20:40:56 -0700
committer  Jens Axboe <axboe@kernel.dk>     2012-06-25 11:53:50 +0200
commit     7f4b35d155a5f9e5748539a79558533aa08d6a81 (patch)
tree       9e26e52852cd6b364d413b02b9c5379c5372be7e /block
parent     a06e05e6afab70b4b23c0a7975aaeae24b195cd6 (diff)
block: allocate io_context upfront
The block layer allocates the ioc very lazily. It waits until the moment the ioc is absolutely necessary; unfortunately, that moment can fall inside the queue lock, so __get_request() has to perform an unlock - try alloc - retry dance.

Just allocate it up-front on entry to the block layer. We're not saving the rain forest by deferring it to the last possible moment and complicating things unnecessarily.

This patch prepares for further updates to the request allocation path.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
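For readers skimming the patch, below is a minimal, self-contained C sketch of the two allocation strategies the commit message contrasts. The fake_queue/fake_ioc types and the alloc_ioc()/lock_queue() helpers are illustrative stand-ins invented for this sketch, not kernel APIs; the real code paths are in the diff that follows.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel objects involved. */
struct fake_ioc   { int batching; };
struct fake_queue { bool locked; struct fake_ioc *ioc; };

static void lock_queue(struct fake_queue *q)   { q->locked = true; }
static void unlock_queue(struct fake_queue *q) { q->locked = false; }

static struct fake_ioc *alloc_ioc(void)
{
	/* May fail, like an ioc allocation under memory pressure. */
	return calloc(1, sizeof(struct fake_ioc));
}

/* Old shape: allocate lazily, which forces an unlock/alloc/relock retry
 * because the allocation may sleep while the queue lock is held. */
static struct fake_ioc *get_ioc_lazy(struct fake_queue *q)
{
	bool retried = false;
retry:
	if (!q->ioc && !retried) {
		unlock_queue(q);
		q->ioc = alloc_ioc();
		lock_queue(q);
		retried = true;
		goto retry;	/* must re-check conditions after dropping the lock */
	}
	return q->ioc;
}

/* New shape: allocate before taking the queue lock, so the path that
 * runs under the lock never has to drop it and retry. */
static struct fake_ioc *get_ioc_upfront(struct fake_queue *q)
{
	q->ioc = alloc_ioc();	/* done on entry, before the lock */
	lock_queue(q);
	/* ... request allocation runs with the lock held, no retry ... */
	unlock_queue(q);
	return q->ioc;
}

int main(void)
{
	struct fake_queue q = { 0 };

	lock_queue(&q);
	printf("lazy ioc:    %p\n", (void *)get_ioc_lazy(&q));
	unlock_queue(&q);
	free(q.ioc);
	q.ioc = NULL;

	printf("upfront ioc: %p\n", (void *)get_ioc_upfront(&q));
	free(q.ioc);
	return 0;
}

The upfront shape is what the patch adopts: blk_get_request() and generic_make_request_checks() now call create_io_context() before the queue lock is taken, so __get_request() no longer needs the unlock/alloc/retry loop and blk_throtl_bio() can drop its own create_io_context() call.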
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      42
-rw-r--r--  block/blk-throttle.c   3
2 files changed, 15 insertions(+), 30 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 080204a10fc..71894e143b9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -855,15 +855,11 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
{
struct request *rq;
struct request_list *rl = &q->rq;
- struct elevator_type *et;
- struct io_context *ioc;
+ struct elevator_type *et = q->elevator->type;
+ struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
const bool is_sync = rw_is_sync(rw_flags) != 0;
- bool retried = false;
int may_queue;
-retry:
- et = q->elevator->type;
- ioc = rq_ioc(bio);
if (unlikely(blk_queue_dead(q)))
return NULL;
@@ -875,20 +871,6 @@ retry:
if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
if (rl->count[is_sync]+1 >= q->nr_requests) {
/*
- * We want ioc to record batching state. If it's
- * not already there, creating a new one requires
- * dropping queue_lock, which in turn requires
- * retesting conditions to avoid queue hang.
- */
- if (!ioc && !retried) {
- spin_unlock_irq(q->queue_lock);
- create_io_context(gfp_mask, q->node);
- spin_lock_irq(q->queue_lock);
- retried = true;
- goto retry;
- }
-
- /*
* The queue will fill after this allocation, so set
* it as full, and mark this process as "batching".
* This process will be allowed to complete a batch of
@@ -955,12 +937,8 @@ retry:
/* init elvpriv */
if (rw_flags & REQ_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
- create_io_context(gfp_mask, q->node);
- ioc = rq_ioc(bio);
- if (!ioc)
- goto fail_elvpriv;
-
- icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (ioc)
+ icq = ioc_create_icq(ioc, q, gfp_mask);
if (!icq)
goto fail_elvpriv;
}
@@ -1071,7 +1049,6 @@ retry:
* to allocate at least one request, and up to a big batch of them
* for a small period time. See ioc_batching, ioc_set_batching
*/
- create_io_context(GFP_NOIO, q->node);
ioc_set_batching(q, current->io_context);
spin_lock_irq(q->queue_lock);
@@ -1086,6 +1063,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
BUG_ON(rw != READ && rw != WRITE);
+ /* create ioc upfront */
+ create_io_context(gfp_mask, q->node);
+
spin_lock_irq(q->queue_lock);
rq = get_request(q, rw, NULL, gfp_mask);
if (!rq)
@@ -1698,6 +1678,14 @@ generic_make_request_checks(struct bio *bio)
goto end_io;
}
+ /*
+ * Various block parts want %current->io_context and lazy ioc
+ * allocation ends up trading a lot of pain for a small amount of
+ * memory. Just allocate it upfront. This may fail and block
+ * layer knows how to live with it.
+ */
+ create_io_context(GFP_ATOMIC, q->node);
+
if (blk_throtl_bio(q, bio))
return false; /* throttled, will be resubmitted later */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5b065951204..e287c19908c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
goto out;
}
- /* bio_associate_current() needs ioc, try creating */
- create_io_context(GFP_ATOMIC, q->node);
-
/*
* A throtl_grp pointer retrieved under rcu can be used to access
* basic fields like stats and io rates. If a group has no rules,