author     Jens Axboe <jens.axboe@oracle.com>   2007-07-10 13:43:25 +0200
committer  Jens Axboe <jens.axboe@oracle.com>   2007-07-10 13:43:25 +0200
commit     15c31be4d5bd2402c6f5a288d56a24edc9252b71 (patch)
tree       0fca6e97186080d83ff3f36bb359bcb4ef06a9e2 /block
parent     72d3a38ee083a96c09032e608a4c7e047ce26760 (diff)
cfq-iosched: fix async queue behaviour
With the cfq_queue hash removal, we inadvertently got rid of the async
queue sharing. This was not intentional; in fact, CFQ purposely shares
the async queue per priority level to get good merging for async writes.
So put some logic in cfq_get_queue() to track the shared queues.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
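For illustration only (not part of the patch): a minimal, self-contained
userspace C sketch of the sharing pattern the change restores. Async queues
are cached in an array indexed by I/O priority, pinned with an extra
reference when first cached, and released on scheduler exit. The names
here (sched_data, get_queue, find_alloc_queue, put_queue) are simplified
stand-ins for their cfq_* counterparts, and the plain int refcount stands
in for the kernel's atomic_t.

#include <stdio.h>
#include <stdlib.h>

#define IOPRIO_BE_NR 8	/* number of best-effort priority levels */

struct queue {
	int ioprio;
	int ref;	/* simplified refcount; the kernel uses atomic_t */
};

struct sched_data {
	struct queue *async_queue[IOPRIO_BE_NR];	/* shared async queues */
};

static struct queue *find_alloc_queue(int ioprio)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (q)
		q->ioprio = ioprio;
	return q;
}

static void put_queue(struct queue *q)
{
	if (--q->ref == 0)
		free(q);
}

/*
 * Mirrors the shape of cfq_get_queue() after the patch: async requests at
 * the same priority level share one queue so their writes can merge.
 */
static struct queue *get_queue(struct sched_data *sd, int is_sync, int ioprio)
{
	struct queue *q = NULL;

	if (!is_sync)
		q = sd->async_queue[ioprio];
	if (!q)
		q = find_alloc_queue(ioprio);

	/* Pin the shared async queue; scheduler exit drops this reference. */
	if (!is_sync && !sd->async_queue[ioprio]) {
		q->ref++;
		sd->async_queue[ioprio] = q;
	}

	q->ref++;	/* reference held by the caller */
	return q;
}

int main(void)
{
	struct sched_data sd = { { NULL } };
	struct queue *a = get_queue(&sd, 0, 4);
	struct queue *b = get_queue(&sd, 0, 4);

	printf("shared: %s\n", a == b ? "yes" : "no");	/* prints "yes" */

	put_queue(a);
	put_queue(b);

	/* Exit path, as in cfq_exit_queue(): put the pinned async queues. */
	for (int i = 0; i < IOPRIO_BE_NR; i++)
		if (sd.async_queue[i])
			put_queue(sd.async_queue[i]);
	return 0;
}

The extra reference taken when a queue is first cached is what keeps the
shared queue alive between requests; the loop at the end of cfq_exit_queue()
in the patch below drops that reference.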
Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c | 39
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index baef5fc7cff..e0aa4dad674 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,8 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
+	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
 	struct timer_list idle_class_timer;
 
 	sector_t last_position;
@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-	      gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+		     struct task_struct *tsk, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_context *cic;
@@ -1405,12 +1407,35 @@ retry:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
-	atomic_inc(&cfqq->ref);
 out:
 	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
 	return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+	      gfp_t gfp_mask)
+{
+	const int ioprio = task_ioprio(tsk);
+	struct cfq_queue *cfqq = NULL;
+
+	if (!is_sync)
+		cfqq = cfqd->async_cfqq[ioprio];
+	if (!cfqq)
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+	/*
+	 * pin the queue now that it's allocated, scheduler exit will prune it
+	 */
+	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+		atomic_inc(&cfqq->ref);
+		cfqd->async_cfqq[ioprio] = cfqq;
+	}
+
+	atomic_inc(&cfqq->ref);
+	return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
+	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
+	/*
+	 * Put the async queues
+	 */
+	for (i = 0; i < IOPRIO_BE_NR; i++)
+		if (cfqd->async_cfqq[i])
+			cfq_put_queue(cfqd->async_cfqq[i]);
+
 	spin_unlock_irq(q->queue_lock);
 
 	cfq_shutdown_timer_wq(cfqd);