author		Tejun Heo <tj@kernel.org>	2012-04-13 13:11:33 -0700
committer	Jens Axboe <axboe@kernel.dk>	2012-04-20 10:06:06 +0200
commit		a2b1693bac45ea3fe3ba612fd22c45f17449f610
tree		2e05859caab6453efbc85d584dd72dca7ef03cd0 /block/cfq-iosched.c
parent		03d8e11142a893ad322285d3c8a08e88b570cda1
blkcg: implement per-queue policy activation
All blkcg policies were assumed to be enabled on all request_queues. Due to various implementation obstacles, during the recent blkcg core updates, this was temporarily implemented as shooting down all !root blkgs on elevator switch and policy [de]registration combined with half-broken in-place root blkg updates. In addition to being buggy and racy, this meant losing all blkcg configurations across those events.

Now that blkcg is cleaned up enough, this patch replaces the temporary implementation with proper per-queue policy activation. Each blkcg policy should call the new blkcg_[de]activate_policy() to enable and disable the policy on a specific queue. blkcg_activate_policy() allocates and installs policy data for the policy for all existing blkgs. blkcg_deactivate_policy() does the reverse. If a policy is not enabled for a given queue, blkg printing / config functions skip the respective blkg for the queue.

blkcg_activate_policy() also takes care of root blkg creation, and cfq_init_queue() and blk_throtl_init() are updated accordingly.

This makes blkcg_bypass_{start|end}() and update_root_blkg_pd() unnecessary; both are dropped.

v2: cfq_init_queue() was returning uninitialized @ret on root_group alloc failure if !CONFIG_CFQ_GROUP_IOSCHED. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
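The intended usage from a policy's point of view is roughly the following. This is only an illustrative sketch, not part of the patch: "foo" and its init/exit hooks are hypothetical placeholders, and only the blkcg_activate_policy() / blkcg_deactivate_policy() pairing reflects the interface described above (the cfq hunks below show the real conversion).

#include <linux/blkdev.h>
#include "blk-cgroup.h"		/* declares blkcg_[de]activate_policy() */

/* "foo" is a hypothetical blkcg policy used only for illustration. */
static struct blkio_policy_type blkio_policy_foo;

static int foo_init_queue(struct request_queue *q)
{
	int ret;

	/*
	 * Enable the policy on this queue: allocates and installs policy
	 * data for all existing blkgs and creates the root blkg if needed.
	 */
	ret = blkcg_activate_policy(q, &blkio_policy_foo);
	if (ret)
		return ret;

	/* q->root_blkg is valid from here on. */
	return 0;
}

static void foo_exit_queue(struct request_queue *q)
{
	/* Disable the policy and free its per-blkg data for this queue. */
	blkcg_deactivate_policy(q, &blkio_policy_foo);
}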
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 86440e04f3e..0203652e1f3 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1406,8 +1406,7 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 	ret = -EINVAL;
 	cfqg = blkg_to_cfqg(ctx.blkg);
-	if (cfqg && (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN &&
-				ctx.v <= CFQ_WEIGHT_MAX))) {
+	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
 		cfqg->dev_weight = ctx.v;
 		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
 		ret = 0;
@@ -3938,7 +3937,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 #ifndef CONFIG_CFQ_GROUP_IOSCHED
 	kfree(cfqd->root_group);
 #endif
-	update_root_blkg_pd(q, &blkio_policy_cfq);
+	blkcg_deactivate_policy(q, &blkio_policy_cfq);
 	kfree(cfqd);
 }
@@ -3946,7 +3945,7 @@ static int cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
 	struct blkio_group *blkg __maybe_unused;
-	int i;
+	int i, ret;
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
@@ -3960,28 +3959,20 @@ static int cfq_init_queue(struct request_queue *q)
 	/* Init root group and prefer root group over other groups by default */
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
-
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-	if (!IS_ERR(blkg)) {
-		q->root_blkg = blkg;
-		cfqd->root_group = blkg_to_cfqg(blkg);
-	}
+	ret = blkcg_activate_policy(q, &blkio_policy_cfq);
+	if (ret)
+		goto out_free;
-	spin_unlock_irq(q->queue_lock);
-	rcu_read_unlock();
+	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
 #else
+	ret = -ENOMEM;
 	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
 					GFP_KERNEL, cfqd->queue->node);
-	if (cfqd->root_group)
-		cfq_init_cfqg_base(cfqd->root_group);
-#endif
-	if (!cfqd->root_group) {
-		kfree(cfqd);
-		return -ENOMEM;
-	}
+	if (!cfqd->root_group)
+		goto out_free;
+	cfq_init_cfqg_base(cfqd->root_group);
+#endif
 	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
 	/*
@@ -4031,6 +4022,10 @@ static int cfq_init_queue(struct request_queue *q)
 	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
 	return 0;
+
+out_free:
+	kfree(cfqd);
+	return ret;
 }
 /*