From 6c9546675864f51506af69eca388e5d922942c56 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Sat, 23 Mar 2013 11:42:26 +0800 Subject: block: add runtime pm helpers Add runtime pm helper functions: void blk_pm_runtime_init(struct request_queue *q, struct device *dev) - Initialization function for drivers to call. int blk_pre_runtime_suspend(struct request_queue *q) - If any requests are in the queue, mark last busy and return -EBUSY. Otherwise set q->rpm_status to RPM_SUSPENDING and return 0. void blk_post_runtime_suspend(struct request_queue *q, int err) - If the suspend succeeded then set q->rpm_status to RPM_SUSPENDED. Otherwise set it to RPM_ACTIVE and mark last busy. void blk_pre_runtime_resume(struct request_queue *q) - Set q->rpm_status to RPM_RESUMING. void blk_post_runtime_resume(struct request_queue *q, int err) - If the resume succeeded then set q->rpm_status to RPM_ACTIVE and call __blk_run_queue, then mark last busy and autosuspend. Otherwise set q->rpm_status to RPM_SUSPENDED. The idea and API is designed by Alan Stern and described here: http://marc.info/?l=linux-scsi&m=133727953625963&w=2 Signed-off-by: Lin Ming Signed-off-by: Aaron Lu Acked-by: Alan Stern Signed-off-by: Jens Axboe --- block/blk-core.c | 144 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 074b758efc4..123d240132b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -30,6 +30,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -3045,6 +3046,149 @@ void blk_finish_plug(struct blk_plug *plug) } EXPORT_SYMBOL(blk_finish_plug); +#ifdef CONFIG_PM_RUNTIME +/** + * blk_pm_runtime_init - Block layer runtime PM initialization routine + * @q: the queue of the device + * @dev: the device the queue belongs to + * + * Description: + * Initialize runtime-PM-related fields for @q and start auto suspend for + * @dev. Drivers that want to take advantage of request-based runtime PM + * should call this function after @dev has been initialized, and its + * request queue @q has been allocated, and runtime PM for it can not happen + * yet(either due to disabled/forbidden or its usage_count > 0). In most + * cases, driver should call this function before any I/O has taken place. + * + * This function takes care of setting up using auto suspend for the device, + * the autosuspend delay is set to -1 to make runtime suspend impossible + * until an updated value is either set by user or by driver. Drivers do + * not need to touch other autosuspend settings. + * + * The block layer runtime PM is request based, so only works for drivers + * that use request as their IO unit instead of those directly use bio's. + */ +void blk_pm_runtime_init(struct request_queue *q, struct device *dev) +{ + q->dev = dev; + q->rpm_status = RPM_ACTIVE; + pm_runtime_set_autosuspend_delay(q->dev, -1); + pm_runtime_use_autosuspend(q->dev); +} +EXPORT_SYMBOL(blk_pm_runtime_init); + +/** + * blk_pre_runtime_suspend - Pre runtime suspend check + * @q: the queue of the device + * + * Description: + * This function will check if runtime suspend is allowed for the device + * by examining if there are any requests pending in the queue. If there + * are requests pending, the device can not be runtime suspended; otherwise, + * the queue's status will be updated to SUSPENDING and the driver can + * proceed to suspend the device. 
+ * + * For the not allowed case, we mark last busy for the device so that + * runtime PM core will try to autosuspend it some time later. + * + * This function should be called near the start of the device's + * runtime_suspend callback. + * + * Return: + * 0 - OK to runtime suspend the device + * -EBUSY - Device should not be runtime suspended + */ +int blk_pre_runtime_suspend(struct request_queue *q) +{ + int ret = 0; + + spin_lock_irq(q->queue_lock); + if (q->nr_pending) { + ret = -EBUSY; + pm_runtime_mark_last_busy(q->dev); + } else { + q->rpm_status = RPM_SUSPENDING; + } + spin_unlock_irq(q->queue_lock); + return ret; +} +EXPORT_SYMBOL(blk_pre_runtime_suspend); + +/** + * blk_post_runtime_suspend - Post runtime suspend processing + * @q: the queue of the device + * @err: return value of the device's runtime_suspend function + * + * Description: + * Update the queue's runtime status according to the return value of the + * device's runtime suspend function and mark last busy for the device so + * that PM core will try to auto suspend the device at a later time. + * + * This function should be called near the end of the device's + * runtime_suspend callback. + */ +void blk_post_runtime_suspend(struct request_queue *q, int err) +{ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_SUSPENDED; + } else { + q->rpm_status = RPM_ACTIVE; + pm_runtime_mark_last_busy(q->dev); + } + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_post_runtime_suspend); + +/** + * blk_pre_runtime_resume - Pre runtime resume processing + * @q: the queue of the device + * + * Description: + * Update the queue's runtime status to RESUMING in preparation for the + * runtime resume of the device. + * + * This function should be called near the start of the device's + * runtime_resume callback. + */ +void blk_pre_runtime_resume(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + q->rpm_status = RPM_RESUMING; + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_pre_runtime_resume); + +/** + * blk_post_runtime_resume - Post runtime resume processing + * @q: the queue of the device + * @err: return value of the device's runtime_resume function + * + * Description: + * Update the queue's runtime status according to the return value of the + * device's runtime_resume function. If it is successfully resumed, process + * the requests that are queued into the device's queue when it is resuming + * and then mark last busy and initiate autosuspend for it. + * + * This function should be called near the end of the device's + * runtime_resume callback. + */ +void blk_post_runtime_resume(struct request_queue *q, int err) +{ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_ACTIVE; + __blk_run_queue(q); + pm_runtime_mark_last_busy(q->dev); + pm_runtime_autosuspend(q->dev); + } else { + q->rpm_status = RPM_SUSPENDED; + } + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_post_runtime_resume); +#endif + int __init blk_dev_init(void) { BUILD_BUG_ON(__REQ_NR_BITS > 8 * -- cgit v1.2.3 From c8158819d506a8aedeca53c52dfb709a0aabe011 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Sat, 23 Mar 2013 11:42:27 +0800 Subject: block: implement runtime pm strategy When a request is added: If device is suspended or is suspending and the request is not a PM request, resume the device. When the last request finishes: Call pm_runtime_mark_last_busy(). When pick a request: If device is resuming/suspending, then only PM request is allowed to go. 
The idea and API is designed by Alan Stern and described here: http://marc.info/?l=linux-scsi&m=133727953625963&w=2 Signed-off-by: Lin Ming Signed-off-by: Aaron Lu Acked-by: Alan Stern Signed-off-by: Jens Axboe --- block/blk-core.c | 39 +++++++++++++++++++++++++++++++++++++++ block/elevator.c | 26 ++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 123d240132b..441f3488a76 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1264,6 +1264,16 @@ void part_round_stats(int cpu, struct hd_struct *part) } EXPORT_SYMBOL_GPL(part_round_stats); +#ifdef CONFIG_PM_RUNTIME +static void blk_pm_put_request(struct request *rq) +{ + if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) + pm_runtime_mark_last_busy(rq->q->dev); +} +#else +static inline void blk_pm_put_request(struct request *rq) {} +#endif + /* * queue lock must be held */ @@ -1274,6 +1284,8 @@ void __blk_put_request(struct request_queue *q, struct request *req) if (unlikely(--req->ref_count)) return; + blk_pm_put_request(req); + elv_completed_request(q, req); /* this is a bio leak */ @@ -2053,6 +2065,28 @@ static void blk_account_io_done(struct request *req) } } +#ifdef CONFIG_PM_RUNTIME +/* + * Don't process normal requests when queue is suspended + * or in the process of suspending/resuming + */ +static struct request *blk_pm_peek_request(struct request_queue *q, + struct request *rq) +{ + if (q->dev && (q->rpm_status == RPM_SUSPENDED || + (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) + return NULL; + else + return rq; +} +#else +static inline struct request *blk_pm_peek_request(struct request_queue *q, + struct request *rq) +{ + return rq; +} +#endif + /** * blk_peek_request - peek at the top of a request queue * @q: request queue to peek at @@ -2075,6 +2109,11 @@ struct request *blk_peek_request(struct request_queue *q) int ret; while ((rq = __elv_next_request(q)) != NULL) { + + rq = blk_pm_peek_request(q, rq); + if (!rq) + break; + if (!(rq->cmd_flags & REQ_STARTED)) { /* * This is the first time the device driver diff --git a/block/elevator.c b/block/elevator.c index a0ffdd943c9..eba5b04c29b 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -536,6 +537,27 @@ void elv_bio_merged(struct request_queue *q, struct request *rq, e->type->ops.elevator_bio_merged_fn(q, rq, bio); } +#ifdef CONFIG_PM_RUNTIME +static void blk_pm_requeue_request(struct request *rq) +{ + if (rq->q->dev && !(rq->cmd_flags & REQ_PM)) + rq->q->nr_pending--; +} + +static void blk_pm_add_request(struct request_queue *q, struct request *rq) +{ + if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 && + (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING)) + pm_request_resume(q->dev); +} +#else +static inline void blk_pm_requeue_request(struct request *rq) {} +static inline void blk_pm_add_request(struct request_queue *q, + struct request *rq) +{ +} +#endif + void elv_requeue_request(struct request_queue *q, struct request *rq) { /* @@ -550,6 +572,8 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) rq->cmd_flags &= ~REQ_STARTED; + blk_pm_requeue_request(rq); + __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); } @@ -572,6 +596,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) { trace_block_rq_insert(q, rq); + blk_pm_add_request(q, rq); + rq->q = q; if (rq->cmd_flags & REQ_SOFTBARRIER) { -- cgit v1.2.3 
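Taken together, the two patches above define the driver-facing contract: the pre/post helpers bracket the device-specific work in each runtime PM callback, and blk_pm_runtime_init() is called once the queue exists. The following is only a rough sketch of that intended call pattern -- the mydev_* driver, its private data and its quiesce/wake helpers are invented for illustration and are not part of the patches:

    #include <linux/blkdev.h>
    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct mydev {                          /* hypothetical driver-private data */
            struct request_queue *queue;
    };

    /* Hypothetical device-specific suspend/resume work, stubbed out here. */
    static int mydev_quiesce_hw(struct mydev *md) { return 0; }
    static int mydev_wake_hw(struct mydev *md) { return 0; }

    static int mydev_runtime_suspend(struct device *dev)
    {
            struct mydev *md = dev_get_drvdata(dev);
            int err;

            /* Returns -EBUSY (and marks last busy) while requests are pending. */
            err = blk_pre_runtime_suspend(md->queue);
            if (err)
                    return err;

            err = mydev_quiesce_hw(md);
            blk_post_runtime_suspend(md->queue, err);
            return err;
    }

    static int mydev_runtime_resume(struct device *dev)
    {
            struct mydev *md = dev_get_drvdata(dev);
            int err;

            blk_pre_runtime_resume(md->queue);              /* rpm_status = RPM_RESUMING */
            err = mydev_wake_hw(md);
            blk_post_runtime_resume(md->queue, err);        /* reruns the queue on success */
            return err;
    }

    /*
     * Called from probe, after the request queue exists but before runtime
     * suspend can happen; the -1 delay installed by blk_pm_runtime_init()
     * keeps suspend off until the driver or user space sets a real value.
     */
    static void mydev_setup_runtime_pm(struct mydev *md, struct device *dev)
    {
            blk_pm_runtime_init(md->queue, dev);
            pm_runtime_set_autosuspend_delay(dev, 5000);    /* example policy */
    }

Note also that any request the driver issues on its own behalf while the queue is suspending or resuming must carry REQ_PM: blk_pm_peek_request() only lets PM requests through in those states, and blk_pm_add_request()/blk_pm_put_request() deliberately exclude them from the nr_pending count.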
From f79ea4161434b31e351658283b24e92c3e570142 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 20 Sep 2012 16:38:30 -0700 Subject: block: Refactor blk_update_request() Converts it to use bio_advance(), simplifying it quite a bit in the process. Note that req_bio_endio() now always calls bio_advance() - which means it always loops over the biovec, not just on partial completions. Don't expect it to affect performance, but worth noting. Tested it by forcing partial updates, and dumping before and after on various bio/bvec fields when doing a partial update. Signed-off-by: Kent Overstreet CC: Jens Axboe --- block/blk-core.c | 80 +++++++++----------------------------------------------- 1 file changed, 12 insertions(+), 68 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 074b758efc4..86a1afeef60 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -158,20 +158,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio, else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) error = -EIO; - if (unlikely(nbytes > bio->bi_size)) { - printk(KERN_ERR "%s: want %u bytes done, %u left\n", - __func__, nbytes, bio->bi_size); - nbytes = bio->bi_size; - } - if (unlikely(rq->cmd_flags & REQ_QUIET)) set_bit(BIO_QUIET, &bio->bi_flags); - bio->bi_size -= nbytes; - bio->bi_sector += (nbytes >> 9); - - if (bio_integrity(bio)) - bio_integrity_advance(bio, nbytes); + bio_advance(bio, nbytes); /* don't actually finish bio if it's part of flush sequence */ if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) @@ -2252,8 +2242,7 @@ EXPORT_SYMBOL(blk_fetch_request); **/ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) { - int total_bytes, bio_nbytes, next_idx = 0; - struct bio *bio; + int total_bytes; if (!req->bio) return false; @@ -2299,56 +2288,21 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) blk_account_io_completion(req, nr_bytes); - total_bytes = bio_nbytes = 0; - while ((bio = req->bio) != NULL) { - int nbytes; + total_bytes = 0; + while (req->bio) { + struct bio *bio = req->bio; + unsigned bio_bytes = min(bio->bi_size, nr_bytes); - if (nr_bytes >= bio->bi_size) { + if (bio_bytes == bio->bi_size) req->bio = bio->bi_next; - nbytes = bio->bi_size; - req_bio_endio(req, bio, nbytes, error); - next_idx = 0; - bio_nbytes = 0; - } else { - int idx = bio->bi_idx + next_idx; - - if (unlikely(idx >= bio->bi_vcnt)) { - blk_dump_rq_flags(req, "__end_that"); - printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", - __func__, idx, bio->bi_vcnt); - break; - } - - nbytes = bio_iovec_idx(bio, idx)->bv_len; - BIO_BUG_ON(nbytes > bio->bi_size); - - /* - * not a complete bvec done - */ - if (unlikely(nbytes > nr_bytes)) { - bio_nbytes += nr_bytes; - total_bytes += nr_bytes; - break; - } - /* - * advance to the next vector - */ - next_idx++; - bio_nbytes += nbytes; - } + req_bio_endio(req, bio, bio_bytes, error); - total_bytes += nbytes; - nr_bytes -= nbytes; + total_bytes += bio_bytes; + nr_bytes -= bio_bytes; - bio = req->bio; - if (bio) { - /* - * end more in this run, or just return 'not-done' - */ - if (unlikely(nr_bytes <= 0)) - break; - } + if (!nr_bytes) + break; } /* @@ -2364,16 +2318,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) return false; } - /* - * if the request wasn't completed, update state - */ - if (bio_nbytes) { - req_bio_endio(req, bio, bio_nbytes, error); - bio->bi_idx += next_idx; - bio_iovec(bio)->bv_offset += nr_bytes; - bio_iovec(bio)->bv_len -= 
nr_bytes; - } - req->__data_len -= total_bytes; req->buffer = bio_data(req->bio); -- cgit v1.2.3 From f73a1c7d117d07a96d89475066188a2b79e53c48 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 25 Sep 2012 15:05:12 -0700 Subject: block: Add bio_end_sector() Just a little convenience macro - main reason to add it now is preparing for immutable bio vecs, it'll reduce the size of the patch that puts bi_sector/bi_size/bi_idx into a struct bvec_iter. Signed-off-by: Kent Overstreet CC: Jens Axboe CC: Lars Ellenberg CC: Jiri Kosina CC: Alasdair Kergon CC: dm-devel@redhat.com CC: Neil Brown CC: Martin Schwidefsky CC: Heiko Carstens CC: linux-s390@vger.kernel.org CC: Chris Mason CC: Steven Whitehouse Acked-by: Steven Whitehouse --- block/blk-core.c | 2 +- block/cfq-iosched.c | 7 ++----- block/deadline-iosched.c | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 86a1afeef60..7236b826f4a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1586,7 +1586,7 @@ static void handle_bad_sector(struct bio *bio) printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", bdevname(bio->bi_bdev, b), bio->bi_rw, - (unsigned long long)bio->bi_sector + bio_sectors(bio), + (unsigned long long)bio_end_sector(bio), (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); set_bit(BIO_EOF, &bio->bi_flags); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 4f0ade74cfd..d5cd3131c57 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2270,11 +2270,8 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) return NULL; cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); - if (cfqq) { - sector_t sector = bio->bi_sector + bio_sectors(bio); - - return elv_rb_find(&cfqq->sort_list, sector); - } + if (cfqq) + return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); return NULL; } diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 90037b5eb17..ba19a3afab7 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -132,7 +132,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) * check for front merge */ if (dd->front_merges) { - sector_t sector = bio->bi_sector + bio_sectors(bio); + sector_t sector = bio_end_sector(bio); __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); if (__rq) { -- cgit v1.2.3 From e5072664f8237cf53b0bd68a51aa1a7bc69061c5 Mon Sep 17 00:00:00 2001 From: Jun'ichi Nomura Date: Tue, 9 Apr 2013 15:01:21 +0200 Subject: blkcg: fix "scheduling while atomic" in blk_queue_bypass_start Since 749fefe677 in v3.7 ("block: lift the initial queue bypass mode on blk_register_queue() instead of blk_init_allocated_queue()"), the following warning appears when multipath is used with CONFIG_PREEMPT=y. This patch moves blk_queue_bypass_start() before radix_tree_preload() to avoid the sleeping call while preemption is disabled. BUG: scheduling while atomic: multipath/2460/0x00000002 1 lock held by multipath/2460: #0: (&md->type_lock){......}, at: [] dm_lock_md_type+0x17/0x19 [dm_mod] Modules linked in: ... Pid: 2460, comm: multipath Tainted: G W 3.7.0-rc2 #1 Call Trace: [] __schedule_bug+0x6a/0x78 [] __schedule+0xb4/0x5e0 [] schedule+0x64/0x66 [] schedule_timeout+0x39/0xf8 [] ? put_lock_stats+0xe/0x29 [] ? lock_release_holdtime+0xb6/0xbb [] wait_for_common+0x9d/0xee [] ? try_to_wake_up+0x206/0x206 [] ? kfree_call_rcu+0x1c/0x1c [] wait_for_completion+0x1d/0x1f [] wait_rcu_gp+0x5d/0x7a [] ? wait_rcu_gp+0x7a/0x7a [] ? 
complete+0x21/0x53 [] synchronize_rcu+0x1e/0x20 [] blk_queue_bypass_start+0x5d/0x62 [] blkcg_activate_policy+0x73/0x270 [] ? kmem_cache_alloc_node_trace+0xc7/0x108 [] cfq_init_queue+0x80/0x28e [] ? dm_blk_ioctl+0xa7/0xa7 [dm_mod] [] elevator_init+0xe1/0x115 [] ? blk_queue_make_request+0x54/0x59 [] blk_init_allocated_queue+0x8c/0x9e [] dm_setup_md_queue+0x36/0xaa [dm_mod] [] table_load+0x1bd/0x2c8 [dm_mod] [] ctl_ioctl+0x1d6/0x236 [dm_mod] [] ? table_clear+0xaa/0xaa [dm_mod] [] dm_ctl_ioctl+0x13/0x17 [dm_mod] [] do_vfs_ioctl+0x3fb/0x441 [] ? file_has_perm+0x8a/0x99 [] sys_ioctl+0x5e/0x82 [] ? trace_hardirqs_on_thunk+0x3a/0x3f [] system_call_fastpath+0x16/0x1b Signed-off-by: Jun'ichi Nomura Acked-by: Vivek Goyal Acked-by: Tejun Heo Cc: Alasdair G Kergon Cc: stable@kernel.org Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index b2b9837f9dd..e8918ffaf96 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request_queue *q, if (!new_blkg) return -ENOMEM; - preloaded = !radix_tree_preload(GFP_KERNEL); - blk_queue_bypass_start(q); + preloaded = !radix_tree_preload(GFP_KERNEL); + /* * Make sure the root blkg exists and count the existing blkgs. As * @q is bypassing at this point, blkg_lookup_create() can't be -- cgit v1.2.3 From ea56505bedd03e21f497c59cece15a62b4398fc4 Mon Sep 17 00:00:00 2001 From: Philippe De Muyter Date: Mon, 29 Apr 2013 23:00:18 +0200 Subject: partitions/efi.c: replace useless kzalloc's by kmalloc's In alloc_read_gpt_entries and alloc_read_gpt_header, the kzalloc'ated zones are either totally overwritten by the following read_lba call, or freed. As kmalloc is cheaper than kzalloc, use kmalloc. Signed-off-by: Philippe De Muyter Cc: Matt Domsch Cc: Panagiotis Issaris Cc: Andrew Morton Signed-off-by: Jens Axboe --- block/partitions/efi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/partitions/efi.c b/block/partitions/efi.c index ff5804e2f1d..c85fc895ecd 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -238,7 +238,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, le32_to_cpu(gpt->sizeof_partition_entry); if (!count) return NULL; - pte = kzalloc(count, GFP_KERNEL); + pte = kmalloc(count, GFP_KERNEL); if (!pte) return NULL; @@ -267,7 +267,7 @@ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state, gpt_header *gpt; unsigned ssz = bdev_logical_block_size(state->bdev); - gpt = kzalloc(ssz, GFP_KERNEL); + gpt = kmalloc(ssz, GFP_KERNEL); if (!gpt) return NULL; -- cgit v1.2.3
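The redundancy claim in that last commit message follows directly from the shape of the callers. A condensed sketch of alloc_read_gpt_header() after the patch -- simplified for illustration, not a verbatim copy of block/partitions/efi.c -- shows that the freshly allocated buffer is either filled in full by read_lba() or freed before anything reads it, so the zeroing that kzalloc() used to do was pure overhead:

    static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
                                             u64 lba)
    {
            gpt_header *gpt;
            unsigned ssz = bdev_logical_block_size(state->bdev);

            gpt = kmalloc(ssz, GFP_KERNEL);
            if (!gpt)
                    return NULL;

            if (read_lba(state, lba, (u8 *)gpt, ssz) < ssz) {
                    kfree(gpt);     /* short read: caller never sees the buffer */
                    return NULL;
            }
            return gpt;             /* every byte was written by read_lba() */
    }

alloc_read_gpt_entries() follows the same allocate-then-fill-or-free pattern, which is why both call sites could drop the zeroing.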