path: root/block/blk.h
author     Jens Axboe <jaxboe@fusionio.com>  2011-03-21 10:14:27 +0100
committer  Jens Axboe <jaxboe@fusionio.com>  2011-03-21 10:14:27 +0100
commit     5e84ea3a9c662dc2d7a48703a4468fad954a3b7f (patch)
tree       3fa0fb26a7c8a970213584104cc2498ef46d60a3 /block/blk.h
parent     4345caba340f051e10847924fc078ae18ed6695c (diff)
block: attempt to merge with existing requests on plug flush
One of the disadvantages of on-stack plugging is that we potentially lose out on merging, since all pending IO isn't always visible to everybody. When we flush the on-stack plugs, right now we don't do any checks to see whether potential merge candidates could be utilized.

Correct this by adding a new insert variant, ELEVATOR_INSERT_SORT_MERGE. It works just like ELEVATOR_INSERT_SORT, but first checks whether we can merge with an existing request; the sorted insertion is only performed if that merge attempt fails.

This fixes a regression with multiple processes issuing IO that can be merged.

Thanks to Shaohua Li <shaohua.li@intel.com> for testing and fixing an accounting bug.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
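For illustration, a minimal sketch of how an insert type like ELEVATOR_INSERT_SORT_MERGE can sit in front of the plain sort insertion. The switch shape and the helper names (elv_attempt_insert_merge(), elv_insert_sorted()) are assumptions made for this sketch, not quoted from the patched elevator.c:

/*
 * Sketch only (assumed structure and helper names): try a merge first,
 * and only fall back to the normal sorted insertion if it fails.
 */
static void elv_do_insert(struct request_queue *q, struct request *rq, int where)
{
	switch (where) {
	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If the request merges with one already on the queue,
		 * it has been absorbed and there is nothing left to insert.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* merge failed: fall through and insert as a normal sort */
	case ELEVATOR_INSERT_SORT:
		elv_insert_sorted(q, rq);	/* hypothetical helper for this sketch */
		break;
	}
}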
Diffstat (limited to 'block/blk.h')
-rw-r--r--  block/blk.h  2
1 file changed, 2 insertions, 0 deletions
diff --git a/block/blk.h b/block/blk.h
index 49d21af81d0..c8db371a921 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -103,6 +103,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 			struct bio *bio);
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
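The hunk above only exports the blk_attempt_req_merge() declaration, presumably so the plug-flush merge path outside blk-merge.c can try to collapse two already-queued requests. As a rough usage sketch: the try_merge_adjacent() wrapper, the pr_debug() message, and the return-value convention (non-zero meaning the requests were merged) are assumptions for this example, not taken from the patch:

/* Sketch only: illustrative caller of the newly exported helper. */
static void try_merge_adjacent(struct request_queue *q, struct request *rq,
			       struct request *next)
{
	/*
	 * Assumes a non-zero return means "next" was merged into "rq"
	 * and must no longer be referenced by the caller.
	 */
	if (blk_attempt_req_merge(q, rq, next))
		pr_debug("blk: merged request starting at sector %llu\n",
			 (unsigned long long)blk_rq_pos(rq));
}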