author     Alan D. Brunelle <Alan.Brunelle@hp.com>    2008-04-29 14:44:19 +0200
committer  Jens Axboe <jens.axboe@oracle.com>         2008-04-29 14:48:55 +0200
commit     ac9fafa1243640349aa481adf473db283a695766 (patch)
tree       155c2371cca8971638d781269f39fa015bc6509c /block
parent     d7e3c3249ef23b4617393c69fe464765b4ff1645 (diff)
block: Skip I/O merges when disabled
The block I/O + elevator + I/O scheduler code spends a lot of time trying to merge I/Os -- rightfully so under "normal" circumstances. However, if one knows that the incoming I/O stream is /very/ random in nature, those cycles are wasted. This patch adds a per-request_queue tunable that (when set) disables merge attempts (beyond the simple one-hit cache check), thus freeing up a non-trivial amount of CPU cycles.

Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
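The tunable is exposed through sysfs as the queue's "nomerges" attribute (see the blk-sysfs.c hunk below). As a minimal userspace sketch -- assuming the standard sysfs mount at /sys and using "sda" purely as an example device name -- toggling it amounts to writing "0" or "1" to that file:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* "sda" is illustrative; substitute the target disk. */
            int fd = open("/sys/block/sda/queue/nomerges", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* queue_nomerges_store() treats any non-zero value as "set",
             * so writing "1" sets QUEUE_FLAG_NOMERGES and "0" clears it. */
            if (write(fd, "1", 1) != 1) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }

Reading the attribute back goes through queue_nomerges_show(), which reports the current flag state as 0 or 1.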
Diffstat (limited to 'block')
 block/blk-sysfs.c | 26 ++++++++++++++++++++++++++
 block/elevator.c  |  3 +++
 2 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22..e85c4013e8a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ if (nm)
+ set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+ else
+ clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.show = queue_hw_sector_size_show,
};
+static struct queue_sysfs_entry queue_nomerges_entry = {
+ .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nomerges_show,
+ .store = queue_nomerges_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
&queue_max_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
+ &queue_nomerges_entry.attr,
NULL,
};
diff --git a/block/elevator.c b/block/elevator.c
index 7253fa05db0..ac5310ef827 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
}
}
+ if (blk_queue_nomerges(q))
+ return ELEVATOR_NO_MERGE;
+
/*
* See if our hash lookup can find a potential backmerge.
*/
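For reference, the blk_queue_nomerges() test used in the hunk above is the read side of the same flag. In kernels of this vintage it lives in include/linux/blkdev.h as a test_bit() wrapper, roughly as follows (a sketch, not a verbatim copy):

    #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)

Note where the early return sits: it comes after the q->last_merge check at the top of elv_merge(), so the "simple one-hit cache check" mentioned in the commit message still runs even when merges are disabled; only the hash backmerge lookup and the scheduler-specific merge paths are skipped.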