Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c  13
-rw-r--r--  block/blk-core.c       10
-rw-r--r--  block/blk-merge.c      18
-rw-r--r--  block/blk-mq-tag.c     28
-rw-r--r--  block/blk-mq.c         43
-rw-r--r--  block/elevator.c        6
-rw-r--r--  block/ioprio.c         14
-rw-r--r--  block/scsi_ioctl.c     13
8 files changed, 105 insertions, 40 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232e429f..5cbd5d9ea61d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_iter iter;
- struct bio_vec *bv;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned int i, ret = 0;
+ unsigned int ret = 0;
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
- bio_for_each_segment_all(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
+ bio_for_each_segment(bv, bio, bviter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
- iter.data_buf = kaddr + bv->bv_offset;
- iter.data_size = bv->bv_len;
+ iter.data_buf = kaddr + bv.bv_offset;
+ iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (ret) {
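
[Editor's note] The hunk above switches bio_integrity_process() from bio_for_each_segment_all() to bio_for_each_segment(): the _all variant walks the underlying bi_io_vec array by pointer and is only valid for a bio that owns its vector, while the iterator-based form honours bio->bi_iter and therefore also works on cloned bios. A minimal sketch of the resulting idiom, where process_segment() is a hypothetical stand-in for the integrity proc_fn call:

	/* Sketch only: bio_for_each_segment() yields a struct bio_vec
	 * by value, hence bv.bv_page rather than bv->bv_page as with
	 * the _all variant. process_segment() is hypothetical. */
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		process_segment(bv.bv_page, bv.bv_offset, bv.bv_len);
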
diff --git a/block/blk-core.c b/block/blk-core.c
index 93f9152fc271..30f6153a40c2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1269,7 +1269,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- if (blk_rq_tagged(rq))
+ if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
@@ -1328,7 +1328,7 @@ void part_round_stats(int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
@@ -2137,7 +2137,7 @@ void blk_account_io_done(struct request *req)
}
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/*
* Don't process normal requests when queue is suspended
* or in the process of suspending/resuming
@@ -2557,7 +2557,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
*/
void blk_finish_request(struct request *req, int error)
{
- if (blk_rq_tagged(req))
+ if (req->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(req->q, req);
BUG_ON(blk_queued_rq(req));
@@ -3162,7 +3162,7 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/**
* blk_pm_runtime_init - Block layer runtime PM initialization routine
* @q: the queue of the device
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ba99351c0f58..89b97b5e0881 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -97,18 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
- &q->queue_flags);
+ unsigned short seg_cnt;
+
+ /* estimate segment number by bi_vcnt for non-cloned bio */
+ if (bio_flagged(bio, BIO_CLONED))
+ seg_cnt = bio_segments(bio);
+ else
+ seg_cnt = bio->bi_vcnt;
- if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
- bio->bi_vcnt < queue_max_segments(q))
- bio->bi_phys_segments = bio->bi_vcnt;
+ if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+ (seg_cnt < queue_max_segments(q)))
+ bio->bi_phys_segments = seg_cnt;
else {
struct bio *nxt = bio->bi_next;
bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
- no_sg_merge);
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
bio->bi_next = nxt;
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 1b7229f9354a..e3d4e4043b49 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -586,6 +586,34 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
return 0;
}
+/**
+ * blk_mq_unique_tag() - return a tag that is unique queue-wide
+ * @rq: request for which to compute a unique tag
+ *
+ * The tag field in struct request is unique per hardware queue but not over
+ * all hardware queues. Hence this function that returns a tag with the
+ * hardware context index in the upper bits and the per hardware queue tag in
+ * the lower bits.
+ *
+ * Note: When called for a request that is queued on a non-multiqueue request
+ * queue, the hardware context index is set to zero.
+ */
+u32 blk_mq_unique_tag(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ int hwq = 0;
+
+ if (q->mq_ops) {
+ hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hwq = hctx->queue_num;
+ }
+
+ return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
+ (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
+}
+EXPORT_SYMBOL(blk_mq_unique_tag);
+
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
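
[Editor's note] Since the docblock above defines the tag layout (hardware context index in the upper bits, per hardware queue tag in the lower bits), decoding is the exact inverse of the packing. A hedged sketch, assuming the BLK_MQ_UNIQUE_TAG_BITS/BLK_MQ_UNIQUE_TAG_MASK constants that accompany this function in include/linux/blk-mq.h; the same series adds equivalent upstream helpers, blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag():

	/* Sketch: split a queue-wide tag from blk_mq_unique_tag() back
	 * into its parts. Assumes the BLK_MQ_UNIQUE_TAG_* constants
	 * from include/linux/blk-mq.h. */
	static inline u16 unique_tag_to_hwq(u32 unique_tag)
	{
		return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
	}

	static inline u16 unique_tag_to_tag(u32 unique_tag)
	{
		return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
	}

A driver that needs a host-wide tag (e.g. a SCSI LLD with a single hardware command ring) can use the packed value directly, since it is unique across all hardware queues of the request queue.
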
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b21a3b6f7b65..da1ab5641227 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
{
bool freeze;
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
percpu_ref_kill(&q->mq_usage_counter);
blk_mq_run_queues(q, false);
}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+ blk_mq_freeze_queue_start(q);
+ blk_mq_freeze_queue_wait(q);
+}
+
static void blk_mq_unfreeze_queue(struct request_queue *q)
{
bool wake;
@@ -1945,7 +1955,7 @@ void blk_mq_free_queue(struct request_queue *q)
/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
- blk_mq_freeze_queue(q);
+ WARN_ON_ONCE(!q->mq_freeze_depth);
blk_mq_sysfs_unregister(q);
@@ -1960,8 +1970,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
blk_mq_map_swqueue(q);
blk_mq_sysfs_register(q);
-
- blk_mq_unfreeze_queue(q);
}
static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1980,8 +1988,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
mutex_lock(&all_q_mutex);
+
+ /*
+ * We need to freeze and reinit all existing queues. Freezing
+ * involves synchronous wait for an RCU grace period and doing it
+ * one by one may take a long time. Start freezing all queues in
+ * one swoop and then wait for the completions so that freezing can
+ * take place in parallel.
+ */
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_start(q);
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_wait(q);
+
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q);
+
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_unfreeze_queue(q);
+
mutex_unlock(&all_q_mutex);
return NOTIFY_OK;
}
@@ -2048,6 +2073,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
+ BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
+
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
diff --git a/block/elevator.c b/block/elevator.c
index 24c28b659bb3..59794d0d38e3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -229,7 +229,9 @@ int elevator_init(struct request_queue *q, char *name)
}
err = e->ops.elevator_init_fn(q, e);
- return 0;
+ if (err)
+ elevator_put(e);
+ return err;
}
EXPORT_SYMBOL(elevator_init);
@@ -537,7 +539,7 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
diff --git a/block/ioprio.c b/block/ioprio.c
index e50170ca7c33..31666c92b46a 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -157,14 +157,16 @@ out:
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
- unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
- unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+ unsigned short aclass;
+ unsigned short bclass;
- if (aclass == IOPRIO_CLASS_NONE)
- aclass = IOPRIO_CLASS_BE;
- if (bclass == IOPRIO_CLASS_NONE)
- bclass = IOPRIO_CLASS_BE;
+ if (!ioprio_valid(aprio))
+ aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+ if (!ioprio_valid(bprio))
+ bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+ aclass = IOPRIO_PRIO_CLASS(aprio);
+ bclass = IOPRIO_PRIO_CLASS(bprio);
if (aclass == bclass)
return min(aprio, bprio);
if (aclass > bclass)
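
[Editor's note] The reordering above matters because an IOPRIO_CLASS_NONE value previously kept its raw data bits when compared, so an unset priority could spuriously beat a real one; normalising both values to BE/IOPRIO_NORM before extracting the class fixes that. A self-contained userspace sketch of the patched logic (the macro values mirror include/linux/ioprio.h but are redefined here so the example compiles on its own):

	#include <stdio.h>

	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_PRIO_CLASS(p)	((p) >> IOPRIO_CLASS_SHIFT)
	#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))
	#define IOPRIO_NORM		4
	#define ioprio_valid(p)		(IOPRIO_PRIO_CLASS(p) != IOPRIO_CLASS_NONE)

	enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
	       IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

	static unsigned short ioprio_best(unsigned short aprio,
					  unsigned short bprio)
	{
		unsigned short aclass, bclass;

		/* Treat an unset priority as the default: BE, level 4. */
		if (!ioprio_valid(aprio))
			aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		if (!ioprio_valid(bprio))
			bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

		aclass = IOPRIO_PRIO_CLASS(aprio);
		bclass = IOPRIO_PRIO_CLASS(bprio);
		if (aclass == bclass)
			return aprio < bprio ? aprio : bprio; /* lower wins */
		return aclass > bclass ? bprio : aprio; /* RT > BE > IDLE */
	}

	int main(void)
	{
		unsigned short none = 0; /* IOPRIO_CLASS_NONE, no data */
		unsigned short rt0  = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);

		/* Before the fix, CLASS_NONE compared by raw value and
		 * could rank above a real RT priority; now it is first
		 * normalised to BE/4, so RT wins as expected. */
		printf("best = %#x\n", ioprio_best(none, rt0)); /* 0x2000 */
		return 0;
	}
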
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index abb2e65b24cc..28163fad3c5d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -142,7 +142,7 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_VERIFY_10, filter->read_ok);
__set_bit(VERIFY_16, filter->read_ok);
__set_bit(REPORT_LUNS, filter->read_ok);
- __set_bit(SERVICE_ACTION_IN, filter->read_ok);
+ __set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
__set_bit(MAINTENANCE_IN, filter->read_ok);
__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto error;
+ goto error_free_buffer;
}
blk_rq_set_block_pc(rq);
@@ -508,7 +508,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
err = DRIVER_ERROR << 24;
- goto out;
+ goto error;
}
memset(sense, 0, sizeof(sense));
@@ -517,7 +517,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
blk_execute_rq(q, disk, rq, 0);
-out:
err = rq->errors & 0xff; /* only 8 bit SCSI status */
if (err) {
if (rq->sense_len && rq->sense) {
@@ -532,9 +531,11 @@ out:
}
error:
+ blk_put_request(rq);
+
+error_free_buffer:
kfree(buffer);
- if (rq)
- blk_put_request(rq);
+
return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);