Diffstat (limited to 'drivers/block')
 drivers/block/brd.c                 |  2
 drivers/block/cciss.c               |  1
 drivers/block/cpqarray.c            |  1
 drivers/block/loop.c                | 21
 drivers/block/nbd.c                 | 10
 drivers/block/rbd.c                 | 14
 drivers/block/xen-blkback/blkback.c | 16
 drivers/block/xen-blkfront.c        | 36
 8 files changed, 61 insertions(+), 40 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 9bf4371755f..d91f1a56e86 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -545,7 +545,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
mutex_lock(&brd_devices_mutex);
brd = brd_init_one(MINOR(dev) >> part_shift);
- kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
+ kobj = brd ? get_disk(brd->brd_disk) : NULL;
mutex_unlock(&brd_devices_mutex);
*part = 0;
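The brd hunk above (and the matching loop_probe() change further down) switches a block-region probe callback to returning NULL on failure. A minimal sketch of the convention being assumed, with example_probe() and example_find_or_create() as hypothetical names: callbacks registered through blk_register_region() feed kobj_lookup(), which only tests the returned pointer against NULL, so an ERR_PTR() value would be mistaken for a live kobject by the caller.

static struct kobject *example_probe(dev_t dev, int *part, void *data)
{
	struct gendisk *disk = example_find_or_create(MINOR(dev));	/* hypothetical lookup/create helper */

	*part = 0;
	return disk ? get_disk(disk) : NULL;	/* report failure as NULL, never as ERR_PTR() */
}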
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2cc80b..90a4e6b940a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |=
copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b9..2b944038453 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
ida_pci_info_struct pciinfo;
if (!arg) return -EINVAL;
+ memset(&pciinfo, 0, sizeof(pciinfo));
pciinfo.bus = host->pci_dev->bus->number;
pciinfo.dev_fn = host->pci_dev->devfn;
pciinfo.board_id = host->board_id;
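The two memset() additions above (cciss and cpqarray) apply the same pattern: a struct assembled on the kernel stack and later copied out to user space is zeroed first, so compiler padding and any members left unset cannot leak stale stack bytes. A minimal sketch of that pattern, with the struct, device type and field names invented for illustration:

struct example_info {
	unsigned char	bus;
	unsigned int	dev_fn;
	unsigned int	board_id;
};

static int example_get_info(struct example_device *edev, void __user *argp)
{
	struct example_info info;

	memset(&info, 0, sizeof(info));		/* clear padding and unset members */
	info.bus      = edev->bus;
	info.dev_fn   = edev->devfn;
	info.board_id = edev->board_id;

	return copy_to_user(argp, &info, sizeof(info)) ? -EFAULT : 0;
}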
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d92d50fd84b..4a811654787 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -894,13 +894,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
bio_list_init(&lo->lo_bio_list);
- /*
- * set queue make_request_fn, and add limits based on lower level
- * device
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
- lo->lo_queue->queuedata = lo;
-
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1618,6 +1611,8 @@ static int loop_add(struct loop_device **l, int i)
if (!lo)
goto out;
+ lo->lo_state = Lo_unbound;
+
/* allocate id, if @id >= 0, we're requesting that specific id */
if (i >= 0) {
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
@@ -1633,7 +1628,13 @@ static int loop_add(struct loop_device **l, int i)
err = -ENOMEM;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
- goto out_free_dev;
+ goto out_free_idr;
+
+ /*
+ * set queue make_request_fn
+ */
+ blk_queue_make_request(lo->lo_queue, loop_make_request);
+ lo->lo_queue->queuedata = lo;
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
@@ -1678,6 +1679,8 @@ static int loop_add(struct loop_device **l, int i)
out_free_queue:
blk_cleanup_queue(lo->lo_queue);
+out_free_idr:
+ idr_remove(&loop_index_idr, i);
out_free_dev:
kfree(lo);
out:
@@ -1741,7 +1744,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
if (err < 0)
err = loop_add(&lo, MINOR(dev) >> part_shift);
if (err < 0)
- kobj = ERR_PTR(err);
+ kobj = NULL;
else
kobj = get_disk(lo->lo_disk);
mutex_unlock(&loop_index_mutex);
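The loop changes do two things: queue setup (blk_queue_make_request() and queuedata) moves from loop_set_fd() into loop_add(), with lo_state initialised to Lo_unbound, so the queue is wired up as soon as the device exists rather than only when a backing file is attached; and the error path gains an out_free_idr label so a failed queue allocation returns the idr slot taken by idr_alloc(). A condensed sketch of the resulting unwind order, with unrelated loop_add() internals elided:

	i = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	if (i < 0) {
		err = i;
		goto out_free_dev;
	}
	...
	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
	if (!lo->lo_queue)
		goto out_free_idr;
	...
	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	...
out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_free_idr:
	idr_remove(&loop_index_idr, i);	/* undo idr_alloc() */
out_free_dev:
	kfree(lo);
	return err;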
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 037288e7874..cf1576d5436 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -623,8 +623,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!nbd->sock)
return -EINVAL;
+ nbd->disconnect = 1;
+
nbd_send_req(nbd, &sreq);
- return 0;
+ return 0;
}
case NBD_CLEAR_SOCK: {
@@ -654,6 +656,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->sock = SOCKET_I(inode);
if (max_part > 0)
bdev->bd_invalidated = 1;
+ nbd->disconnect = 0; /* we're connected now */
return 0;
} else {
fput(file);
@@ -714,7 +717,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
else
blk_queue_flush(nbd->disk->queue, 0);
- thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
+ thread = kthread_create(nbd_thread, nbd, "%s",
+ nbd->disk->disk_name);
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
return PTR_ERR(thread);
@@ -742,6 +746,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
set_capacity(nbd->disk, 0);
if (max_part > 0)
ioctl_by_bdev(bdev, BLKRRPART, 0);
+ if (nbd->disconnect) /* user requested, ignore socket errors */
+ return 0;
return nbd->harderror;
}
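Two separate fixes sit in the nbd hunks. NBD_DISCONNECT now records nbd->disconnect = 1 (and NBD_SET_SOCK clears it), so when NBD_DO_IT later tears the socket down, the ioctl can return 0 instead of reporting the socket error that the disconnect itself provoked. Independently, kthread_create() gets an explicit "%s" so the disk name is never interpreted as a format string; this is defensive here, since nbd disk names are kernel-generated. A one-line sketch of that rule, with name standing in for any externally influenced string:

	/* kthread_create(fn, data, name) would let a '%' in name be parsed as a conversion */
	thread = kthread_create(nbd_thread, nbd, "%s", name);	/* name is data, "%s" is the format */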
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index aff789d6fcc..8c7421af8f1 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1565,11 +1565,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
obj_request, obj_request->img_request, obj_request->result,
xferred, length);
/*
- * ENOENT means a hole in the image. We zero-fill the
- * entire length of the request. A short read also implies
- * zero-fill to the end of the request. Either way we
- * update the xferred count to indicate the whole request
- * was satisfied.
+ * ENOENT means a hole in the image. We zero-fill the entire
+ * length of the request. A short read also implies zero-fill
+ * to the end of the request. An error requires the whole
+ * length of the request to be reported finished with an error
+ * to the block layer. In each case we update the xferred
+ * count to indicate the whole request was satisfied.
*/
rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
if (obj_request->result == -ENOENT) {
@@ -1578,14 +1579,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
else
zero_pages(obj_request->pages, 0, length);
obj_request->result = 0;
- obj_request->xferred = length;
} else if (xferred < length && !obj_request->result) {
if (obj_request->type == OBJ_REQUEST_BIO)
zero_bio_chain(obj_request->bio_list, xferred);
else
zero_pages(obj_request->pages, xferred, length);
- obj_request->xferred = length;
}
+ obj_request->xferred = length;
obj_request_done_set(obj_request);
}
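The rbd change hoists the obj_request->xferred = length assignment out of the two zero-fill branches, so the full request length is reported to the block layer in every case, including the error case that previously left xferred at whatever was actually transferred. A condensed view of the resulting flow, with zero_whole() and zero_tail() standing in for the bio/page zero-fill calls shown in the hunk:

	if (obj_request->result == -ENOENT) {
		zero_whole(obj_request);		/* hole in the image: zero the whole request */
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		zero_tail(obj_request, xferred);	/* short read: zero from xferred to the end */
	}
	obj_request->xferred = length;			/* holes, short reads and errors alike */
	obj_request_done_set(obj_request);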
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dd5b2fed97e..03dd4aa612d 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -647,10 +647,22 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
unsigned long secure;
+ struct phys_req preq;
+
+ xen_blkif_get(blkif);
+ preq.sector_number = req->u.discard.sector_number;
+ preq.nr_sects = req->u.discard.nr_sectors;
+
+ err = xen_vbd_translate(&preq, blkif, WRITE);
+ if (err) {
+ pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+ preq.sector_number,
+ preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
+ goto fail_response;
+ }
blkif->st_ds_req++;
- xen_blkif_get(blkif);
secure = (blkif->vbd.discard_secure &&
(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
BLKDEV_DISCARD_SECURE : 0;
@@ -658,7 +670,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors,
GFP_KERNEL, secure);
-
+fail_response:
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
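The blkback change validates a guest's discard request before acting on it: the requested sector range is run through xen_vbd_translate(), the same check read and write requests already go through, and an out-of-range or read-only target now takes the fail_response path instead of reaching blkdev_issue_discard(). xen_blkif_get() moves ahead of that bail-out so the reference taken on the interface stays balanced on the error path. A minimal sketch of the check, following the hunk above:

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);	/* range and permission check */
	if (err)
		goto fail_response;			/* reply with an error, issue no I/O */

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors, GFP_KERNEL, secure);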
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d89ef86220f..69b45fc9727 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -75,6 +75,7 @@ struct blk_shadow {
struct blkif_request req;
struct request *request;
struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static DEFINE_MUTEX(blkfront_mutex);
@@ -98,7 +99,6 @@ struct blkfront_info
enum blkif_state connected;
int ring_ref;
struct blkif_front_ring ring;
- struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int evtchn, irq;
struct request_queue *rq;
struct work_struct work;
@@ -422,11 +422,11 @@ static int blkif_queue_request(struct request *req)
ring_req->u.discard.flag = 0;
} else {
ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
- info->sg);
+ info->shadow[id].sg);
BUG_ON(ring_req->u.rw.nr_segments >
BLKIF_MAX_SEGMENTS_PER_REQUEST);
- for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
+ for_each_sg(info->shadow[id].sg, sg, ring_req->u.rw.nr_segments, i) {
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
@@ -867,12 +867,12 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
struct blkif_response *bret)
{
int i = 0;
- struct bio_vec *bvec;
- struct req_iterator iter;
- unsigned long flags;
+ struct scatterlist *sg;
char *bvec_data;
void *shared_data;
- unsigned int offset = 0;
+ int nseg;
+
+ nseg = s->req.u.rw.nr_segments;
if (bret->operation == BLKIF_OP_READ) {
/*
@@ -881,19 +881,16 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* than PAGE_SIZE, we have to keep track of the current offset,
* to be sure we are copying the data from the right shared page.
*/
- rq_for_each_segment(bvec, s->request, iter) {
- BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
- if (bvec->bv_offset < offset)
- i++;
- BUG_ON(i >= s->req.u.rw.nr_segments);
+ for_each_sg(s->sg, sg, nseg, i) {
+ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
shared_data = kmap_atomic(
pfn_to_page(s->grants_used[i]->pfn));
- bvec_data = bvec_kmap_irq(bvec, &flags);
- memcpy(bvec_data, shared_data + bvec->bv_offset,
- bvec->bv_len);
- bvec_kunmap_irq(bvec_data, &flags);
+ bvec_data = kmap_atomic(sg_page(sg));
+ memcpy(bvec_data + sg->offset,
+ shared_data + sg->offset,
+ sg->length);
+ kunmap_atomic(bvec_data);
kunmap_atomic(shared_data);
- offset = bvec->bv_offset + bvec->bv_len;
}
}
/* Add the persistent grant into the list of free grants */
@@ -1022,7 +1019,7 @@ static int setup_blkring(struct xenbus_device *dev,
struct blkfront_info *info)
{
struct blkif_sring *sring;
- int err;
+ int err, i;
info->ring_ref = GRANT_INVALID_REF;
@@ -1034,7 +1031,8 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
/* Allocate memory for grants */
err = fill_grant_buffer(info, BLK_RING_SIZE *
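The blkfront changes move the scatterlist from a single per-device array in struct blkfront_info to one array per struct blk_shadow, i.e. one per in-flight request. With persistent grants the completion path has to copy read data out of the shared pages, and a single shared sg[] can be rewritten by the next queued request before that copy happens; keeping the mapping in the shadow entry ties it to the request it describes, and blkif_completion() can then walk the saved scatterlist with kmap_atomic() instead of re-deriving offsets from the bio_vecs. A condensed sketch of the per-request layout and its three touch points, following the hunks above:

	/* one sg table per shadow (ring) entry, set up once at ring init */
	for (i = 0; i < BLK_RING_SIZE; i++)
		sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* submit: map this request's segments into its own slot */
	nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);

	/* completion (reads): copy data out of the persistent grants */
	for_each_sg(s->sg, sg, nseg, i) {
		shared_data = kmap_atomic(pfn_to_page(s->grants_used[i]->pfn));
		bvec_data   = kmap_atomic(sg_page(sg));
		memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length);
		kunmap_atomic(bvec_data);
		kunmap_atomic(shared_data);
	}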