From af0d9187f66db2711f94dc20d5a06ec9ba5845b3 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Tue, 2 Apr 2013 23:31:37 +0800
Subject: tcm_vhost: Use ACCESS_ONCE for vs->vs_tpg[target] access

In vhost_scsi_handle_vq:

    tv_tpg = vs->vs_tpg[target];
    if (!tv_tpg) {
        ....
        return
    }
    tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,

1) vs->vs_tpg[target] might change after the NULL check, and 2) the
line above might read tv_tpg from vs->vs_tpg[target] a second time.
To prevent the re-read in 2), use ACCESS_ONCE.

Thanks to mst for catching this!

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 2968b493465..dd9614eb257 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -661,7 +661,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Extract the tpgt */
 		target = v_req.lun[1];
-		tv_tpg = vs->vs_tpg[target];
+		tv_tpg = ACCESS_ONCE(vs->vs_tpg[target]);
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
-- 
cgit v1.2.3
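ACCESS_ONCE() in this era's linux/compiler.h is just a volatile cast, which
forces the compiler to emit exactly one load, so the value that passed the
NULL check is the same value that reaches vhost_scsi_allocate_cmd(). The
stand-alone sketch below illustrates the hazard; the macro body is the
kernel's, while lookup_tpg() and its names are invented for the illustration:

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    struct tcm_vhost_tpg;

    static struct tcm_vhost_tpg *lookup_tpg(struct tcm_vhost_tpg **vs_tpg,
                                            unsigned int target)
    {
            /*
             * A plain vs_tpg[target] may legally be re-read by the
             * compiler after the NULL check; a concurrent writer can
             * clear the slot in between, handing back a pointer that
             * was never checked. The volatile access forces one load.
             */
            struct tcm_vhost_tpg *tv_tpg = ACCESS_ONCE(vs_tpg[target]);

            if (!tv_tpg)
                    return NULL;

            return tv_tpg;  /* same value that passed the check */
    }

Note that ACCESS_ONCE only addresses point 2): the slot can still be cleared
after the read, which is why the next patch in the series reworks the
lifetime of the map itself.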
From 4f7f46d32c9875004fae1d57ae3c02cc2e6cd6a3 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Wed, 3 Apr 2013 14:17:37 +0800
Subject: tcm_vhost: Use vq->private_data to indicate if the endpoint is setup

Currently, vs->vs_endpoint is used to indicate whether the endpoint is
set up. It is set or cleared in vhost_scsi_set_endpoint() or
vhost_scsi_clear_endpoint() under the vs->dev.mutex lock. However, when
we check it in vhost_scsi_handle_vq(), we ignore the lock.

Instead of using vs->vs_endpoint and the vs->dev.mutex lock to indicate
the status of the endpoint, use the per-virtqueue vq->private_data to
indicate it. This way, we need to take only the per-queue vq->mutex
lock, so concurrent multiqueue processing sees less lock contention.
Further, on the read side we need not take the lock at all when
vq->private_data is accessed from the vhost worker thread, because it
is protected by "vhost rcu".

(nab: Do s/VHOST_FEATURES/~VHOST_SCSI_FEATURES)

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 144 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 101 insertions(+), 43 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index dd9614eb257..6cda137bb20 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,9 +74,8 @@ enum {
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
-	bool vs_endpoint;
 
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -582,6 +581,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	struct vhost_virtqueue *vq)
 {
+	struct tcm_vhost_tpg **vs_tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tv_tpg;
 	struct tcm_vhost_cmd *tv_cmd;
@@ -590,8 +590,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	int head, ret;
 	u8 target;
 
-	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	if (unlikely(!vs->vs_endpoint))
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 *
+	 * TODO: Check that we are running from vhost_worker which acts
+	 * as read-side critical section for vhost kind of RCU.
+	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
+	 */
+	vs_tpg = rcu_dereference_check(vq->private_data, 1);
+	if (!vs_tpg)
 		return;
 
 	mutex_lock(&vq->mutex);
@@ -661,7 +669,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Extract the tpgt */
 		target = v_req.lun[1];
-		tv_tpg = ACCESS_ONCE(vs->vs_tpg[target]);
+		tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
@@ -780,6 +788,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 	vhost_scsi_handle_vq(vs, vq);
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+	vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+	int i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		vhost_scsi_flush_vq(vs, i);
+	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -790,8 +812,10 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_virtqueue *vq;
+	int index, ret, i, len;
 	bool match = false;
-	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
@@ -803,6 +827,15 @@ static int vhost_scsi_set_endpoint(
 		}
 	}
 
+	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+	vs_tpg = kzalloc(len, GFP_KERNEL);
+	if (!vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENOMEM;
+	}
+	if (vs->vs_tpg)
+		memcpy(vs_tpg, vs->vs_tpg, len);
+
 	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -817,14 +850,15 @@ static int vhost_scsi_set_endpoint(
 		tv_tport = tv_tpg->tport;
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-			if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
+			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
 				mutex_unlock(&tv_tpg->tv_tpg_mutex);
 				mutex_unlock(&tcm_vhost_mutex);
 				mutex_unlock(&vs->dev.mutex);
+				kfree(vs_tpg);
 				return -EEXIST;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
-			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}
@@ -835,12 +869,26 @@ static int vhost_scsi_set_endpoint(
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
-		vs->vs_endpoint = true;
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, vs_tpg);
+			mutex_unlock(&vq->mutex);
+		}
 		ret = 0;
 	} else {
 		ret = -EEXIST;
 	}
 
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = vs_tpg;
+
 	mutex_unlock(&vs->dev.mutex);
 	return ret;
 }
@@ -851,6 +899,8 @@ static int vhost_scsi_clear_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct vhost_virtqueue *vq;
+	bool match = false;
 	int index, ret, i;
 	u8 target;
 
@@ -862,9 +912,14 @@ static int vhost_scsi_clear_endpoint(
 			goto err_dev;
 		}
 	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return 0;
+	}
+
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
 		target = i;
-
 		tv_tpg = vs->vs_tpg[target];
 		if (!tv_tpg)
 			continue;
@@ -886,10 +941,27 @@ static int vhost_scsi_clear_endpoint(
 		}
 		tv_tpg->tv_tpg_vhost_count--;
 		vs->vs_tpg[target] = NULL;
-		vs->vs_endpoint = false;
+		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
+	if (match) {
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, NULL);
+			mutex_unlock(&vq->mutex);
+		}
+	}
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
+
 	return 0;
 
 err_tpg:
@@ -899,6 +971,24 @@ err_dev:
 	return ret;
 }
 
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_SCSI_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	smp_wmb();
+	vhost_scsi_flush(vs);
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
@@ -939,38 +1029,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	return 0;
 }
 
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
-	int i;
-
-	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-		vhost_scsi_flush_vq(vs, i);
-	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
-	if (features & ~VHOST_SCSI_FEATURES)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&vs->dev.mutex);
-	if ((features & (1 << VHOST_F_LOG_ALL)) &&
-	    !vhost_log_access_ok(&vs->dev)) {
-		mutex_unlock(&vs->dev.mutex);
-		return -EFAULT;
-	}
-	vs->dev.acked_features = features;
-	smp_wmb();
-	vhost_scsi_flush(vs);
-	mutex_unlock(&vs->dev.mutex);
-	return 0;
-}
-
 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 			unsigned long arg)
 {
-- 
cgit v1.2.3
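What this patch builds is a publish/retire pattern. The sketch below
condenses the set_endpoint side of it from the hunks above;
publish_tpg_map() is an invented name for the illustration, the driver
inlines this logic rather than splitting it out:

    static void publish_tpg_map(struct vhost_scsi *vs,
                                struct tcm_vhost_tpg **new_map)
    {
            struct vhost_virtqueue *vq;
            int i;

            for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                    vq = &vs->vqs[i];
                    /* writers publish under the per-queue lock */
                    mutex_lock(&vq->mutex);
                    rcu_assign_pointer(vq->private_data, new_map);
                    mutex_unlock(&vq->mutex);
            }
            /*
             * Flushing every queue's work stands in for synchronize_rcu():
             * the vhost worker is the read-side critical section, so once
             * the flush returns no reader can still see the old map and
             * it is safe to free.
             */
            vhost_scsi_flush(vs);
            kfree(vs->vs_tpg);
            vs->vs_tpg = new_map;
    }

Readers in vhost_scsi_handle_vq() pick the map up with
rcu_dereference_check(vq->private_data, 1) and never touch vs->vs_tpg
directly; that is what makes the lock-free fast path safe.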
From dfd5d5692c7ddf27380511b80a3dc590acfc4eee Mon Sep 17 00:00:00 2001
From: Asias He
Date: Wed, 3 Apr 2013 14:17:38 +0800
Subject: tcm_vhost: Initialize vq->last_used_idx when set endpoint

This patch fixes a guest hang when booting seabios and then the guest:

   [    0.576238] scsi0 : Virtio SCSI HBA
   [    0.616754] virtio_scsi virtio1: request:id 0 is not a head!

vq->last_used_idx is initialized only when /dev/vhost-scsi is opened or
closed:

   vhost_scsi_open      -> vhost_dev_init()    -> vhost_vq_reset()
   vhost_scsi_release() -> vhost_dev_cleanup   -> vhost_vq_reset()

So, when the guest talks to tcm_vhost after seabios does,
vq->last_used_idx still contains the old value left by seabios, which
confuses the guest.

Fix this by calling vhost_init_used() to initialize vq->last_used_idx
when we set the endpoint.

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 6cda137bb20..c127731b423 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -874,6 +874,7 @@ static int vhost_scsi_set_endpoint(
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vhost_init_used(vq);
 			mutex_unlock(&vq->mutex);
 		}
 		ret = 0;
-- 
cgit v1.2.3
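vhost_init_used() lives in drivers/vhost/vhost.c. Besides updating the
used-ring flags, the effect that matters for this bug is resyncing the
host-private counter from the ring in guest memory, roughly as below;
this is a simplified rendering, not the real function body:

    static int vhost_init_used_sketch(struct vhost_virtqueue *vq)
    {
            /* vq->used points at the used ring in guest memory */
            return get_user(vq->last_used_idx, &vq->used->idx);
    }

With the counter resynced when the endpoint is set, the first completion
after the guest kernel takes over from seabios lands in the used-ring
slot the guest expects, rather than at a stale offset that decodes to a
bogus request id.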
From f6da51c3ef07c593a410551af4cad093e2fbe755 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Wed, 10 Apr 2013 15:06:13 +0800
Subject: tcm_vhost: Remove double check of response

We checked the length of the response twice; drop the second check.

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index c127731b423..28c112f2383 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -705,15 +705,6 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		tv_cmd->tvc_vhost = vs;
 		tv_cmd->tvc_vq = vq;
-
-		if (unlikely(vq->iov[out].iov_len !=
-				sizeof(struct virtio_scsi_cmd_resp))) {
-			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-				" bytes, out: %d, in: %d\n",
-				vq->iov[out].iov_len, out, in);
-			break;
-		}
-
 		tv_cmd->tvc_resp = vq->iov[out].iov_base;
 
 		/*
-- 
cgit v1.2.3

From 7ea206cf3b06704cf2bfc9c9c395094b24dc44a2 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Wed, 10 Apr 2013 15:06:14 +0800
Subject: tcm_vhost: Fix tv_cmd leak in vhost_scsi_handle_vq

If we fail to submit the allocated tv_cmd to tcm_vhost_submission_work,
we leak the tv_cmd. Free tv_cmd on the failure path.

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 28c112f2383..210d59e222f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -724,7 +724,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(tv_cmd->tvc_cdb),
 				TCM_VHOST_MAX_CDB_SIZE);
-			break; /* TODO */
+			goto err;
 		}
 		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 
@@ -737,7 +737,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				data_direction == DMA_TO_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				break; /* TODO */
+				goto err;
 			}
 		}
 
@@ -758,6 +758,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	}
 
 	mutex_unlock(&vq->mutex);
+	return;
+
+err:
+	vhost_scsi_free_cmd(tv_cmd);
+	mutex_unlock(&vq->mutex);
 }
-- 
cgit v1.2.3
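The discipline behind the fix generalizes: once allocation succeeds, every
later failure inside the locked section must free the object before
leaving, and the lock must be dropped exactly once on every path. A
runnable user-space toy of that shape follows; all names and types are
invented for the illustration:

    #include <pthread.h>
    #include <stdlib.h>

    struct cmd { char cdb[16]; };

    static pthread_mutex_t vq_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* stub consumer: takes ownership of c and eventually frees it */
    static void submit(struct cmd *c)
    {
            free(c);
    }

    static void handle_one(int mapping_fails)
    {
            struct cmd *c;

            pthread_mutex_lock(&vq_mutex);
            c = malloc(sizeof(*c));
            if (!c)
                    goto out;       /* nothing allocated: just unlock */
            if (mapping_fails)
                    goto err_free;  /* allocated but never submitted */
            submit(c);              /* ownership passes to the consumer */
            goto out;
    err_free:
            free(c);                /* this is the free the patch adds */
    out:
            pthread_mutex_unlock(&vq_mutex);
    }

    int main(void)
    {
            handle_one(0);
            handle_one(1);          /* leaked before the fix */
            return 0;
    }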
From 637ab21e284f4e9188acc2746695f7f6452b4a22 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Wed, 10 Apr 2013 15:06:15 +0800
Subject: tcm_vhost: Add vhost_scsi_send_bad_target() helper

Move the "send bad target" code into a vhost_scsi_send_bad_target()
helper so it can be shared with other use cases.

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 210d59e222f..1bb0fb4950d 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -578,6 +578,23 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	}
 }
 
+static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+	struct vhost_virtqueue *vq, int head, unsigned out)
+{
+	struct virtio_scsi_cmd_resp __user *resp;
+	struct virtio_scsi_cmd_resp rsp;
+	int ret;
+
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+	resp = vq->iov[out].iov_base;
+	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	struct vhost_virtqueue *vq)
 {
@@ -673,19 +690,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
-			struct virtio_scsi_cmd_resp __user *resp;
-			struct virtio_scsi_cmd_resp rsp;
-
-			memset(&rsp, 0, sizeof(rsp));
-			rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
-			resp = vq->iov[out].iov_base;
-			ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-			if (!ret)
-				vhost_add_used_and_signal(&vs->dev,
-							  vq, head, 0);
-			else
-				pr_err("Faulted on virtio_scsi_cmd_resp\n");
-
+			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
 
-- 
cgit v1.2.3

From 055f648c49d948de9452b0f1758869bbbd0097b8 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Wed, 10 Apr 2013 15:06:16 +0800
Subject: tcm_vhost: Send bad target to guest when cmd fails

Send a "bad target" response to the guest when: 1) we cannot allocate
the cmd, or 2) we fail to submit the cmd.

Signed-off-by: Asias He
Acked-by: Michael S. Tsirkin
Signed-off-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 1bb0fb4950d..957a0b98a5d 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -703,7 +703,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 		if (IS_ERR(tv_cmd)) {
 			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
 					PTR_ERR(tv_cmd));
-			break;
+			goto err_cmd;
 		}
 		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
 			": %d\n", tv_cmd, exp_data_len, data_direction);
@@ -729,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(tv_cmd->tvc_cdb),
 				TCM_VHOST_MAX_CDB_SIZE);
-			goto err;
+			goto err_free;
 		}
 		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 
@@ -742,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				data_direction == DMA_TO_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				goto err;
+				goto err_free;
 			}
 		}
 
@@ -765,8 +765,10 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	mutex_unlock(&vq->mutex);
 	return;
 
-err:
+err_free:
 	vhost_scsi_free_cmd(tv_cmd);
+err_cmd:
+	vhost_scsi_send_bad_target(vs, vq, head, out);
 	mutex_unlock(&vq->mutex);
 }
-- 
cgit v1.2.3
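Taken together, the last three patches leave vhost_scsi_handle_vq() with a
two-level error path: allocation failure jumps to err_cmd, while CDB-length
and iov-mapping failures jump to err_free, and both labels funnel through
vhost_scsi_send_bad_target() before the single unlock. What the guest then
observes is a zeroed virtio_scsi_cmd_resp whose response byte is
VIRTIO_SCSI_S_BAD_TARGET. The runnable user-space snippet below builds that
payload; the layout and constants are transcribed from the virtio-scsi spec
and are assumed to match linux/virtio_scsi.h:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define VIRTIO_SCSI_S_BAD_TARGET 3   /* response code, per spec */
    #define VIRTIO_SCSI_SENSE_SIZE   96

    struct virtio_scsi_cmd_resp {
            uint32_t sense_len;
            uint32_t resid;
            uint16_t status_qualifier;
            uint8_t  status;
            uint8_t  response;
            uint8_t  sense[VIRTIO_SCSI_SENSE_SIZE];
    };

    int main(void)
    {
            struct virtio_scsi_cmd_resp rsp;

            /* mirrors vhost_scsi_send_bad_target() above */
            memset(&rsp, 0, sizeof(rsp));
            rsp.response = VIRTIO_SCSI_S_BAD_TARGET;

            printf("response=%u in a %zu-byte payload\n",
                   rsp.response, sizeof(rsp));
            return 0;
    }

Completing the descriptor with this response keeps the ring moving; before
the series, a failed command simply broke out of the request loop, leaving
the guest waiting on a request that would never complete.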