From ed01fee283a067c72b2d6500046080dbc1bb9dae Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 3 Mar 2021 13:28:22 +0100
Subject: nvme-fabrics: only reserve a single tag

Fabrics drivers currently reserve two tags on the admin queue.  But
given that the connect command is only run on a freshly created queue
or after all commands have been force aborted, we only need to reserve
a single tag.

Signed-off-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Hannes Reinecke
Reviewed-by: Daniel Wagner
---
 drivers/nvme/host/fabrics.h | 7 +++++++
 drivers/nvme/host/fc.c      | 4 ++--
 drivers/nvme/host/rdma.c    | 4 ++--
 drivers/nvme/host/tcp.c     | 4 ++--
 drivers/nvme/target/loop.c  | 4 ++--
 5 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 733010d2eafd..888b108d87a4 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -18,6 +18,13 @@
 /* default is -1: the fail fast mechanism is disabled */
 #define NVMF_DEF_FAIL_FAST_TMO	-1
 
+/*
+ * Reserve one command for internal usage.  This command is used for sending
+ * the connect command, as well as for the keep alive command on the admin
+ * queue once live.
+ */
+#define NVMF_RESERVED_TAGS	1
+
 /*
  * Define a host as seen by the target.  We allocate one at boot, but also
  * allow the override it when creating controllers.  This is both to provide
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 73d073748389..6ffa8de2a0d7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2863,7 +2863,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_fc_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
 	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ctrl->tag_set.cmd_size =
@@ -3485,7 +3485,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
 	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
 	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
+	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
 	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
 	ctrl->admin_tag_set.cmd_size =
 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 53ac4d7442ba..7855103e05d2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -798,7 +798,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		memset(set, 0, sizeof(*set));
 		set->ops = &nvme_rdma_admin_mq_ops;
 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-		set->reserved_tags = 2; /* connect + keep-alive */
+		set->reserved_tags = NVMF_RESERVED_TAGS;
 		set->numa_node = nctrl->numa_node;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
 				NVME_RDMA_DATA_SGL_SIZE;
@@ -811,7 +811,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		memset(set, 0, sizeof(*set));
 		set->ops = &nvme_rdma_mq_ops;
 		set->queue_depth = nctrl->sqsize + 1;
-		set->reserved_tags = 1; /* fabric connect */
+		set->reserved_tags = NVMF_RESERVED_TAGS;
 		set->numa_node = nctrl->numa_node;
 		set->flags = BLK_MQ_F_SHOULD_MERGE;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 69f59d2c5799..c535836e7065 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1575,7 +1575,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
 		memset(set, 0, sizeof(*set));
 		set->ops = &nvme_tcp_admin_mq_ops;
 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-		set->reserved_tags = 2; /* connect + keep-alive */
+		set->reserved_tags = NVMF_RESERVED_TAGS;
 		set->numa_node = nctrl->numa_node;
 		set->flags = BLK_MQ_F_BLOCKING;
 		set->cmd_size = sizeof(struct nvme_tcp_request);
@@ -1587,7 +1587,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
 		memset(set, 0, sizeof(*set));
 		set->ops = &nvme_tcp_mq_ops;
 		set->queue_depth = nctrl->sqsize + 1;
-		set->reserved_tags = 1; /* fabric connect */
+		set->reserved_tags = NVMF_RESERVED_TAGS;
 		set->numa_node = nctrl->numa_node;
 		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
 		set->cmd_size = sizeof(struct nvme_tcp_request);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index cb6f86572b24..3e189e753bcf 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -349,7 +349,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
 	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
 	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
 	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
 		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
@@ -520,7 +520,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
 	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
--
cgit v1.2.3
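A note on how the single reserved tag is consumed: callers opt into the
reserved pool by passing BLK_MQ_REQ_RESERVED at allocation time, so internal
commands can never starve behind normal I/O.  A minimal sketch of that
pattern, using the same helpers the keep-alive path below uses (the
example_* names are hypothetical, not part of the patch):

	/* Submit an internal command from the reserved tag pool. */
	static void example_end_io(struct request *rq, blk_status_t status)
	{
		blk_mq_free_request(rq);	/* return the reserved tag */
	}

	static int example_submit_reserved(struct nvme_ctrl *ctrl,
			struct nvme_command *cmd)
	{
		struct request *rq;

		/* BLK_MQ_REQ_RESERVED draws from the NVMF_RESERVED_TAGS pool */
		rq = nvme_alloc_request(ctrl->admin_q, cmd, BLK_MQ_REQ_RESERVED);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		blk_execute_rq_nowait(NULL, rq, 0, example_end_io);
		return 0;
	}

Sharing one reserved tag between connect and keep-alive works because the
two are never outstanding at the same time: connect only runs on a freshly
created queue or after all commands have been force aborted, and keep-alive
only runs once the queue is live.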
From 06c3c3365b4bae5ef0f0525d3683b73cbae1e69c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 3 Mar 2021 13:46:06 +0100
Subject: nvme: merge nvme_keep_alive into nvme_keep_alive_work

Merge nvme_keep_alive into its only caller to prepare for additional
changes to this code.

Signed-off-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Hannes Reinecke
Reviewed-by: Daniel Wagner
---
 drivers/nvme/host/core.c | 26 ++++++++------------------
 1 file changed, 8 insertions(+), 18 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a5653892d773..45d510a12c6b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1226,28 +1226,12 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
-static int nvme_keep_alive(struct nvme_ctrl *ctrl)
-{
-	struct request *rq;
-
-	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
-			BLK_MQ_REQ_RESERVED);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	rq->timeout = ctrl->kato * HZ;
-	rq->end_io_data = ctrl;
-
-	blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
-
-	return 0;
-}
-
 static void nvme_keep_alive_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvme_ctrl, ka_work);
 	bool comp_seen = ctrl->comp_seen;
+	struct request *rq;
 
 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
 		dev_dbg(ctrl->device,
@@ -1257,12 +1241,18 @@ static void nvme_keep_alive_work(struct work_struct *work)
 		return;
 	}
 
-	if (nvme_keep_alive(ctrl)) {
+	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
+			BLK_MQ_REQ_RESERVED);
+	if (IS_ERR(rq)) {
 		/* allocation failure, reset the controller */
 		dev_err(ctrl->device, "keep-alive failed\n");
 		nvme_reset_ctrl(ctrl);
 		return;
 	}
+
+	rq->timeout = ctrl->kato * HZ;
+	rq->end_io_data = ctrl;
+	blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
--
cgit v1.2.3
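The merged function keeps the standard delayed-work shape, which is worth
spelling out because the follow-up patch relies on the work item always
terminating.  A stripped-down sketch of that shape (structure and function
names hypothetical):

	#include <linux/workqueue.h>

	struct example_ctrl {
		struct delayed_work ka_work;
		unsigned long kato;	/* keep-alive timeout, in seconds */
	};

	static void example_ka_work(struct work_struct *work)
	{
		/* recover the owning controller from the work pointer */
		struct example_ctrl *ctrl = container_of(to_delayed_work(work),
				struct example_ctrl, ka_work);

		/* ... submit the keep-alive command here ... */

		/* re-arm so a keep-alive fires once per KATO interval */
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
	}

In the real driver the re-arm happens in nvme_keep_alive_end_io(), as the
context lines in the hunk above show, so the next keep-alive is only
scheduled after the previous one completes.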
From 985c5a329dfe5ecb782551cddef48912961b83f1 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 3 Mar 2021 13:51:47 +0100
Subject: nvme: allocate the keep alive request using BLK_MQ_REQ_NOWAIT

This avoids an error recovery deadlock where the keep alive work is
waiting for a request and thus can't be flushed to make progress for
tearing down the controller.  Also print the error code returned from
blk_mq_alloc_request to help debug any future issues in this code.

Based on an earlier patch from Hannes Reinecke.

Signed-off-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Hannes Reinecke
Reviewed-by: Daniel Wagner
---
 drivers/nvme/host/core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 45d510a12c6b..83d6740f8d4e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1242,10 +1242,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	}
 
 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
-			BLK_MQ_REQ_RESERVED);
+			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(rq)) {
 		/* allocation failure, reset the controller */
-		dev_err(ctrl->device, "keep-alive failed\n");
+		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
 		nvme_reset_ctrl(ctrl);
 		return;
 	}
--
cgit v1.2.3
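The practical difference between the two allocation modes, as I read the
blk-mq tag allocator (this summary is mine, not stated in the patch):

	struct request *rq;

	/*
	 * Without BLK_MQ_REQ_NOWAIT the allocator may sleep until a tag
	 * frees up.  If controller teardown is concurrently waiting in
	 * flush_delayed_work(&ctrl->ka_work), neither side can make
	 * progress -- the deadlock described above.
	 */
	rq = nvme_alloc_request(q, cmd, BLK_MQ_REQ_RESERVED);

	/*
	 * With BLK_MQ_REQ_NOWAIT the call fails immediately (typically
	 * ERR_PTR(-EWOULDBLOCK)) when no reserved tag is free, so the
	 * keep-alive work always terminates and can always be flushed.
	 */
	rq = nvme_alloc_request(q, cmd,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);

The %ld/PTR_ERR() added to the dev_err() then shows exactly which error
the allocator produced.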
Fixes: 6e02318eaea5 ("nvme: add support for the Write Zeroes command")
Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Tested-by: Klaus Jensen
Reviewed-by: Klaus Jensen
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Himanshu Madhani
---
 drivers/nvme/host/core.c | 36 ++++++++++++------------------------
 1 file changed, 12 insertions(+), 24 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 83d6740f8d4e..0896e21642be 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1954,30 +1954,18 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
-static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
+/*
+ * Even though the NVMe spec explicitly states that MDTS is not applicable to
+ * write-zeroes, we are cautious and limit the size to the controller's
+ * max_hw_sectors value, which is based on the MDTS field and possibly other
+ * limiting factors.
+ */
+static void nvme_config_write_zeroes(struct request_queue *q,
+		struct nvme_ctrl *ctrl)
 {
-	u64 max_blocks;
-
-	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
-	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
-		return;
-	/*
-	 * Even though NVMe spec explicitly states that MDTS is not
-	 * applicable to the write-zeroes:- "The restriction does not apply to
-	 * commands that do not transfer data between the host and the
-	 * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
-	 * In order to be more cautious use controller's max_hw_sectors value
-	 * to configure the maximum sectors for the write-zeroes which is
-	 * configured based on the controller's MDTS field in the
-	 * nvme_init_identify() if available.
-	 */
-	if (ns->ctrl->max_hw_sectors == UINT_MAX)
-		max_blocks = (u64)USHRT_MAX + 1;
-	else
-		max_blocks = ns->ctrl->max_hw_sectors + 1;
-
-	blk_queue_max_write_zeroes_sectors(disk->queue,
-					   nvme_lba_to_sect(ns, max_blocks));
+	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
+	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
+		blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
 }
 
 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
@@ -2149,7 +2137,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	set_capacity_and_notify(disk, capacity);
 
 	nvme_config_discard(disk, ns);
-	nvme_config_write_zeroes(disk, ns);
+	nvme_config_write_zeroes(disk->queue, ns->ctrl);
 
 	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
 		test_bit(NVME_NS_FORCE_RO, &ns->flags));
--
cgit v1.2.3
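The unit mix-up is easiest to see with concrete numbers.
blk_queue_max_write_zeroes_sectors() takes 512-byte Linux sectors, the same
unit as max_hw_sectors, while nvme_lba_to_sect() converts namespace LBAs
into sectors.  A worked example under an assumed 4 KiB-LBA namespace
(lba_shift = 12); the numbers are illustrative, not from the patch:

	/*
	 * Assume MDTS yields max_hw_sectors = 2048 (1 MiB in 512-byte
	 * sectors) and the namespace uses 4 KiB logical blocks.
	 *
	 * Old code: max_blocks = max_hw_sectors + 1 = 2049 was treated
	 * as an LBA count and scaled up a second time:
	 *	nvme_lba_to_sect(ns, 2049) = 2049 << (12 - 9) = 16392
	 * i.e. an 8 MiB limit was advertised instead of 1 MiB.
	 *
	 * New code: one unit in, same unit out.
	 */
	blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);

For 512-byte LBAs the shift is zero and the old code was off only by the
stray "+ 1", which is presumably why the bug could go unnoticed.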
Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
Reported-by: Belanger, Martin
Signed-off-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c535836e7065..5b23e1d52cb3 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -568,6 +568,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
 	req->pdu_sent = 0;
 
+	if (unlikely(!req->pdu_len)) {
+		dev_err(queue->ctrl->ctrl.device,
+			"req %d r2t len is %u, probably a bug...\n",
+			rq->tag, req->pdu_len);
+		return -EPROTO;
+	}
+
 	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
 		dev_err(queue->ctrl->ctrl.device,
 			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
--
cgit v1.2.3

From bb83337058a7000644cdeffc67361d2473534756 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 15 Mar 2021 13:53:47 -0700
Subject: nvme-tcp: fix misuse of __smp_processor_id with preemption enabled

For our purely advisory use-case we only rely on this call as a hint, so
fix the warnings triggered by using the smp_processor_id variants with
preemption enabled.

Fixes: db5ad6b7f8cd ("nvme-tcp: try to send request in queue_rq context")
Fixes: ada831772188 ("nvme-tcp: Fix warning with CONFIG_DEBUG_PREEMPT")
Signed-off-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Tested-by: Yi Zhang
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 5b23e1d52cb3..35a78682f8de 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	 * directly, otherwise queue io_work. Also, only do that if we
 	 * are on the same cpu, so we don't introduce contention.
 	 */
-	if (queue->io_cpu == __smp_processor_id() &&
+	if (queue->io_cpu == raw_smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
 		queue->more_requests = !last;
 		nvme_tcp_send_all(queue);
--
cgit v1.2.3
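For reference, the accessors differ only in their debug checking; this
summary is mine, not the patch's:

	/*
	 * smp_processor_id() / __smp_processor_id(): assume the caller
	 * cannot migrate; under CONFIG_DEBUG_PREEMPT they warn when
	 * called with preemption enabled.
	 *
	 * raw_smp_processor_id(): no check; the documented spelling for
	 * "a possibly stale answer is acceptable".
	 */
	int cpu = raw_smp_processor_id();

A stale answer in nvme_tcp_queue_request() only costs the direct-send
optimization: if the task migrated right after the check, the worst case
is taking the direct-send path from a different CPU (extra contention) or
queueing io_work unnecessarily; correctness is unaffected either way.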
Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
Signed-off-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 35a78682f8de..a0f00cb8f9f3 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1752,8 +1752,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 		return ret;
 
 	ctrl->queue_count = nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
-		return 0;
+	if (ctrl->queue_count < 2) {
+		dev_err(ctrl->device,
+			"unable to set any I/O queues\n");
+		return -ENOMEM;
+	}
 
 	dev_info(ctrl->device,
 		"creating %d I/O queues.\n", nr_io_queues);
--
cgit v1.2.3

From c4c6df5fc84659690d4391d1fba155cd94185295 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 15 Mar 2021 14:04:27 -0700
Subject: nvme-rdma: fix possible hang when failing to set io queues

We only set up I/O queues for nvme controllers, and it makes absolutely
no sense to allow a controller (re)connect without any I/O queues.  If we
happen to fail setting the queue count for any reason, we should not
treat this as a successful reconnect, as I/O has no chance of going
through.  Instead, just fail and schedule another reconnect.

Reported-by: Chao Leng
Fixes: 711023071960 ("nvme-rdma: add a NVMe over Fabrics RDMA host driver")
Signed-off-by: Sagi Grimberg
Reviewed-by: Chao Leng
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7855103e05d2..be905d4fdb47 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -736,8 +736,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 		return ret;
 
 	ctrl->ctrl.queue_count = nr_io_queues + 1;
-	if (ctrl->ctrl.queue_count < 2)
-		return 0;
+	if (ctrl->ctrl.queue_count < 2) {
+		dev_err(ctrl->ctrl.device,
+			"unable to set any I/O queues\n");
+		return -ENOMEM;
+	}
 
 	dev_info(ctrl->ctrl.device,
 		"creating %d I/O queues.\n", nr_io_queues);
--
cgit v1.2.3
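A consolidated view of the check both patches introduce (names taken from
the tcp variant; the rdma one differs only in the extra ctrl->ctrl
indirection, and example_nr_io_queues() is a hypothetical stand-in for the
driver's queue-count policy):

	static int example_alloc_io_queues(struct nvme_ctrl *ctrl)
	{
		unsigned int nr_io_queues = example_nr_io_queues(ctrl);
		int ret;

		ret = nvme_set_queue_count(ctrl, &nr_io_queues);
		if (ret)
			return ret;

		ctrl->queue_count = nr_io_queues + 1;	/* +1: admin queue */
		if (ctrl->queue_count < 2) {
			/* zero I/O queues granted: fail the (re)connect */
			dev_err(ctrl->device, "unable to set any I/O queues\n");
			return -ENOMEM;
		}
		return 0;
	}

Returning 0 here is what previously let a reconnect "succeed" with only an
admin queue; any negative value instead sends the controller back into the
reconnect loop.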
From d218a8a3003e84ab136e69a4e30dd4ec7dab2d22 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 15 Mar 2021 15:34:51 -0700
Subject: nvmet: don't check iosqes,iocqes for discovery controllers

From the base spec, Figure 78 ("Controller Configuration"), these fields
are defined as parameters to configure an "I/O Controller (IOC)" and not
to configure a "Discovery Controller (DC)":

  ...
  If the controller does not support I/O queues, then this field shall
  be read-only with a value of 0h.

Just perform this check for I/O controllers.

Fixes: a07b4970f464 ("nvmet: add a generic NVMe target")
Reported-by: Belanger, Martin
Signed-off-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/core.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index be6fcdaf51a7..a027433b8be8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1118,9 +1118,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 {
 	lockdep_assert_held(&ctrl->lock);
 
-	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
-	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
-	    nvmet_cc_mps(ctrl->cc) != 0 ||
+	/*
+	 * Only I/O controllers should verify iosqes,iocqes.
+	 * Strictly speaking, the spec says a discovery controller
+	 * should verify iosqes,iocqes are zeroed, however that
+	 * would break backwards compatibility, so don't enforce it.
+	 */
+	if (ctrl->subsys->type != NVME_NQN_DISC &&
+	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
+		ctrl->csts = NVME_CSTS_CFS;
+		return;
+	}
+
+	if (nvmet_cc_mps(ctrl->cc) != 0 ||
 	    nvmet_cc_ams(ctrl->cc) != 0 ||
 	    nvmet_cc_css(ctrl->cc) != 0) {
 		ctrl->csts = NVME_CSTS_CFS;
--
cgit v1.2.3

From bac04454ef9fada009f0572576837548b190bf94 Mon Sep 17 00:00:00 2001
From: Elad Grupi
Date: Tue, 16 Mar 2021 17:44:25 +0200
Subject: nvmet-tcp: fix kmap leak when data digest in use

When data digest is enabled we should unmap the pdu iovec before handling
the data digest pdu.

Signed-off-by: Elad Grupi
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 8b0485ada315..d658c6e8263a 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1098,11 +1098,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
 		cmd->rbytes_done += ret;
 	}
 
+	nvmet_tcp_unmap_pdu_iovec(cmd);
 	if (queue->data_digest) {
 		nvmet_tcp_prep_recv_ddgst(cmd);
 		return 0;
 	}
-	nvmet_tcp_unmap_pdu_iovec(cmd);
 
 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
 	    cmd->rbytes_done == cmd->req.transfer_len) {
--
cgit v1.2.3
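The bug class behind this last patch is the classic kmap()/kunmap() pairing
rule: a highmem mapping taken while filling the pdu iovec must be dropped
on every path out of the receive routine.  In sketch form (the example_*
helpers are hypothetical; the real code maps and unmaps a whole scatterlist):

	struct page *page = example_current_page(cmd);	/* hypothetical */
	void *addr = kmap(page);

	example_copy_payload(addr);	/* hypothetical consumer */
	kunmap(page);			/* unmap unconditionally, then branch */

	if (queue->data_digest) {
		/* previously this early return left the pages mapped,
		 * leaking kmap slots whenever data digest was enabled */
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

On 64-bit kernels kmap() is nearly free, which is how the leak could hide;
on 32-bit highmem configurations the kmap pool is small, and exhausting it
eventually hangs the target.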