From 4fe7c2962e110dfd58e61888514726aac419562f Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 22 Dec 2016 07:04:59 -0800 Subject: iw_cxgb4: refactor sq/rq drain logic With the addition of the IB/Core drain API, iw_cxgb4 supported drain by watching the CQs when the QP was out of RTS and signalling "drain complete" when the last CQE was polled. This, however, doesn't fully support the drain semantics. Namely, the drain logic is supposed to signal "drain complete" only when the application has _processed_ the last CQE, not just removed it from the CQ. Thus a small timing hole exists that can cause touch-after-free bugs in applications using the drain API (nvmf and iSER, for example). So iw_cxgb4 needs a better solution. The iWARP Verbs spec mandates that "_at some point_ after the QP is moved to ERROR", the iWARP driver MUST synchronously fail post_send and post_recv calls. Until now, iw_cxgb4 did not allow any posts once the QP was in ERROR. This was in part due to the fact that the HW queues for a QP in ERROR state are disabled at that point, so there wasn't much else to do but fail the post operation synchronously. This restriction is what drove the first drain implementation in iw_cxgb4, which has the above-mentioned flaw. This patch changes iw_cxgb4 to allow post_send and post_recv WRs after the QP is moved to ERROR state for kernel mode users, thus still adhering to the Verbs spec for user mode users while allowing flush WRs for kernel users. Since the HW queues are disabled, we just synthesize a CQE for the post, queue it to the SW CQ, and then call the CQ event handler. This enables proper drain operations for the various storage applications. Signed-off-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb4/cq.c | 21 ++++--- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 6 +- drivers/infiniband/hw/cxgb4/provider.c | 2 - drivers/infiniband/hw/cxgb4/qp.c | 112 ++++++++++++++++++++------------- drivers/infiniband/hw/cxgb4/t4.h | 2 + 5 files changed, 85 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 19c6477af19f..bec82a600d77 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, goto skip_cqe; } + /* + * Special cqe for drain WR completions... + */ + if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { + *cookie = CQE_DRAIN_COOKIE(hw_cqe); + *cqe = *hw_cqe; + goto skip_cqe; + } + /* * Gotta tweak READ completions: * 1) the cqe doesn't contain the sq_wptr from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; + case C4IW_DRAIN_OPCODE: + wc->opcode = IB_WC_SEND; + break; default: printk(KERN_ERR MOD "Unexpected opcode %d " "in the CQE received for QPID=0x%0x\n", @@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) } } out: - if (wq) { - if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) { - if (t4_sq_empty(wq)) - complete(&qhp->sq_drained); - if (t4_rq_empty(wq)) - complete(&qhp->rq_drained); - } + if (wq) spin_unlock(&qhp->lock); - } return ret; } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 4788e1a46fde..7b1e465b2a5e 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -480,8 +480,6 @@ struct c4iw_qp { wait_queue_head_t wait; struct timer_list timer; int sq_sig_all; - struct completion rq_drained; - struct completion sq_drained; }; static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) @@ -615,6 +613,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state) return IB_QPS_ERR; } +#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN + static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | @@ -997,8 +997,6 @@ extern int c4iw_wr_log; extern int db_fc_threshold; extern int db_coalescing_threshold; extern int use_dsgl; -void c4iw_drain_rq(struct ib_qp *qp); -void c4iw_drain_sq(struct ib_qp *qp); void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 49b51b7e0fd7..c156413515b1 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -607,8 +607,6 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; dev->ibdev.get_port_immutable = c4iw_port_immutable; dev->ibdev.get_dev_fw_str = get_dev_fw_str; - dev->ibdev.drain_sq = c4iw_drain_sq; - dev->ibdev.drain_rq = c4iw_drain_rq; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cda5542e13a2..31ab4512f827 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -776,6 +776,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) return 0; } +static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) +{ + struct t4_cqe cqe = {}; + struct c4iw_cq *schp; + unsigned long flag; + struct t4_cq *cq; + + schp = to_c4iw_cq(qhp->ibqp.send_cq); + cq = &schp->cq; + + cqe.u.drain_cookie = wr->wr_id; + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | + CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_TYPE_V(1) | + CQE_SWCQE_V(1) | + CQE_QPID_V(qhp->wq.sq.qid)); + + spin_lock_irqsave(&schp->lock, flag); + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); + cq->sw_queue[cq->sw_pidx] = cqe; + t4_swcq_produce(cq); + spin_unlock_irqrestore(&schp->lock, flag); + + spin_lock_irqsave(&schp->comp_handler_lock, flag); + (*schp->ibcq.comp_handler)(&schp->ibcq, + schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); +} + +static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) +{ + struct t4_cqe cqe = {}; + struct c4iw_cq *rchp; + unsigned long flag; + struct t4_cq *cq; + + rchp = 
to_c4iw_cq(qhp->ibqp.recv_cq); + cq = &rchp->cq; + + cqe.u.drain_cookie = wr->wr_id; + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | + CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_TYPE_V(0) | + CQE_SWCQE_V(1) | + CQE_QPID_V(qhp->wq.sq.qid)); + + spin_lock_irqsave(&rchp->lock, flag); + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); + cq->sw_queue[cq->sw_pidx] = cqe; + t4_swcq_produce(cq); + spin_unlock_irqrestore(&rchp->lock, flag); + + spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); +} + int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { @@ -794,8 +852,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); - *bad_wr = wr; - return -EINVAL; + complete_sq_drain_wr(qhp, wr); + return err; } num_wrs = t4_sq_avail(&qhp->wq); if (num_wrs == 0) { @@ -937,8 +995,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); - *bad_wr = wr; - return -EINVAL; + complete_rq_drain_wr(qhp, wr); + return err; } num_wrs = t4_rq_avail(&qhp->wq); if (num_wrs == 0) { @@ -1550,7 +1608,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, } break; case C4IW_QP_STATE_CLOSING: - if (!internal) { + + /* + * Allow kernel users to move to ERROR for qp draining. + */ + if (!internal && (qhp->ibqp.uobject || attrs->next_state != + C4IW_QP_STATE_ERROR)) { ret = -EINVAL; goto out; } @@ -1763,8 +1826,6 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->attr.max_ird = 0; qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; spin_lock_init(&qhp->lock); - init_completion(&qhp->sq_drained); - init_completion(&qhp->rq_drained); mutex_init(&qhp->mutex); init_waitqueue_head(&qhp->wait); kref_init(&qhp->kref); @@ -1958,40 +2019,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->sq_sig_type = qhp->sq_sig_all ? 
IB_SIGNAL_ALL_WR : 0; return 0; } - -static void move_qp_to_err(struct c4iw_qp *qp) -{ - struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR }; - - (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); -} - -void c4iw_drain_sq(struct ib_qp *ibqp) -{ - struct c4iw_qp *qp = to_c4iw_qp(ibqp); - unsigned long flag; - bool need_to_wait; - - move_qp_to_err(qp); - spin_lock_irqsave(&qp->lock, flag); - need_to_wait = !t4_sq_empty(&qp->wq); - spin_unlock_irqrestore(&qp->lock, flag); - - if (need_to_wait) - wait_for_completion(&qp->sq_drained); -} - -void c4iw_drain_rq(struct ib_qp *ibqp) -{ - struct c4iw_qp *qp = to_c4iw_qp(ibqp); - unsigned long flag; - bool need_to_wait; - - move_qp_to_err(qp); - spin_lock_irqsave(&qp->lock, flag); - need_to_wait = !t4_rq_empty(&qp->wq); - spin_unlock_irqrestore(&qp->lock, flag); - - if (need_to_wait) - wait_for_completion(&qp->rq_drained); -} diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 862381aa83c8..640d22148a3e 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -179,6 +179,7 @@ struct t4_cqe { __be32 wrid_hi; __be32 wrid_low; } gen; + u64 drain_cookie; } u; __be64 reserved; __be64 bits_type_ts; @@ -238,6 +239,7 @@ struct t4_cqe { /* generic accessor macros */ #define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) +#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie) /* macros for flit 3 of the cqe */ #define CQE_GENBIT_S 63 -- cgit v1.2.3 From c12a67fec8d99bb554e8d4e99120d418f1a39c87 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 22 Dec 2016 07:40:36 -0800 Subject: iw_cxgb4: free EQ queue memory on last deref Commit ad61a4c7a9b7 ("iw_cxgb4: don't block in destroy_qp awaiting the last deref") introduced a bug where the RDMA QP EQ queue memory (and QIDs) are possibly freed before the underlying connection has been fully shut down. The result is a possible DMA read issued by the HW after the queue memory has been unmapped and freed. This results in possible WR corruption in the worst case, system bus errors if an IOMMU is in use, and SGE "bad WR" errors reported at the very least. The fix is to defer unmap/free of queue memory and QID resources until the QP struct has been fully dereferenced. To do this, the c4iw_ucontext must also be kept around until the last QP that references it is fully freed. In addition, since the last QP deref can happen in an IRQ-disabled context, we need a new workqueue thread to do the final unmap/free of the EQ queue memory.
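In pattern terms, this fix is the standard "kref release schedules deferred work" idiom: the final kref_put() may run in atomic context, so the release callback only queues a work item, and the blocking teardown runs later in process context. A minimal sketch with illustrative names (iw_cxgb4's real version appears in the qp.c hunk below):

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct obj {
		struct kref kref;
		struct work_struct free_work;
	};

	/* Runs later in process context: safe to sleep, unmap queue memory,
	 * release QIDs, and finally free the struct. */
	static void obj_free_work(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, free_work);

		kfree(o);
	}

	/* kref release callback: may be invoked with IRQs disabled, so it
	 * must not block; it only schedules the real teardown. */
	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, kref);

		queue_work(system_wq, &o->free_work); /* iw_cxgb4 uses its own rdev->free_workq */
	}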
Fixes: ad61a4c7a9b7 ("iw_cxgb4: don't block in destroy_qp awaiting the last deref") Cc: stable@vger.kernel.org Signed-off-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb4/device.c | 9 +++++++++ drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 18 +++++++++++++++++ drivers/infiniband/hw/cxgb4/provider.c | 20 +++++++++++++++---- drivers/infiniband/hw/cxgb4/qp.c | 35 +++++++++++++++++++++++++--------- 4 files changed, 69 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 516b0ae6dc3f..40c0e7b9fc6e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) } } + rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); + if (!rdev->free_workq) { + err = -ENOMEM; + goto err_free_status_page; + } + rdev->status_page->db_off = 0; return 0; +err_free_status_page: + free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); destroy_rqtpool: @@ -862,6 +870,7 @@ destroy_resource: static void c4iw_rdev_close(struct c4iw_rdev *rdev) { + destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7b1e465b2a5e..8cd4d054a87e 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -45,6 +45,7 @@ #include #include #include +#include #include @@ -107,6 +108,7 @@ struct c4iw_dev_ucontext { struct list_head qpids; struct list_head cqids; struct mutex lock; + struct kref kref; }; enum c4iw_rdev_flags { @@ -183,6 +185,7 @@ struct c4iw_rdev { atomic_t wr_log_idx; struct wr_log_entry *wr_log; int wr_log_size; + struct workqueue_struct *free_workq; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -480,6 +483,8 @@ struct c4iw_qp { wait_queue_head_t wait; struct timer_list timer; int sq_sig_all; + struct work_struct free_work; + struct c4iw_ucontext *ucontext; }; static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) @@ -493,6 +498,7 @@ struct c4iw_ucontext { u32 key; spinlock_t mmap_lock; struct list_head mmaps; + struct kref kref; }; static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) @@ -500,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) return container_of(c, struct c4iw_ucontext, ibucontext); } +void _c4iw_free_ucontext(struct kref *kref); + +static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext) +{ + kref_put(&ucontext->kref, _c4iw_free_ucontext); +} + +static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext) +{ + kref_get(&ucontext->kref); +} + struct c4iw_mm_entry { struct list_head entry; u64 addr; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index c156413515b1..fa64f5d93b11 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, return -ENOSYS; } -static int c4iw_dealloc_ucontext(struct ib_ucontext *context) +void _c4iw_free_ucontext(struct kref *kref) { - struct c4iw_dev *rhp = to_c4iw_dev(context->device); - struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); + struct c4iw_ucontext *ucontext; + struct c4iw_dev *rhp; struct c4iw_mm_entry *mm, 
*tmp; - PDBG("%s context %p\n", __func__, context); + ucontext = container_of(kref, struct c4iw_ucontext, kref); + rhp = to_c4iw_dev(ucontext->ibucontext.device); + + PDBG("%s ucontext %p\n", __func__, ucontext); list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) kfree(mm); c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); kfree(ucontext); +} + +static int c4iw_dealloc_ucontext(struct ib_ucontext *context) +{ + struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); + + PDBG("%s context %p\n", __func__, context); + c4iw_put_ucontext(ucontext); return 0; } @@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); INIT_LIST_HEAD(&context->mmaps); spin_lock_init(&context->mmap_lock); + kref_init(&context->kref); if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { if (!warned++) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 31ab4512f827..04c1c382dedb 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) return 0; } -static void _free_qp(struct kref *kref) +static void free_qp_work(struct work_struct *work) +{ + struct c4iw_ucontext *ucontext; + struct c4iw_qp *qhp; + struct c4iw_dev *rhp; + + qhp = container_of(work, struct c4iw_qp, free_work); + ucontext = qhp->ucontext; + rhp = qhp->rhp; + + PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext); + destroy_qp(&rhp->rdev, &qhp->wq, + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); + + if (ucontext) + c4iw_put_ucontext(ucontext); + kfree(qhp); +} + +static void queue_qp_free(struct kref *kref) { struct c4iw_qp *qhp; qhp = container_of(kref, struct c4iw_qp, kref); PDBG("%s qhp %p\n", __func__, qhp); - kfree(qhp); + queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work); } void c4iw_qp_add_ref(struct ib_qp *qp) @@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp) void c4iw_qp_rem_ref(struct ib_qp *qp) { PDBG("%s ib_qp %p\n", __func__, qp); - kref_put(&to_c4iw_qp(qp)->kref, _free_qp); + kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free); } static void add_to_fc_list(struct list_head *head, struct list_head *entry) @@ -1706,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) struct c4iw_dev *rhp; struct c4iw_qp *qhp; struct c4iw_qp_attributes attrs; - struct c4iw_ucontext *ucontext; qhp = to_c4iw_qp(ib_qp); rhp = qhp->rhp; @@ -1726,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) spin_unlock_irq(&rhp->lock); free_ird(rhp, qhp->attr.max_ird); - ucontext = ib_qp->uobject ? - to_c4iw_ucontext(ib_qp->uobject->context) : NULL; - destroy_qp(&rhp->rdev, &qhp->wq, - ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx); - c4iw_qp_rem_ref(ib_qp); PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); @@ -1829,6 +1842,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, mutex_init(&qhp->mutex); init_waitqueue_head(&qhp->wait); kref_init(&qhp->kref); + INIT_WORK(&qhp->free_work, free_qp_work); ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); if (ret) @@ -1915,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, ma_sync_key_mm->len = PAGE_SIZE; insert_mmap(ucontext, ma_sync_key_mm); } + + c4iw_get_ucontext(ucontext); + qhp->ucontext = ucontext; } qhp->ibqp.qp_num = qhp->wq.sq.qid; init_timer(&(qhp->timer)); -- cgit v1.2.3 From 3bcf96e0183f5c863657cb6ae9adad307a0f6071 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 22 Dec 2016 07:40:37 -0800 Subject: iw_cxgb4: do not send RX_DATA_ACK CPLs after close/abort Function rx_data(), which handles ingress CPL_RX_DATA messages, was always sending an RX_DATA_ACK with the goal of updating the credits. However, if the RDMA connection is moved out of FPDU mode abruptly, then it is possible for iw_cxgb4 to process queued RX_DATA CPLs after HW has aborted the connection. These CPLs should not trigger RX_DATA_ACKS. If they do, HW can see a READ after DELETE of the DB_LE hash entry for the tid and post a LE_DB HashTblMemCrcError. Signed-off-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb4/cm.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index f1510cc76d2d..9398143d7c5e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) skb_trim(skb, dlen); mutex_lock(&ep->com.mutex); - /* update RX credits */ - update_rx_credits(ep, dlen); - switch (ep->com.state) { case MPA_REQ_SENT: + update_rx_credits(ep, dlen); ep->rcv_seq += dlen; disconnect = process_mpa_reply(ep, skb); break; case MPA_REQ_WAIT: + update_rx_credits(ep, dlen); ep->rcv_seq += dlen; disconnect = process_mpa_request(ep, skb); break; case FPDU_MODE: { struct c4iw_qp_attributes attrs; + + update_rx_credits(ep, dlen); BUG_ON(!ep->com.qp); if (status) pr_err("%s Unexpected streaming data." \ -- cgit v1.2.3 From ad8e66b4a80182174f73487ed25fd2140cf43361 Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Wed, 28 Dec 2016 12:48:28 +0200 Subject: IB/srp: fix mr allocation when the device supports sg gaps If the device support arbitrary sg list mapping (device cap IB_DEVICE_SG_GAPS_REG set) we allocate the memory regions with IB_MR_TYPE_SG_GAPS. 
Fixes: 509c5f33f4f6 ("IB/srp: Prevent mapping failures") Cc: # 4.7+ Signed-off-by: Israel Rukshin Signed-off-by: Max Gurtovoy Reviewed-by: Leon Romanovsky Reviewed-by: Mark Bloch Reviewed-by: Yuval Shaia Reviewed-by: Bart Van Assche Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/srp/ib_srp.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 8ddc07123193..0f67cf909462 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, struct srp_fr_desc *d; struct ib_mr *mr; int i, ret = -EINVAL; + enum ib_mr_type mr_type; if (pool_size <= 0) goto err; @@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free_list); + if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) + mr_type = IB_MR_TYPE_SG_GAPS; + else + mr_type = IB_MR_TYPE_MEM_REG; + for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { - mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, - max_page_list_len); + mr = ib_alloc_mr(pd, mr_type, max_page_list_len); if (IS_ERR(mr)) { ret = PTR_ERR(mr); if (ret == -ENOMEM) -- cgit v1.2.3 From 0a475ef4226e305bdcffe12b401ca1eab06c4913 Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Wed, 4 Jan 2017 15:59:37 +0200 Subject: IB/srp: fix invalid indirect_sg_entries parameter value After setting the indirect_sg_entries module_param to a huge value (e.g. 500,000), srp_alloc_req_data() fails to allocate indirect descriptors for the request ring (kmalloc fails). This commit enforces the maximum value of indirect_sg_entries to be SG_MAX_SEGMENTS, as stated in the module parameter description. Fixes: 65e8617fba17 (scsi: rename SCSI_MAX_{SG, SG_CHAIN}_SEGMENTS) Fixes: c07d424d6118 (IB/srp: add support for indirect tables that don't fit in SRP_CMD) Cc: stable@vger.kernel.org # 4.7+ Signed-off-by: Israel Rukshin Signed-off-by: Max Gurtovoy Reviewed-by: Laurence Oberman Reviewed-by: Bart Van Assche Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/srp/ib_srp.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 0f67cf909462..79bf48477ddb 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3699,6 +3699,12 @@ static int __init srp_init_module(void) indirect_sg_entries = cmd_sg_entries; } + if (indirect_sg_entries > SG_MAX_SEGMENTS) { + pr_warn("Clamping indirect_sg_entries to %u\n", + SG_MAX_SEGMENTS); + indirect_sg_entries = SG_MAX_SEGMENTS; + } + srp_remove_wq = create_workqueue("srp_remove"); if (!srp_remove_wq) { ret = -ENOMEM; -- cgit v1.2.3 From 1e5db6c31ade4150c2e2b1a21e39f776c38fea39 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Wed, 18 Jan 2017 00:40:39 +0200 Subject: IB/iser: Fix sg_tablesize calculation For devices that can register a page list bigger than USHRT_MAX, we actually take the wrong value for sg_tablesize. E.g., for CX4 max_fast_reg_page_list_len is 65536 (bigger than USHRT_MAX), so we set sg_tablesize to 0 by mistake. Therefore, each IO bigger than 4k is split into "< 4k" chunks, causing performance degradation. Remove the wrong sg_tablesize assignment and use the value that was set by the address resolution handler, with the needed casting.
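The truncation is easy to reproduce outside the kernel: min_t(unsigned short, ...) casts both operands to unsigned short before comparing, so a capability of 65536 wraps to 0. A standalone illustration (plain userspace C, not driver code):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int max_page_list_len = 65536;	/* e.g. a CX4 capability */
		unsigned short sg_tablesize = 128;

		/* What min_t(unsigned short, a, b) effectively did: the 32-bit
		 * capability is truncated to 0 before the comparison. */
		unsigned short capped = (unsigned short)max_page_list_len; /* 0 */

		if (capped < sg_tablesize)
			sg_tablesize = capped;

		printf("USHRT_MAX=%u, sg_tablesize=%u\n", USHRT_MAX, sg_tablesize);
		return 0;	/* prints sg_tablesize=0: every IO then gets split */
	}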
Cc: # v4.5+ Signed-off-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9104e6b8cac9..1c911876556e 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, SHOST_DIX_GUARD_CRC); } - /* - * Limit the sg_tablesize and max_sectors based on the device - * max fastreg page list length. - */ - shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, - ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); - if (iscsi_host_add(shost, ib_conn->device->ib_device->dma_device)) { mutex_unlock(&iser_conn->state_mutex); -- cgit v1.2.3 From 83236f0157feec0f01bf688a1474b889bdcc5ad0 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Wed, 18 Jan 2017 00:40:40 +0200 Subject: IB/iser: remove unused variable from iser_conn struct max_sectors calculation was fixed in commit: 9c674815d346 ("IB/iser: Fix max_sectors calculation"). Thus, iser_conn variable scsi_max_sectors is not needed anymore. Signed-off-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Tested-by: Raju Rangoju Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.c | 4 ++++ drivers/infiniband/ulp/iser/iscsi_iser.h | 2 -- drivers/infiniband/ulp/iser/iser_verbs.c | 13 +------------ 3 files changed, 5 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1c911876556e..e71af717e71b 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -672,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; shost->max_sectors = min(iser_max_sectors, max_fr_sectors); + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", + iser_conn, shost->sg_tablesize, + shost->max_sectors); + if (cmds_max > max_cmds) { iser_info("cmds_max changed from %u to %u\n", cmds_max, max_cmds); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 0be6a7c5ddb5..9d0b22ad58c1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -496,7 +496,6 @@ struct ib_conn { * @rx_descs: rx buffers array (cyclic buffer) * @num_rx_descs: number of rx descriptors * @scsi_sg_tablesize: scsi host sg_tablesize - * @scsi_max_sectors: scsi host max sectors */ struct iser_conn { struct ib_conn ib_conn; @@ -519,7 +518,6 @@ struct iser_conn { struct iser_rx_desc *rx_descs; u32 num_rx_descs; unsigned short scsi_sg_tablesize; - unsigned int scsi_max_sectors; bool snd_w_inv; }; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 8ae7a3beddb7..6a9d1cb548ee 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn, sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, device->ib_device->attrs.max_fast_reg_page_list_len); - if (sg_tablesize > sup_sg_tablesize) { - sg_tablesize = sup_sg_tablesize; - iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512; - } else { - iser_conn->scsi_max_sectors = max_sectors; - } - - 
iser_conn->scsi_sg_tablesize = sg_tablesize; - - iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", - iser_conn, iser_conn->scsi_sg_tablesize, - iser_conn->scsi_max_sectors); + iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); } /** -- cgit v1.2.3 From 7d211c81e97ef8505610ef82e14e302ab415bad1 Mon Sep 17 00:00:00 2001 From: Adit Ranadive Date: Thu, 19 Jan 2017 13:20:39 -0800 Subject: IB/vmw_pvrdma: Don't leak info from alloc_ucontext Clear out the user response struct correctly. Fixes: 29c8d9eba550 ("IB: Add vmw_pvrdma driver") Reported-by: Dan Carpenter Signed-off-by: Adit Ranadive Signed-off-by: Doug Ledford --- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 54891370d18a..c2aa52638dcb 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, union pvrdma_cmd_resp rsp; struct pvrdma_cmd_create_uc *cmd = &req.create_uc; struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; - struct pvrdma_alloc_ucontext_resp uresp; + struct pvrdma_alloc_ucontext_resp uresp = {0}; int ret; void *ptr; -- cgit v1.2.3 From ff89b070b7c98eb6782361310ca7a15186f15b2c Mon Sep 17 00:00:00 2001 From: Adit Ranadive Date: Thu, 19 Jan 2017 13:20:40 -0800 Subject: IB/vmw_pvrdma: Fix incorrect cleanup on pvrdma_pci_probe error path If the interrupt allocation failed we should start freeing the CQ rings rather than unregistering the netdev notifier. Fixes: 29c8d9eba550 ("IB: Add vmw_pvrdma driver") Signed-off-by: Adit Ranadive Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford --- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 231a1ce1f4be..bd8fbd3d2032 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "failed to allocate interrupts\n"); ret = -ENOMEM; - goto err_netdevice; + goto err_free_cq_ring; } /* Allocate UAR table. */ @@ -1092,8 +1092,6 @@ err_free_uar_table: err_free_intrs: pvrdma_free_irq(dev); pvrdma_disable_msi_all(dev); -err_netdevice: - unregister_netdevice_notifier(&dev->nb_netdev); err_free_cq_ring: pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); err_free_async_ring: -- cgit v1.2.3 From d3f4aadd614c4627244452ad64eaf351179f2c31 Mon Sep 17 00:00:00 2001 From: "Amrani, Ram" Date: Mon, 26 Dec 2016 08:40:57 +0200 Subject: RDMA/core: Add the function ib_mtu_int_to_enum As the functionality to convert the MTU from a number to enum_ib_mtu is ubiquitous, define a dedicated function and remove the duplicated code. 
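The new helper itself goes into the RDMA core headers and so does not appear in this drivers-only diff; it is essentially the ladder that each of the four drivers below used to duplicate:

	static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
	{
		if (mtu >= 4096)
			return IB_MTU_4096;
		else if (mtu >= 2048)
			return IB_MTU_2048;
		else if (mtu >= 1024)
			return IB_MTU_1024;
		else if (mtu >= 512)
			return IB_MTU_512;
		else
			return IB_MTU_256;
	}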
Signed-off-by: Ram Amrani Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb3/iwch_provider.c | 11 +---------- drivers/infiniband/hw/cxgb4/provider.c | 11 +---------- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 11 +---------- drivers/infiniband/hw/nes/nes_verbs.c | 12 +----------- 4 files changed, 4 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 9d5fe1853da4..6262dc035f3c 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev, memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); if (!netif_carrier_ok(netdev)) props->state = IB_PORT_DOWN; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index fa64f5d93b11..3345e1c312f7 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -373,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); if (!netif_carrier_ok(netdev)) props->state = IB_PORT_DOWN; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 29e97df9e1a7..4c000d60d5c6 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev, memset(props, 0, sizeof(*props)); props->max_mtu = IB_MTU_4096; - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); props->lid = 1; if (netif_carrier_ok(iwdev->netdev)) diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index aff9fb14768b..5a31f3c6a421 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr memset(props, 0, sizeof(*props)); props->max_mtu = IB_MTU_4096; - - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); props->lid = 1; 
props->lmc = 0; -- cgit v1.2.3 From 097b615965fb1af714fbc2311f68839b1086ebcb Mon Sep 17 00:00:00 2001 From: "Amrani, Ram" Date: Mon, 26 Dec 2016 08:40:58 +0200 Subject: RDMA/qedr: Fix MTU returned from QP query MTU value returned from QP query should include overhead. Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 57c8de208077..84dcd73bd57b 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2016,7 +2016,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->qp_state = qedr_get_ibqp_state(params.state); qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); - qp_attr->path_mtu = iboe_get_mtu(params.mtu); + qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu); qp_attr->path_mig_state = IB_MIG_MIGRATED; qp_attr->rq_psn = params.rq_psn; qp_attr->sq_psn = params.sq_psn; -- cgit v1.2.3 From 865cea40b69741c3da2574176876463233b2b67c Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:34 +0200 Subject: RDMA/qedr: Return success when not changing QP state If the user is requesting us to change the QP state to the same state that it is already in, return success instead of failure. Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 84dcd73bd57b..27d90a82d731 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1657,7 +1657,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev, int status = 0; if (new_state == qp->state) - return 1; + return 0; switch (qp->state) { case QED_ROCE_QP_STATE_RESET: -- cgit v1.2.3 From 59e8970b3798e4cbe575ed9cf4d53098760a2a86 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:35 +0200 Subject: RDMA/qedr: Return max inline data in QP query result Return the maximum supported amount of inline data, not the qp's current configured inline data size, when filling out the results of a query qp call. Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 27d90a82d731..bb907b14c988 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2028,7 +2028,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; - qp_attr->cap.max_inline_data = qp->max_inline_data; + qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; qp_init_attr->cap = qp_attr->cap; memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], -- cgit v1.2.3 From 91bff997db2ec04f9ba761a55c21642f9803b06c Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:36 +0200 Subject: RDMA/qedr: Remove CQ spinlock from CM completion handlers There is only a single event queue that triggers the completion events for the RDMA CM and it is being processed serially. 
This means that inherently there can be no parallelism of CQ completion handler callbacks, hence the lock is redundant. Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/qedr.h | 3 --- drivers/infiniband/hw/qedr/qedr_cm.c | 10 ++-------- 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 620badd7d4fb..94319abb0df2 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -251,9 +251,6 @@ struct qedr_cq { u16 icid; - /* Lock to protect completion handler */ - spinlock_t comp_handler_lock; - /* Lock to protect multiplem CQ's */ spinlock_t cq_lock; u8 arm_flags; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 63890ebb72bd..00361f310d15 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) qedr_inc_sw_gsi_cons(&qp->sq); spin_unlock_irqrestore(&qp->q_lock, flags); - if (cq->ibcq.comp_handler) { - spin_lock_irqsave(&cq->comp_handler_lock, flags); + if (cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); - } } void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, @@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, spin_unlock_irqrestore(&qp->q_lock, flags); - if (cq->ibcq.comp_handler) { - spin_lock_irqsave(&cq->comp_handler_lock, flags); + if (cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); - } } static void qedr_destroy_gsi_cq(struct qedr_dev *dev, -- cgit v1.2.3 From c78c31496111f497b4a03f955c100091185da8b6 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:37 +0200 Subject: RDMA/qedr: Don't spam dmesg if QP is in error state It is normal to flush CQEs if the QP is in error state. Hence there's no use in printing a message per CQE to dmesg. Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index bb907b14c988..0688dce54f92 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3234,9 +3234,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev, IB_WC_SUCCESS, 0); break; case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: - DP_ERR(dev, - "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", - cq->icid, qp->icid); + if (qp->state != QED_ROCE_QP_STATE_ERR) + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, IB_WC_WR_FLUSH_ERR, 1); break; -- cgit v1.2.3 From 933e6dcaa0f65eb2f624ad760274020874a1f35e Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:38 +0200 Subject: RDMA/qedr: Don't reset QP when queues aren't flushed Fail QP state transition from error to reset if SQ/RQ are not empty and still in the process of flushing out the queued work entries.
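In ring-buffer terms, the check (visible in the hunk below) is simply that each work queue's producer and consumer indices have met; a hypothetical helper expressing it:

	/* A queue has finished flushing only when everything produced has
	 * also been consumed. */
	static bool qedr_queues_flushed(struct qedr_qp *qp)
	{
		return qp->rq.prod == qp->rq.cons && qp->sq.prod == qp->sq.cons;
	}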
Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 0688dce54f92..3dae9641f821 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1733,6 +1733,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev, /* ERR->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_RESET: + if ((qp->rq.prod != qp->rq.cons) || + (qp->sq.prod != qp->sq.cons)) { + DP_NOTICE(dev, + "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", + qp->rq.prod, qp->rq.cons, qp->sq.prod, + qp->sq.cons); + status = -EINVAL; + } break; default: status = -EINVAL; -- cgit v1.2.3 From 27a4b1a6d6fcf09314359bacefa1e106927ae21b Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:39 +0200 Subject: RDMA/qedr: Mark three functions as static mark qedr_get_state_from_ibqp(), __qedr_alloc_mr() and __qedr_post_send() as static since they are only used in the same file. Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 3dae9641f821..1b9d55965479 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1600,7 +1600,7 @@ err0: return ERR_PTR(-EFAULT); } -enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) +static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) { switch (qp_state) { case QED_ROCE_QP_STATE_RESET: @@ -1621,7 +1621,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) return IB_QPS_ERR; } -enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) +static enum qed_roce_qp_state qedr_get_state_from_ibqp( + enum ib_qp_state qp_state) { switch (qp_state) { case IB_QPS_RESET: @@ -2310,7 +2311,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr) return rc; } -struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) +static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, + int max_page_list_len) { struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_dev *dev = get_qedr_dev(ibpd->device); @@ -2712,7 +2714,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp, return 0; } -enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) +static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) { switch (opcode) { case IB_WR_RDMA_WRITE: @@ -2737,7 +2739,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) } } -inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) +static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) { int wq_is_full, err_wr, pbl_is_full; struct qedr_dev *dev = qp->dev; @@ -2774,7 +2776,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) return true; } -int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, +static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct qedr_dev *dev = get_qedr_dev(ibqp->device); -- cgit v1.2.3 From 1a59075197976611bacaa383a6673f9e57e9e98b Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:40 +0200 Subject: RDMA/qedr: Fix 
formatting Remove a standalone ';'. List the function's parameters on a single line. Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 3 +-- drivers/infiniband/hw/qedr/verbs.c | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 7b74d09a8217..eac0bfc1fc99 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) return 0; } -void qedr_unaffiliated_event(void *context, - u8 event_code) +void qedr_unaffiliated_event(void *context, u8 event_code) { pr_err("unaffiliated event not implemented yet\n"); } diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 1b9d55965479..c4f4c2131a45 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1874,7 +1874,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", qp_params.remote_mac_addr); -; qp_params.mtu = qp->mtu; qp_params.lb_indication = false; -- cgit v1.2.3 From af2b14b8b8ae21b0047a52c767ac8b44f435a280 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:41 +0200 Subject: RDMA/qedr: Fix RDMA CM loopback The loopback logic in RDMA CM packets compares Ethernet addresses and was accidentally inverted. Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/qedr_cm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 00361f310d15..a9a8d8745d2e 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -398,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev, } if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) - packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; + packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; else + packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; packet->roce_mode = roce_mode; memcpy(packet->header.vaddr, ud_header_buffer, header_size); -- cgit v1.2.3 From 9c1e0228ab35e52d30abf4b5629c28350833fbcb Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:42 +0200 Subject: RDMA/qedr: Fix and simplify memory leak in PD alloc Free the PD if no internal resources were available. Move the userspace code under the relevant 'if'.
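The corrected control flow is the usual goto-unwind shape. A hedged sketch with stand-in names (my_pd, hw_alloc_pd and copy_pd_id_to_user are illustrative; the real hunk follows below):

	#include <linux/err.h>
	#include <linux/slab.h>

	static struct my_pd *alloc_pd_example(struct my_dev *dev, bool user)
	{
		struct my_pd *pd;
		int rc;

		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return ERR_PTR(-ENOMEM);

		rc = hw_alloc_pd(dev, &pd->pd_id);	/* return code now checked */
		if (rc)
			goto err_free;

		if (user) {
			rc = copy_pd_id_to_user(pd->pd_id);
			if (rc) {
				hw_dealloc_pd(dev, pd->pd_id);	/* undo the HW allocation */
				goto err_free;
			}
		}
		return pd;

	err_free:
		kfree(pd);	/* the leak the patch plugs */
		return ERR_PTR(rc);
	}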
Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index c4f4c2131a45..c7d6c9a783bd 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibdev); - struct qedr_ucontext *uctx = NULL; - struct qedr_alloc_pd_uresp uresp; struct qedr_pd *pd; u16 pd_id; int rc; @@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, if (!pd) return ERR_PTR(-ENOMEM); - dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + if (rc) + goto err; - uresp.pd_id = pd_id; pd->pd_id = pd_id; if (udata && context) { + struct qedr_alloc_pd_uresp uresp; + + uresp.pd_id = pd_id; + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); - if (rc) + if (rc) { DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); - uctx = get_qedr_ucontext(context); - uctx->pd = pd; - pd->uctx = uctx; + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); + goto err; + } + + pd->uctx = get_qedr_ucontext(context); + pd->uctx->pd = pd; } return &pd->ibpd; + +err: + kfree(pd); + return ERR_PTR(rc); } int qedr_dealloc_pd(struct ib_pd *ibpd) -- cgit v1.2.3 From f449c7a2d822c2d81b5bcb2c50eec80796766726 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:43 +0200 Subject: RDMA/qedr: Dispatch port active event from qedr_add Relying on qede to trigger qedr on startup is problematic. When both are probed, if qedr loads slowly then qede can assume qedr is missing and not trigger it. This patch adds triggering from qedr itself and protects against a race via an atomic bit.
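Distilled from the hunks below: a single atomic bit makes the PORT_ACTIVE/PORT_ERR dispatch one-shot, so whichever of qedr_add() or the QEDE_UP notification runs first wins and the other path becomes a no-op:

	static void qedr_open(struct qedr_dev *dev)
	{
		/* only the first caller to set the bit dispatches the event */
		if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
			qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
	}

	static void qedr_close(struct qedr_dev *dev)
	{
		/* PORT_ERR is reported only if the port was marked active */
		if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
			qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
	}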
Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 20 ++++++++++++++------ drivers/infiniband/hw/qedr/qedr.h | 5 +++++ 2 files changed, 19 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index eac0bfc1fc99..3ac8aa5ef37d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -791,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) goto sysfs_err; + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); return dev; @@ -823,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev) ib_dealloc_device(&dev->ibdev); } -static int qedr_close(struct qedr_dev *dev) +static void qedr_close(struct qedr_dev *dev) { - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); - - return 0; + if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); } static void qedr_shutdown(struct qedr_dev *dev) @@ -836,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev) qedr_remove(dev); } +static void qedr_open(struct qedr_dev *dev) +{ + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); +} + static void qedr_mac_address_change(struct qedr_dev *dev) { union ib_gid *sgid = &dev->sgid_tbl[0]; @@ -862,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); - qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); if (rc) DP_ERR(dev, "Error updating mac filter\n"); @@ -876,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) { switch (event) { case QEDE_UP: - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + qedr_open(dev); break; case QEDE_DOWN: qedr_close(dev); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 94319abb0df2..bb32e4792ec9 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -113,6 +113,8 @@ struct qedr_device_attr { struct qed_rdma_events events; }; +#define QEDR_ENET_STATE_BIT (0) + struct qedr_dev { struct ib_device ibdev; struct qed_dev *cdev; @@ -153,6 +155,8 @@ struct qedr_dev { struct qedr_cq *gsi_sqcq; struct qedr_cq *gsi_rqcq; struct qedr_qp *gsi_qp; + + unsigned long enet_state; }; #define QEDR_MAX_SQ_PBL (0x8000) @@ -188,6 +192,7 @@ struct qedr_dev { #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) #define QEDR_MAX_PORT (1) +#define QEDR_PORT (1) #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) -- cgit v1.2.3 From 828f6fa65ce7e80f77f5ab12942e44eb3d9d174e Mon Sep 17 00:00:00 2001 From: Kenneth Lee Date: Thu, 5 Jan 2017 15:00:05 +0800 Subject: IB/umem: Release pid in error and ODP flow 1. Release the pid before entering the ODP flow. 2.
Release the pid when memory allocation fails. Fixes: 87773dd56d54 ("IB: ib_umem_release() should decrement mm->pinned_vm from ib_umem_get") Fixes: 8ada2c1c0c1d ("IB/core: Add support for on demand paging regions") Signed-off-by: Kenneth Lee Reviewed-by: Haggai Eran Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/umem.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 1e62a5f0cb28..4609b921f899 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); if (access & IB_ACCESS_ON_DEMAND) { + put_pid(umem->pid); ret = ib_umem_odp_get(context, umem); if (ret) { kfree(umem); @@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { + put_pid(umem->pid); kfree(umem); return ERR_PTR(-ENOMEM); } -- cgit v1.2.3 From f39f775218a7520e3700de2003c84a042c3b5972 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Thu, 19 Jan 2017 15:25:58 +0200 Subject: IB/rxe: Fix rxe dev insertion to rxe_dev_list The first argument of list_add_tail is the new item and the second is the head of the list. Fix the code to pass the arguments in the right order; otherwise not all the rxe devices will be removed during teardown. Fixes: 8700e3e7c4857 ('Soft RoCE driver') Signed-off-by: Maor Gottlieb Reviewed-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 342e78163613..4abdeb359fb4 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev) } spin_lock_bh(&dev_list_lock); - list_add_tail(&rxe_dev_list, &rxe->list); + list_add_tail(&rxe->list, &rxe_dev_list); spin_unlock_bh(&dev_list_lock); return rxe; } -- cgit v1.2.3 From 2d4b21e0a2913612274a69a3ba1bfee4cffc6e77 Mon Sep 17 00:00:00 2001 From: Yonatan Cohen Date: Thu, 19 Jan 2017 15:25:59 +0200 Subject: IB/rxe: Prevent completer from operating on an invalid QP On a UD QP, the completer tasklet is scheduled for each packet sent. If this is followed by a destroy_qp(), a kernel panic will happen as the completer tries to operate on a destroyed QP.
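The resulting teardown ordering (a condensed view of the rxe_qp_destroy() hunk below; the wrapper name is illustrative):

	static void rxe_qp_destroy_tasks(struct rxe_qp *qp)
	{
		del_timer_sync(&qp->rnr_nak_timer);
		rxe_cleanup_task(&qp->req.task);
		rxe_cleanup_task(&qp->comp.task);	/* no longer gated on IB_QPT_RC */
	}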
Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: Yonatan Cohen Reviewed-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_qp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 486d576e55bc..44b2108253bd 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp) del_timer_sync(&qp->rnr_nak_timer); rxe_cleanup_task(&qp->req.task); - if (qp_type(qp) == IB_QPT_RC) - rxe_cleanup_task(&qp->comp.task); + rxe_cleanup_task(&qp->comp.task); /* flush out any receive wr's or pending requests */ __rxe_do_task(&qp->req.task); -- cgit v1.2.3 From b4cfe3971f6eab542dd7ecc398bfa1aeec889934 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Sun, 15 Jan 2017 20:15:00 +0200 Subject: RDMA/cma: Fix unknown symbol when CONFIG_IPV6 is not enabled If IPV6 has not been enabled in the underlying kernel, we must avoid calling IPV6 procedures in rdma_cm.ko. This requires using "IS_ENABLED(CONFIG_IPV6)" in "if" statements surrounding any code which calls external IPV6 procedures. In the instance fixed here, procedure cma_bind_addr() called ipv6_addr_type() -- which resulted in calling external procedure __ipv6_addr_type(). Fixes: 6c26a77124ff ("RDMA/cma: fix IPv6 address resolution") Cc: # v4.2+ Cc: Spencer Baugh Signed-off-by: Jack Morgenstein Reviewed-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e7dcfac877ca..3e70a9c5d79d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, if (!src_addr || !src_addr->sa_family) { src_addr = (struct sockaddr *) &id->route.addr.src_addr; src_addr->sa_family = dst_addr->sa_family; - if (dst_addr->sa_family == AF_INET6) { + if (IS_ENABLED(CONFIG_IPV6) && + dst_addr->sa_family == AF_INET6) { struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; -- cgit v1.2.3
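The guard works because IS_ENABLED(CONFIG_IPV6) is a compile-time constant: with IPv6 disabled, the compiler discards the whole branch, so no reference to the missing __ipv6_addr_type() symbol is emitted into rdma_cm.ko. A sketch of the pattern (a hypothetical helper mirroring the cma_bind_addr() logic above):

	#include <linux/in6.h>
	#include <linux/kconfig.h>
	#include <linux/socket.h>

	static void bind_src_to_dst_family(struct sockaddr *src_addr,
					   struct sockaddr *dst_addr)
	{
		src_addr->sa_family = dst_addr->sa_family;
		if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src_addr;
			struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst_addr;

			/* IPv6-only handling stays inside the guarded branch */
			src6->sin6_scope_id = dst6->sin6_scope_id;
		}
	}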