Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_verbs.c')
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  135
1 file changed, 49 insertions, 86 deletions
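
The conversion below retires the driver-private ocrdma_err() macro in favor of the kernel's standard pr_err(). As a minimal sketch, assuming ocrdma_err() was a plain printk(KERN_ERR ...) wrapper defined in ocrdma.h (its definition is not part of this diff):

	/* Presumed driver-local macro being removed (ocrdma.h): */
	#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)

	/* Standard replacement from <linux/printk.h>; pr_fmt() lets a
	 * source file prefix all of its own messages consistently: */
	#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

The diff also drops the driver's hand-rolled atomic use_cnt counters on PDs, CQs, QPs, and SRQs, relying instead on the reference counting the IB core already performs on these objects; that removes several error-only code paths (-EFAULT on a busy PD, -EAGAIN on a busy SRQ).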
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b29a4246ef41..dcfbab177faa 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -114,8 +114,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
dev = get_ocrdma_dev(ibdev);
if (port > 1) {
- ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
- dev->id, port);
+ pr_err("%s(%d) invalid_port=0x%x\n", __func__,
+ dev->id, port);
return -EINVAL;
}
netdev = dev->nic_info.netdev;
@@ -155,8 +155,7 @@ int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
dev = get_ocrdma_dev(ibdev);
if (port > 1) {
- ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
- dev->id, port);
+ pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
return -EINVAL;
}
return 0;
@@ -398,7 +397,6 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
kfree(pd);
return ERR_PTR(status);
}
- atomic_set(&pd->use_cnt, 0);
if (udata && context) {
status = ocrdma_copy_pd_uresp(pd, context, udata);
@@ -419,12 +417,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
int status;
u64 usr_db;
- if (atomic_read(&pd->use_cnt)) {
- ocrdma_err("%s(%d) pd=0x%x is in use.\n",
- __func__, dev->id, pd->id);
- status = -EFAULT;
- goto dealloc_err;
- }
status = ocrdma_mbx_dealloc_pd(dev, pd);
if (pd->uctx) {
u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
@@ -436,7 +428,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
}
kfree(pd);
-dealloc_err:
return status;
}
@@ -450,8 +441,8 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
struct ocrdma_dev *dev = pd->dev;
if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
- ocrdma_err("%s(%d) leaving err, invalid access rights\n",
- __func__, dev->id);
+ pr_err("%s(%d) leaving err, invalid access rights\n",
+ __func__, dev->id);
return ERR_PTR(-EINVAL);
}
@@ -474,7 +465,6 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
return ERR_PTR(-ENOMEM);
}
mr->pd = pd;
- atomic_inc(&pd->use_cnt);
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
@@ -664,7 +654,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
if (status)
goto mbx_err;
mr->pd = pd;
- atomic_inc(&pd->use_cnt);
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
@@ -689,7 +678,6 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
if (mr->hwmr.fr_mr == 0)
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
- atomic_dec(&mr->pd->use_cnt);
/* it could be user registered memory. */
if (mr->umem)
ib_umem_release(mr->umem);
@@ -714,8 +702,8 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
uresp.phase_change = cq->phase_change ? 1 : 0;
status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (status) {
- ocrdma_err("%s(%d) copy error cqid=0x%x.\n",
- __func__, cq->dev->id, cq->id);
+ pr_err("%s(%d) copy error cqid=0x%x.\n",
+ __func__, cq->dev->id, cq->id);
goto err;
}
uctx = get_ocrdma_ucontext(ib_ctx);
@@ -752,7 +740,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
spin_lock_init(&cq->cq_lock);
spin_lock_init(&cq->comp_handler_lock);
- atomic_set(&cq->use_cnt, 0);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
cq->dev = dev;
@@ -799,9 +786,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_dev *dev = cq->dev;
- if (atomic_read(&cq->use_cnt))
- return -EINVAL;
-
status = ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
@@ -837,57 +821,56 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
if (attrs->qp_type != IB_QPT_GSI &&
attrs->qp_type != IB_QPT_RC &&
attrs->qp_type != IB_QPT_UD) {
- ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
- __func__, dev->id, attrs->qp_type);
+ pr_err("%s(%d) unsupported qp type=0x%x requested\n",
+ __func__, dev->id, attrs->qp_type);
return -EINVAL;
}
if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
- ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_send_wr);
- ocrdma_err("%s(%d) supported send_wr=0x%x\n",
- __func__, dev->id, dev->attr.max_wqe);
+ pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_send_wr);
+ pr_err("%s(%d) supported send_wr=0x%x\n",
+ __func__, dev->id, dev->attr.max_wqe);
return -EINVAL;
}
if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
- ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_recv_wr);
- ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
- __func__, dev->id, dev->attr.max_rqe);
+ pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_recv_wr);
+ pr_err("%s(%d) supported recv_wr=0x%x\n",
+ __func__, dev->id, dev->attr.max_rqe);
return -EINVAL;
}
if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
- ocrdma_err("%s(%d) unsupported inline data size=0x%x"
- " requested\n", __func__, dev->id,
- attrs->cap.max_inline_data);
- ocrdma_err("%s(%d) supported inline data size=0x%x\n",
- __func__, dev->id, dev->attr.max_inline_data);
+ pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_inline_data);
+ pr_err("%s(%d) supported inline data size=0x%x\n",
+ __func__, dev->id, dev->attr.max_inline_data);
return -EINVAL;
}
if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
- ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_send_sge);
- ocrdma_err("%s(%d) supported send_sge=0x%x\n",
- __func__, dev->id, dev->attr.max_send_sge);
+ pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_send_sge);
+ pr_err("%s(%d) supported send_sge=0x%x\n",
+ __func__, dev->id, dev->attr.max_send_sge);
return -EINVAL;
}
if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
- ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_recv_sge);
- ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
- __func__, dev->id, dev->attr.max_recv_sge);
+ pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_recv_sge);
+ pr_err("%s(%d) supported recv_sge=0x%x\n",
+ __func__, dev->id, dev->attr.max_recv_sge);
return -EINVAL;
}
/* unprivileged user space cannot create special QP */
if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
- ocrdma_err
+ pr_err
("%s(%d) Userspace can't create special QPs of type=0x%x\n",
__func__, dev->id, attrs->qp_type);
return -EINVAL;
}
/* allow creating only one GSI type of QP */
if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
- ocrdma_err("%s(%d) GSI special QPs already created.\n",
- __func__, dev->id);
+ pr_err("%s(%d) GSI special QPs already created.\n",
+ __func__, dev->id);
return -EINVAL;
}
/* verify consumer QPs are not trying to use GSI QP's CQ */
@@ -896,8 +879,8 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
(dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
(dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
(dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
- ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
- __func__, dev->id);
+ pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
+ __func__, dev->id);
return -EINVAL;
}
}
@@ -949,7 +932,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
}
status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (status) {
- ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
+ pr_err("%s(%d) user copy error.\n", __func__, dev->id);
goto err;
}
status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
@@ -1023,15 +1006,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->state = OCRDMA_QPS_RST;
}
-static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
-{
- atomic_inc(&pd->use_cnt);
- atomic_inc(&qp->sq_cq->use_cnt);
- atomic_inc(&qp->rq_cq->use_cnt);
- if (qp->srq)
- atomic_inc(&qp->srq->use_cnt);
- qp->ibqp.qp_num = qp->id;
-}
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
struct ib_qp_init_attr *attrs)
@@ -1099,7 +1073,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
goto cpy_err;
}
ocrdma_store_gsi_qp_cq(dev, attrs);
- ocrdma_set_qp_use_cnt(qp, pd);
+ qp->ibqp.qp_num = qp->id;
mutex_unlock(&dev->dev_lock);
return &qp->ibqp;
@@ -1112,7 +1086,7 @@ mbx_err:
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
kfree(qp);
- ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
+ pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
return ERR_PTR(status);
}
@@ -1162,10 +1136,10 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
spin_unlock_irqrestore(&qp->q_lock, flags);
if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
- ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
- "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
- __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
- old_qps, new_qps);
+ pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
+ __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
+ old_qps, new_qps);
goto param_err;
}
@@ -1475,11 +1449,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
ocrdma_del_flush_qp(qp);
- atomic_dec(&qp->pd->use_cnt);
- atomic_dec(&qp->sq_cq->use_cnt);
- atomic_dec(&qp->rq_cq->use_cnt);
- if (qp->srq)
- atomic_dec(&qp->srq->use_cnt);
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
kfree(qp);
@@ -1565,14 +1534,12 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto arm_err;
}
- atomic_set(&srq->use_cnt, 0);
if (udata) {
status = ocrdma_copy_srq_uresp(srq, udata);
if (status)
goto arm_err;
}
- atomic_inc(&pd->use_cnt);
return &srq->ibsrq;
arm_err:
@@ -1618,18 +1585,12 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
srq = get_ocrdma_srq(ibsrq);
dev = srq->dev;
- if (atomic_read(&srq->use_cnt)) {
- ocrdma_err("%s(%d) err, srq=0x%x in use\n",
- __func__, dev->id, srq->id);
- return -EAGAIN;
- }
status = ocrdma_mbx_destroy_srq(dev, srq);
if (srq->pd->uctx)
ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
- atomic_dec(&srq->pd->use_cnt);
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
kfree(srq);
@@ -1677,9 +1638,9 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
{
if (wr->send_flags & IB_SEND_INLINE) {
if (wr->sg_list[0].length > qp->max_inline_data) {
- ocrdma_err("%s() supported_len=0x%x,"
- " unspported len req=0x%x\n", __func__,
- qp->max_inline_data, wr->sg_list[0].length);
+ pr_err("%s() supported_len=0x%x,\n"
+ " unspported len req=0x%x\n", __func__,
+ qp->max_inline_data, wr->sg_list[0].length);
return -EINVAL;
}
memcpy(sge,
@@ -1773,12 +1734,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
return -EINVAL;
}
while (wr) {
if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
wr->num_sge > qp->sq.max_sges) {
+ *bad_wr = wr;
status = -ENOMEM;
break;
}
@@ -1856,7 +1819,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
- u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp));
+ u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
iowrite32(val, qp->rq_db);
}
@@ -2094,8 +2057,8 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
break;
default:
ibwc->status = IB_WC_GENERAL_ERR;
- ocrdma_err("%s() invalid opcode received = 0x%x\n",
- __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
+ pr_err("%s() invalid opcode received = 0x%x\n",
+ __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
break;
};
}
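
The two *bad_wr assignments added in ocrdma_post_send() follow the ib_post_send() contract: when posting fails, the provider must point bad_wr at the first work request that could not be posted, so the consumer knows how far the chain got. A caller-side sketch (illustrative names, not from this patch; types come from <rdma/ib_verbs.h>):

	struct ib_send_wr *bad_wr;
	int ret;

	/* On failure, bad_wr marks the first WR that never reached the
	 * hardware; every WR ahead of it in the chain was posted. */
	ret = ib_post_send(qp, &send_wr, &bad_wr);
	if (ret)
		pr_err("post_send failed at wr_id=0x%llx, err=%d\n",
		       bad_wr->wr_id, ret);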