Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_ae.c      |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c           |  2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c           | 16
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c             | 70
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c             | 80
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c            |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c      |  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c    |  6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  |  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c |  6
10 files changed, 87 insertions(+), 103 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index d5d1929753e..cedda25232b 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
return "C2_QP_STATE_ERROR";
default:
return "<invalid QP state>";
- };
+ }
}
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d6c5a73becf..f0612645de9 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
ibdev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
}
mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3f831de9a4d..b1a6cb3a280 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+ char name[MLX5_MAX_EQ_NAME];
struct mlx5_eq *eq, *n;
int ncomp_vec;
int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
goto clean;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(&dev->mdev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- eq->name,
- &dev->mdev.priv.uuari.uars[0]);
+ name, &dev->mdev.priv.uuari.uars[0]);
if (err) {
kfree(eq);
goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len = (unsigned int)-1;
props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
- props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
- IB_ATOMIC_HCA : IB_ATOMIC_NONE;
- props->masked_atomic_cap = IB_ATOMIC_HCA;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
ibev.device = &ibdev->ib_dev;
ibev.element.port_num = port;
+ if (port < 1 || port > ibdev->num_ports) {
+ mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+ return;
+ }
+
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f..3453580b1eb 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@ enum {
DEF_CACHE_SIZE = 10,
};
+enum {
+ MLX5_UMR_ALIGN = 2048
+};
+
static __be64 *mr_align(__be64 *ptr, int align)
{
unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
- struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
int npages = 1 << ent->order;
- int size = sizeof(u64) * npages;
int err = 0;
int i;
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
}
mr->order = ent->order;
mr->umred = 1;
- mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
- if (!mr->pas) {
- kfree(mr);
- err = -ENOMEM;
- goto out;
- }
- mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, mr->dma)) {
- kfree(mr->pas);
- kfree(mr);
- err = -ENOMEM;
- goto out;
- }
-
in->seg.status = 1 << 6;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
sizeof(*in));
if (err) {
mlx5_ib_warn(dev, "create mkey failed %d\n", err);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
- kfree(mr->pas);
kfree(mr);
goto out;
}
@@ -129,11 +114,9 @@ out:
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
- struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_ib_mr *mr;
- int size;
int err;
int i;
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->size--;
spin_unlock(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
- } else {
- size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
- kfree(mr->pas);
+ else
kfree(mr);
- }
}
}
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
- struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_ib_mr *mr;
- int size;
int err;
+ cancel_delayed_work(&ent->dwork);
while (1) {
spin_lock(&ent->lock);
if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->size--;
spin_unlock(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
- } else {
- size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
- kfree(mr->pas);
+ else
kfree(mr);
- }
}
}
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
int i;
dev->cache.stopped = 1;
- destroy_workqueue(dev->cache.wq);
+ flush_workqueue(dev->cache.wq);
mlx5_mr_cache_debugfs_cleanup(dev);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
clean_keys(dev, i);
+ destroy_workqueue(dev->cache.wq);
+
return 0;
}
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
int page_shift, int order, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct device *ddev = dev->ib_dev.dma_device;
struct umr_common *umrc = &dev->umrc;
struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
+ int size = sizeof(u64) * npages;
int err;
int i;
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+ mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!mr->pas) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mlx5_ib_populate_pas(dev, umem, page_shift,
+ mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+ mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, mr->dma)) {
+ kfree(mr->pas);
+ err = -ENOMEM;
+ goto error;
+ }
memset(&wr, 0, sizeof(wr));
wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
wait_for_completion(&mr->done);
up(&umrc->sem);
+ dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+ kfree(mr->pas);
+
if (mr->status != IB_WC_SUCCESS) {
mlx5_ib_warn(dev, "reg umr failed\n");
err = -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 045f8cdbd30..5659ea88074 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
switch (qp_type) {
case IB_QPT_XRC_INI:
- size = sizeof(struct mlx5_wqe_xrc_seg);
+ size += sizeof(struct mlx5_wqe_xrc_seg);
/* fall through */
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
sizeof(struct mlx5_wqe_raddr_seg);
break;
+ case IB_QPT_XRC_TGT:
+ return 0;
+
case IB_QPT_UC:
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg);
break;
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_datagram_seg);
break;
case MLX5_IB_QPT_REG_UMR:
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg);
break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
return wqe_size;
if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
- mlx5_ib_dbg(dev, "\n");
+ mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+ wqe_size, dev->mdev.caps.max_sq_desc_sz);
return -EINVAL;
}
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+ if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+ mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+ qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+ return -ENOMEM;
+ }
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.max_gs = attr->cap.max_send_sge;
- qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+ qp->sq.max_post = wq_size / wqe_size;
+ attr->cap.max_send_wr = qp->sq.max_post;
return wq_size;
}
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX,
},
},
[MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
},
},
};
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
rseg->reserved = 0;
}
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
- } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
- } else {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare = 0;
- }
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
- struct ib_send_wr *wr)
-{
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- set_raddr_seg(seg, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
-
- set_atomic_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_atomic_seg);
-
- size += (sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_atomic_seg)) / 16;
- break;
-
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- set_raddr_seg(seg, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
-
- set_masked_atomic_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
- size += (sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
- break;
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -ENOSYS;
+ *bad_wr = wr;
+ goto out;
case IB_WR_LOCAL_INV:
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 84d297afd6a..0aa478bc291 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_vfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
- goto err_srq;
+ goto err_usr_kern_srq;
}
mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
err_core:
mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
if (pd->uobject)
destroy_srq_user(pd, srq);
else
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7c9d35f39d7..69020173899 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
eqe->type, eqe->subtype, eq->eqn);
break;
- };
+ }
set_eqe_hw(eqe);
++eq->cons_index;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 4ed8235d2d3..50219ab2279 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
return IB_QPS_SQE;
case OCRDMA_QPS_ERR:
return IB_QPS_ERR;
- };
+ }
return IB_QPS_ERR;
}
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
return OCRDMA_QPS_SQE;
case IB_QPS_ERR:
return OCRDMA_QPS_ERR;
- };
+ }
return OCRDMA_QPS_ERR;
}
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
break;
default:
return -EINVAL;
- };
+ }
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
if (!cmd)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 56e004940f1..0ce7674621e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
- };
+ }
}
static struct ocrdma_driver ocrdma_drv = {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6e982bb43c3..69f1d1221a6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
/* Unsupported */
*ib_speed = IB_SPEED_SDR;
*ib_width = IB_WIDTH_1X;
- };
+ }
}
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
default:
ibwc_status = IB_WC_GENERAL_ERR;
break;
- };
+ }
return ibwc_status;
}
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
pr_err("%s() invalid opcode received = 0x%x\n",
__func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
break;
- };
+ }
}
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,