path: root/include/linux/mlx4/device.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-01 10:51:38 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-01 10:51:38 -0700
commit	f470f8d4e702593ee1d0852871ad80373bce707b (patch)
tree	85a67e65c5e5b9777639bd8f4c763a4cf8787e0e /include/linux/mlx4/device.h
parent	dc47d3810cdcb4f32bfa31d50f26af97aced0638 (diff)
parent	504255f8d0480cf293962adf4bc3aecac645ae71 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (62 commits)
  mlx4_core: Deprecate log_num_vlan module param
  IB/mlx4: Don't set VLAN in IBoE WQEs' control segment
  IB/mlx4: Enable 4K mtu for IBoE
  RDMA/cxgb4: Mark QP in error before disabling the queue in firmware
  RDMA/cxgb4: Serialize calls to CQ's comp_handler
  RDMA/cxgb3: Serialize calls to CQ's comp_handler
  IB/qib: Fix issue with link states and QSFP cables
  IB/mlx4: Configure extended active speeds
  mlx4_core: Add extended port capabilities support
  IB/qib: Hold links until tuning data is available
  IB/qib: Clean up checkpatch issue
  IB/qib: Remove s_lock around header validation
  IB/qib: Precompute timeout jiffies to optimize latency
  IB/qib: Use RCU for qpn lookup
  IB/qib: Eliminate divide/mod in converting idx to egr buf pointer
  IB/qib: Decode path MTU optimization
  IB/qib: Optimize RC/UC code by IB operation
  IPoIB: Use the right function to do DMA unmap pages
  RDMA/cxgb4: Use correct QID in insert_recv_cqe()
  RDMA/cxgb4: Make sure flush CQ entries are collected on connection close
  ...
Diffstat (limited to 'include/linux/mlx4/device.h')
-rw-r--r--	include/linux/mlx4/device.h	16
1 file changed, 14 insertions, 2 deletions
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 2366f94a095..84b0b1848f1 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -61,6 +61,7 @@ enum {
MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
+ MLX4_DEV_CAP_FLAG_XRC = 1LL << 3,
MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6,
MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7,
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
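The new MLX4_DEV_CAP_FLAG_XRC bit lets consumers of mlx4_core (such as the mlx4_ib driver) test whether the firmware reports XRC support before exposing XRC resources. A minimal sketch of such a check, using only the capability mask in struct mlx4_caps; the helper name is hypothetical:

#include <linux/mlx4/device.h>

/* Hypothetical helper: true if the HCA firmware advertises XRC support.
 * dev->caps.flags is the 64-bit device capability mask that mlx4_core
 * fills in from the QUERY_DEV_CAP firmware command. */
static bool mlx4_dev_has_xrc(struct mlx4_dev *dev)
{
	return !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC);
}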
@@ -83,6 +84,12 @@ enum {
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48
};
+#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
+
+enum {
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
+};
+
enum {
MLX4_BMME_FLAG_LOCAL_INV = 1 << 6,
MLX4_BMME_FLAG_REMOTE_INV = 1 << 7,
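MLX4_ATTR_EXTENDED_PORT_INFO is the (big-endian) attribute ID used when querying a port's ExtendedPortInfo, and MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO marks ports that support that query (see the ext_port_cap field added to struct mlx4_caps further down in this diff). A sketch of the kind of per-port check a consumer might make; the helper name is made up:

#include <linux/mlx4/device.h>

/* Hypothetical helper: true if this port reports the extended port info
 * capability, i.e. an ExtendedPortInfo query (attribute id
 * MLX4_ATTR_EXTENDED_PORT_INFO) is expected to succeed. mlx4 ports are
 * numbered from 1. */
static bool mlx4_port_has_ext_info(struct mlx4_dev *dev, int port)
{
	return !!(dev->caps.ext_port_cap[port] &
		  MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO);
}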
@@ -257,6 +264,8 @@ struct mlx4_caps {
int num_qp_per_mgm;
int num_pds;
int reserved_pds;
+ int max_xrcds;
+ int reserved_xrcds;
int mtt_entry_sz;
u32 max_msg_sz;
u32 page_size_cap;
@@ -277,6 +286,7 @@ struct mlx4_caps {
u32 port_mask;
enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
u32 max_counters;
+ u8 ext_port_cap[MLX4_MAX_PORTS + 1];
};
struct mlx4_buf_list {
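The struct mlx4_caps additions above record how many XRC domains the device supports (max_xrcds, with reserved_xrcds set aside) and a per-port extended capability byte. A sketch, not part of the patch, of dumping the new fields at probe time; the helper name and the use of pr_info are assumptions:

#include <linux/kernel.h>
#include <linux/mlx4/device.h>

/* Sketch (not from this patch): log the newly exported capability fields.
 * mlx4 ports are numbered from 1, which is why the ext_port_cap array is
 * sized MLX4_MAX_PORTS + 1. */
static void mlx4_dump_new_caps(struct mlx4_dev *dev)
{
	int port;

	pr_info("mlx4: max_xrcds %d (reserved %d)\n",
		dev->caps.max_xrcds, dev->caps.reserved_xrcds);

	for (port = 1; port <= dev->caps.num_ports; ++port)
		pr_info("mlx4: port %d ext_port_cap 0x%02x\n",
			port, dev->caps.ext_port_cap[port]);
}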
@@ -500,6 +510,8 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
+int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
+void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
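The new mlx4_xrcd_alloc()/mlx4_xrcd_free() pair mirrors the existing PD interface: mlx4_core hands out an XRC domain number that upper drivers attach to XRC SRQs and QPs. A minimal usage sketch, guarded by the capability bit added earlier; the example_* names and the error code choice are assumptions:

#include <linux/errno.h>
#include <linux/mlx4/device.h>

/* Sketch: allocate an XRC domain number, guarded by the capability bit.
 * The xrcdn is an opaque index managed by mlx4_core and is released with
 * mlx4_xrcd_free() when it is no longer referenced. */
static int example_get_xrcd(struct mlx4_dev *dev, u32 *xrcdn)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return -ENOSYS;

	return mlx4_xrcd_alloc(dev, xrcdn);
}

static void example_put_xrcd(struct mlx4_dev *dev, u32 xrcdn)
{
	mlx4_xrcd_free(dev, xrcdn);
}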
@@ -539,8 +551,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
- u64 db_rec, struct mlx4_srq *srq);
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);
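mlx4_srq_alloc() now takes the completion queue number and XRC domain in addition to the PD, so the same entry point covers both ordinary and XRC SRQs. A hedged sketch of a caller passing the new arguments; what a plain (non-XRC) SRQ passes for cqn and xrcdn is determined by the callers updated elsewhere in this merge, and the wrapper name here is hypothetical:

#include <linux/mlx4/device.h>

/* Sketch: the extended mlx4_srq_alloc() call for an XRC SRQ. The CQ
 * number and XRC domain are assumed to come from a previously created CQ
 * and from mlx4_xrcd_alloc(); mtt and db_rec describe the SRQ buffer and
 * doorbell record exactly as before this change. */
static int example_xrc_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn,
				 u16 xrcdn, struct mlx4_mtt *mtt,
				 u64 db_rec, struct mlx4_srq *srq)
{
	return mlx4_srq_alloc(dev, pdn, cqn, xrcdn, mtt, db_rec, srq);
}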