Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          |  15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  |  40
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h     |  38
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c     | 189
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     | 412
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c    |  34
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c    |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c     |  52
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c                 |  23
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c                |  56
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h               |   2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c          |  12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h          |   4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c          |  76
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c             |  38
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c         |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c        |   7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c        |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          | 162
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c         |   5
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c           |   7
-rw-r--r--  drivers/net/ethernet/marvell/skge.c                  |   9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c           |  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h         |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c        |  28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c         |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c       |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  |  16
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c             |  24
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c  |  10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c     |  52
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c |   8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c    |  12
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c          |   4
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_mpi.c          |   2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c                |   4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c                      |  87
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c                      |  28
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h                 |  56
-rw-r--r--  drivers/net/ethernet/sfc/nic.c                       |   9
-rw-r--r--  drivers/net/ethernet/sfc/nic.h                       |  12
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h                   |   6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                       |  19
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c               |   3
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c                 |   9
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c          |   6
47 files changed, 994 insertions, 664 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 97b3d32a98b..c5e375ddd6c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1197,8 +1197,9 @@ union cdu_context {
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM 1024
+#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
+ BNX2X_VF_CIDS + \
+ CNIC_ISCSI_CID_MAX)
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
@@ -1527,7 +1528,6 @@ struct bnx2x {
#define PCI_32BIT_FLAG (1 << 1)
#define ONE_PORT_FLAG (1 << 2)
#define NO_WOL_FLAG (1 << 3)
-#define USING_DAC_FLAG (1 << 4)
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
@@ -1621,7 +1621,7 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
/* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
u32 lin_cnt;
@@ -2072,7 +2072,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp);
/* FLR related routines */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2498,4 +2499,8 @@ enum bnx2x_pci_bus_speed {
};
void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 61726af1de6..4ab4c89c60c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
}
#endif
+ skb_record_rx_queue(skb, fp->rx_queue);
napi_gro_receive(&fp->napi, skb);
}
@@ -2481,8 +2482,7 @@ load_error_cnic2:
load_error_cnic1:
bnx2x_napi_disable_cnic(bp);
/* Update the number of queues without the cnic queues */
- rc = bnx2x_set_real_num_queues(bp, 0);
- if (rc)
+ if (bnx2x_set_real_num_queues(bp, 0))
BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
BNX2X_ERR("CNIC-related load failed\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 324de5f0533..e8efa1c93ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -891,17 +891,8 @@ static void bnx2x_get_regs(struct net_device *dev,
* will re-enable parity attentions right after the dump.
*/
- /* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- /* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = DUMP_ALL_PRESETS;
dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +919,9 @@ static void bnx2x_get_regs(struct net_device *dev,
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
- /* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
+ /* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
-
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +975,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
* will re-enable parity attentions right after the dump.
*/
- /* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- /* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = bp->dump_preset_idx;
dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1005,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
/* Actually read the registers */
__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
- /* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
+ /* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
-
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 76df015f486..c2dfea7968f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -640,23 +640,35 @@ static const struct {
* [30] MCP Latched ump_tx_parity
* [31] MCP Latched scpad_parity
*/
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
/* Below registers control the MCP parity attention output. When
* MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
* enabled, when cleared - disabled.
*/
-static const u32 mcp_attn_ctl_regs[] = {
- MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_0,
- MISC_REG_AEU_ENABLE4_PXP_0,
- MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
- MISC_REG_AEU_ENABLE4_NIG_1,
- MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+ u32 addr;
+ u32 bits;
+} mcp_attn_ctl_regs[] = {
+ { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
u32 reg_val;
for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
- reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
if (enable)
- reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val |= mcp_attn_ctl_regs[i].bits;
else
- reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ reg_val &= ~mcp_attn_ctl_regs[i].bits;
- REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da1..51468227bf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
#define EDC_MODE_LINEAR 0x0022
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_ACTIVE_DAC 0x0066
/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_update_link_attr(params, vars->link_attr_sync);
}
+static void bnx2x_disable_kr2(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ int i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
u16 lane, i, cl72_ctrl, an_adv = 0;
- u16 ucode_ver;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
- */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
- if (ucode_ver < 0xd108) {
- DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
- ucode_ver);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
- }
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ } else {
+ bnx2x_disable_kr2(params, vars, phy);
}
/* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
- u16 lane = bnx2x_get_warpcore_lane(phy, params);
vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
if (!vars->turn_to_run_wc_rt)
return;
- /* Return if there is no link partner */
- if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
- DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
- return;
- }
-
if (vars->rx_tx_asic_rst) {
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
/*10G KR*/
lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
- DP(NETIF_MSG_LINK,
- "gp_status1 0x%x\n", gp_status1);
-
if (lnkup_kr || lnkup) {
- vars->rx_tx_asic_rst = 0;
- DP(NETIF_MSG_LINK,
- "link up, rx_tx_asic_rst 0x%x\n",
- vars->rx_tx_asic_rst);
+ vars->rx_tx_asic_rst = 0;
} else {
/* Reset the lane to see if link comes up.*/
bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
* enabled transmitter to avoid current leakage in case
* no module is connected
*/
- if (bnx2x_is_sfp_module_plugged(phy, params))
- bnx2x_sfp_module_detection(phy, params);
- else
- bnx2x_sfp_e3_set_transmitter(params, phy, 1);
+ if ((params->loopback_mode == LOOPBACK_NONE) ||
+ (params->loopback_mode == LOOPBACK_EXT)) {
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ bnx2x_sfp_e3_set_transmitter(params,
+ phy, 1);
+ }
bnx2x_warpcore_config_sfi(phy, params);
break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
duplex);
+ /* In case of KR link down, start up the recovering procedure */
+ if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+ (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params,
params->phy[INT_PHY].config_init(phy, params, vars);
}
+ /* Re-read this value in case it was changed inside config_init due to
+ * limitations of optic module
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
/* Init external phy*/
if (non_ext_phy) {
if (params->phy[INT_PHY].supported &
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
- check_limiting_mode = 1;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
} else if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
break;
case EDC_MODE_PASSIVE_DAC:
+ case EDC_MODE_ACTIVE_DAC:
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
break;
default:
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
an_1000_val);
- /* set 100 speed advertisement */
- if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Set 10/100 speed advertisement */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
- /* set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
- (phy->supported &
- (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
+
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
+ an_10_100_val |= (1<<7);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->supported & SUPPORTED_10baseT_Full)) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+ (phy->supported & SUPPORTED_10baseT_Half)) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
}
}
}
-static void bnx2x_disable_kr2(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_phy *phy)
-{
- struct bnx2x *bp = params->bp;
- int i;
- static struct bnx2x_reg_set reg_set[] = {
- /* Step 1 - Program the TX/RX alignment markers */
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
- };
- DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
- reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
-
- vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
- /* Restart AN on leading lane */
- bnx2x_warpcore_restart_AN_KR(phy, params);
-}
-
static void bnx2x_kr2_recovery(struct link_params *params,
struct link_vars *vars,
struct bnx2x_phy *phy)
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Disable KR2 on both lanes */
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
bnx2x_disable_kr2(params, vars, phy);
+ /* Restart AN on leading lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
return;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a6704b55504..b42f89ce02e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp)
{
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
int rc = 0;
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
- *wb_comp = 0;
+ *comp = 0;
/* post the command on the channel used for initializations */
bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* wait for completion */
udelay(5);
- while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
if (!cnt ||
(bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
cnt--;
udelay(50);
}
- if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ if (*comp & DMAE_PCI_ERR_FLAG) {
BNX2X_ERR("DMAE PCI error!\n");
rc = DMAE_PCI_ERROR;
}
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
dmae.len = len32;
/* issue the command and wait for completion */
- rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
dmae.len = len32;
/* issue the command and wait for completion */
- rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
if (rc) {
BNX2X_ERR("DMAE returned failure %d\n", rc);
bnx2x_panic();
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
return rc;
}
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp) \
+ (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
trace_shmem_base = bp->common.shmem_base;
else
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
- addr = trace_shmem_base - 0x800;
+
+ /* sanity */
+ if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+ trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+ SCRATCH_BUFFER_SIZE(bp)) {
+ BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+ trace_shmem_base);
+ return;
+ }
+
+ addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
/* validate TRCB signature */
mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
/* read cyclic buffer pointer */
addr += 4;
mark = REG_RD(bp, addr);
- mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
- + ((mark + 0x3) & ~0x3) - 0x08000000;
+ mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+ if (mark >= trace_shmem_base || mark < addr + 4) {
+ BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+ return;
+ }
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
/* dump buffer after the mark */
- for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+ for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
pr_cont("%s%s", idx ? ", " : "", blk);
}
-static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "BRB");
+ res |= true; /* Each bit is real error! */
+
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block((*par_num)++, "BRB");
_print_parity(bp,
BRB1_REG_BRB1_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PARSER");
_print_parity(bp, PRS_REG_PRS_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TSDM");
_print_parity(bp,
TSDM_REG_TSDM_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++,
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
"SEARCHER");
_print_parity(bp, SRC_REG_SRC_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TCM");
- _print_parity(bp,
- TCM_REG_TCM_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TCM");
+ _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "TSEMI");
_print_parity(bp,
TSEM_REG_TSEM_PRTY_STS_0);
_print_parity(bp,
TSEM_REG_TSEM_PRTY_STS_1);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "XPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++, "XPB");
_print_parity(bp, GRCBASE_XPB +
PB_REG_PB_PRTY_STS);
+ break;
}
- break;
}
/* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
- int par_num, bool *global,
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
+ res |= true; /* Each bit is real error! */
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "PBF");
+ _print_next_block((*par_num)++, "PBF");
_print_parity(bp, PBF_REG_PBF_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "QM");
+ _print_next_block((*par_num)++, "QM");
_print_parity(bp, QM_REG_QM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "TM");
+ _print_next_block((*par_num)++, "TM");
_print_parity(bp, TM_REG_TM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XSDM");
+ _print_next_block((*par_num)++, "XSDM");
_print_parity(bp,
XSDM_REG_XSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XCM");
+ _print_next_block((*par_num)++, "XCM");
_print_parity(bp, XCM_REG_XCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "XSEMI");
+ _print_next_block((*par_num)++,
+ "XSEMI");
_print_parity(bp,
XSEM_REG_XSEM_PRTY_STS_0);
_print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"DOORBELLQ");
_print_parity(bp,
DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "NIG");
+ _print_next_block((*par_num)++, "NIG");
if (CHIP_IS_E1x(bp)) {
_print_parity(bp,
NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"VAUX PCI CORE");
*global = true;
break;
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "DEBUG");
+ _print_next_block((*par_num)++,
+ "DEBUG");
_print_parity(bp, DBG_REG_DBG_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "USDM");
+ _print_next_block((*par_num)++, "USDM");
_print_parity(bp,
USDM_REG_USDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "UCM");
+ _print_next_block((*par_num)++, "UCM");
_print_parity(bp, UCM_REG_UCM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "USEMI");
+ _print_next_block((*par_num)++,
+ "USEMI");
_print_parity(bp,
USEM_REG_USEM_PRTY_STS_0);
_print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
break;
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "UPB");
+ _print_next_block((*par_num)++, "UPB");
_print_parity(bp, GRCBASE_UPB +
PB_REG_PB_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "CSDM");
+ _print_next_block((*par_num)++, "CSDM");
_print_parity(bp,
CSDM_REG_CSDM_PRTY_STS);
}
break;
case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
if (print) {
- _print_next_block(par_num++, "CCM");
+ _print_next_block((*par_num)++, "CCM");
_print_parity(bp, CCM_REG_CCM_PRTY_STS);
}
break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CSEMI");
+ res |= true; /* Each bit is real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "CSEMI");
_print_parity(bp,
CSEM_REG_CSEM_PRTY_STS_0);
_print_parity(bp,
CSEM_REG_CSEM_PRTY_STS_1);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block((*par_num)++, "PXP");
_print_parity(bp, PXP_REG_PXP_PRTY_STS);
_print_parity(bp,
PXP2_REG_PXP2_PRTY_STS_0);
_print_parity(bp,
PXP2_REG_PXP2_PRTY_STS_1);
- }
- break;
- case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
- if (print)
- _print_next_block(par_num++,
- "PXPPCICLOCKCLIENT");
- break;
- case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CFC");
_print_parity(bp,
CFC_REG_CFC_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CDU");
_print_parity(bp, CDU_REG_CDU_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ _print_next_block((*par_num)++, "DMAE");
_print_parity(bp,
DMAE_REG_DMAE_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "IGU");
if (CHIP_IS_E1x(bp))
_print_parity(bp,
HC_REG_HC_PRTY_STS);
else
_print_parity(bp,
IGU_REG_IGU_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "MISC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "MISC");
_print_parity(bp,
MISC_REG_MISC_PRTY_STS);
+ break;
}
- break;
}
/* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
- bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ bool res = false;
+ u32 cur_bit;
+ int i;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
if (print)
- _print_next_block(par_num++, "MCP ROM");
+ _print_next_block((*par_num)++,
+ "MCP ROM");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP RX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP UMP TX");
*global = true;
+ res |= true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
if (print)
- _print_next_block(par_num++,
+ _print_next_block((*par_num)++,
"MCP SCPAD");
- *global = true;
+ /* clear latched SCPAD PARITY from MCP */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 1UL << 10);
break;
}
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
}
}
- return par_num;
+ return res;
}
-static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
- int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
{
- int i = 0;
- u32 cur_bit = 0;
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
for (i = 0; sig; i++) {
- cur_bit = ((u32)0x1 << i);
+ cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "PGLUE_B");
+ res |= true; /* Each bit is real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PGLUE_B");
_print_parity(bp,
- PGLUE_B_REG_PGLUE_B_PRTY_STS);
- }
- break;
- case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
- if (print) {
- _print_next_block(par_num++, "ATC");
+ PGLUE_B_REG_PGLUE_B_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "ATC");
_print_parity(bp,
ATC_REG_ATC_PRTY_STS);
+ break;
}
- break;
}
-
/* Clear the bit */
sig &= ~cur_bit;
}
}
- return par_num;
+ return res;
}
static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
u32 *sig)
{
+ bool res = false;
+
if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
(sig[1] & HW_PRTY_ASSERT_SET_1) ||
(sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
if (print)
netdev_err(bp->dev,
"Parity errors detected in blocks: ");
- par_num = bnx2x_check_blocks_with_parity0(bp,
- sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
- par_num = bnx2x_check_blocks_with_parity1(bp,
- sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity2(bp,
- sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
- par_num = bnx2x_check_blocks_with_parity3(
- sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
- par_num = bnx2x_check_blocks_with_parity4(bp,
- sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+ res |= bnx2x_check_blocks_with_parity0(bp,
+ sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity1(bp,
+ sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity2(bp,
+ sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity3(bp,
+ sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity4(bp,
+ sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
if (print)
pr_cont("\n");
+ }
- return true;
- } else
- return false;
+ return res;
}
/**
@@ -4703,6 +4729,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
attn.sig[3] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
port*4);
+ /* Since MCP attentions can't be disabled inside the block, we need to
+ * read AEU registers to see whether they're currently disabled
+ */
+ attn.sig[3] &= ((REG_RD(bp,
+ !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+ : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+ MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+ ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
if (!CHIP_IS_E1x(bp))
attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5481,24 @@ static void bnx2x_timer(unsigned long data)
if (IS_PF(bp) &&
!BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
- u32 drv_pulse;
- u32 mcp_pulse;
+ u16 drv_pulse;
+ u16 mcp_pulse;
++bp->fw_drv_pulse_wr_seq;
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
- /* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
bnx2x_drv_pulse(bp);
mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
/* The delta between driver pulse and mcp response
- * should be 1 (before mcp response) or 0 (after mcp response)
+ * should not get too big. If the MFW is more than 5 pulses
+ * behind, we should worry about it enough to generate an error
+ * log.
*/
- if ((drv_pulse != mcp_pulse) &&
- (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
- /* someone lost a heartbeat... */
- BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+ BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
drv_pulse, mcp_pulse);
- }
}
if (bp->state == BNX2X_STATE_OPEN)
@@ -7120,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
int port = BP_PORT(bp);
int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
u32 low, high;
- u32 val;
+ u32 val, reg;
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@ -7265,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
val |= CHIP_IS_E1(bp) ? 0 : 0x10;
REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+ /* SCPAD_PARITY should NOT trigger close the gates */
+ reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+ reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
bnx2x_init_block(bp, BLOCK_NIG, init_phase);
if (!CHIP_IS_E1x(bp)) {
@@ -11679,9 +11722,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
static int bnx2x_open(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- bool global = false;
- int other_engine = BP_PATH(bp) ? 0 : 1;
- bool other_load_status, load_status;
int rc;
bp->stats_init = true;
@@ -11697,6 +11737,10 @@ static int bnx2x_open(struct net_device *dev)
* Parity recovery is only relevant for PF driver.
*/
if (IS_PF(bp)) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ bool other_load_status, load_status;
+ bool global = false;
+
other_load_status = bnx2x_get_load_status(bp, other_engine);
load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -12074,7 +12118,6 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
struct device *dev = &bp->pdev->dev;
if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- bp->flags |= USING_DAC_FLAG;
if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
return -EIO;
@@ -12242,8 +12285,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
- if (bp->flags & USING_DAC_FLAG)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12606,24 +12648,24 @@ static int set_max_cos_est(int chip_id)
return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- case BCM57712_VF:
return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
- case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
case BCM57840_4_10:
case BCM57840_2_20:
case BCM57840_O:
case BCM57840_MFO:
- case BCM57810_VF:
case BCM57840_MF:
- case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- case BCM57811_VF:
return BNX2X_MULTI_TX_COS_E3B0;
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
return 1;
default:
pr_err("Unknown board_type (%d), aborting\n", chip_id);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204ab..bf08ad68b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
bnx2x_vfop_qdtor, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
+ } else {
+ BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
+ return -ENOMEM;
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
- vf->abs_vfid, vfop->rc);
- return -ENOMEM;
}
static void
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
if (fid & IGU_FID_ENCODE_IS_PF)
current_pf = fid & IGU_FID_PF_NUM_MASK;
- else if (current_pf == BP_ABS_FUNC(bp))
+ else if (current_pf == BP_FUNC(bp))
bnx2x_vf_set_igu_info(bp, sb_id,
(fid & IGU_FID_VF_NUM_MASK));
DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
/* set local queue arrays */
vf->vfqs = &bp->vfdb->vfqs[qcount];
qcount += vf_sb_count(vf);
+ bnx2x_iov_static_resc(bp, vf);
}
/* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
num_vf_queues);
+ DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+ vf_idx, num_vf_queues);
}
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@ -3387,14 +3390,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* remove existing uc list macs */
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete uc_list macs\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* configure the new mac to device */
@@ -3402,6 +3407,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
BNX2X_ETH_MAC, &ramrod_flags);
+out:
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
@@ -3464,7 +3470,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
&ramrod_flags);
if (rc) {
BNX2X_ERR("failed to delete vlans\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* send queue update ramrod to configure default vlan and silent
@@ -3498,7 +3505,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
if (rc) {
BNX2X_ERR("failed to configure vlan\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* configure default vlan to vf queue and set silent
@@ -3516,18 +3524,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
BNX2X_ERR("Failed to configure default VLAN\n");
- return rc;
+ goto out;
}
/* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured th Vlan before vf was
- * and we were called because the VF came up later
+ * (will only be set if the HV configured the Vlan before vf was
+ * up and we were called because the VF came up later
*/
+out:
vf->cfg_flags &= ~VF_CFG_VLAN;
-
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
}
- return 0;
+ return rc;
}
/* crc is the first field in the bulletin board. Compute the crc over the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 86436c77af0..3b75070411a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
} else if (bp->func_stx) {
*stats_comp = 0;
- bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb8873245..28757dfacf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -980,7 +980,7 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
dmae.len = len32;
/* issue the command and wait for completion */
- return bnx2x_issue_dmae_with_comp(bp, &dmae);
+ return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
bnx2x_vf_mbx_acquire(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_INIT:
bnx2x_vf_mbx_init_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_SETUP_Q:
bnx2x_vf_mbx_setup_q(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_SET_Q_FILTERS:
bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_TEARDOWN_Q:
bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_CLOSE:
bnx2x_vf_mbx_close_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_RELEASE:
bnx2x_vf_mbx_release_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_UPDATE_RSS:
bnx2x_vf_mbx_update_rss(bp, vf, mbx);
- break;
+ return;
}
} else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
for (i = 0; i < 20; i++)
DP_CONT(BNX2X_MSG_IOV, "%x ",
mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+ }
- /* test whether we can respond to the VF (do we have an address
- * for it?)
- */
- if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
- /* mbx_resp uses the op_rc of the VF */
- vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+ /* can we respond to VF (do we have an address for it?) */
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+ /* mbx_resp uses the op_rc of the VF */
+ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
- /* notify the VF that we do not support this request */
- bnx2x_vf_mbx_resp(bp, vf);
- } else {
- /* can't send a response since this VF is unknown to us
- * just ack the FW to release the mailbox and unlock
- * the channel.
- */
- storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
- mmiowb();
- bnx2x_unlock_vf_pf_channel(bp, vf,
- mbx->first_tlv.tl.type);
- }
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf);
+ } else {
+ /* can't send a response since this VF is unknown to us
+ * just ack the FW to release the mailbox and unlock
+ * the channel.
+ */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ /* Firmware ack should be written before unlocking channel */
+ mmiowb();
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
}
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 78d6d6b970e..48f52882a22 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -106,7 +106,6 @@
#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
#define XGMAC_ADDR_AE 0x80000000
-#define XGMAC_MAX_FILTER_ADDR 31
/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
struct device *device;
struct napi_struct napi;
+ int max_macs;
struct xgmac_extra_stats xstats;
spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
netdev_mc_count(dev), netdev_uc_count(dev));
- if (dev->flags & IFF_PROMISC) {
- writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
- return;
- }
+ if (dev->flags & IFF_PROMISC)
+ value |= XGMAC_FRAME_FILTER_PR;
memset(hash_filter, 0, sizeof(hash_filter));
- if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ if (netdev_uc_count(dev) > priv->max_macs) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
}
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
goto out;
}
- if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
} else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
}
out:
- for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
- xgmac_set_mac_addr(ioaddr, NULL, reg);
+ for (i = reg; i <= priv->max_macs; i++)
+ xgmac_set_mac_addr(ioaddr, NULL, i);
for (i = 0; i < XGMAC_NUM_HASH; i++)
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
uid = readl(priv->base + XGMAC_VERSION);
netdev_info(ndev, "h/w version is 0x%x\n", uid);
+ /* Figure out how many valid mac address filter registers we have */
+ writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+ if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+ priv->max_macs = 31;
+ else
+ priv->max_macs = 7;
+
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq == -ENXIO) {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5f5896e522d..a7a941b1a65 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
/* DM9000 network board routine ---------------------------- */
-static void
-dm9000_reset(board_info_t * db)
-{
- dev_dbg(db->dev, "resetting device\n");
-
- /* RESET device */
- writeb(DM9000_NCR, db->io_addr);
- udelay(200);
- writeb(NCR_RST, db->io_data);
- udelay(200);
-}
-
/*
* Read a byte from I/O port
*/
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
writeb(value, db->io_data);
}
+static void
+dm9000_reset(board_info_t *db)
+{
+ dev_dbg(db->dev, "resetting device\n");
+
+ /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
+ * The essential point is that we have to do a double reset, and the
+ * instruction is to set LBK into MAC internal loopback mode.
+ */
+ iow(db, DM9000_NCR, 0x03);
+ udelay(100); /* Application note says at least 20 us */
+ if (ior(db, DM9000_NCR) & 1)
+ dev_err(db->dev, "dm9000 did not respond to first reset\n");
+
+ iow(db, DM9000_NCR, 0);
+ iow(db, DM9000_NCR, 0x03);
+ udelay(100);
+ if (ior(db, DM9000_NCR) & 1)
+ dev_err(db->dev, "dm9000 did not respond to second reset\n");
+}
+
/* routines for sending block to chip */
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
static void dm9000_show_carrier(board_info_t *db,
unsigned carrier, unsigned nsr)
{
+ int lpa;
struct net_device *ndev = db->ndev;
+ struct mii_if_info *mii = &db->mii;
unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
- if (carrier)
- dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
+ if (carrier) {
+ lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+ dev_info(db->dev,
+ "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
- (ncr & NCR_FDX) ? "full" : "half");
- else
+ (ncr & NCR_FDX) ? "full" : "half", lpa);
+ } else {
dev_info(db->dev, "%s: link down\n", ndev->name);
+ }
}
static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+ iow(db, DM9000_GPR, 0);
- dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
- dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+ /* If we are dealing with DM9000B, some extra steps are required: a
+ * manual phy reset, and setting init params.
+ */
+ if (db->type == TYPE_DM9000B) {
+ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
+ dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
+ }
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba3..db020230bd0 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
+#define BE_UMC_NUM_VLANS_SUPPORTED 15
#define BE_MAX_EQD 96u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -333,6 +334,7 @@ enum vf_state {
#define BE_FLAGS_LINK_STATUS_INIT 1
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
+#define BE_FLAGS_VLAN_PROMISC (1 << 4)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11ef..c08fd32bb8e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
dev_err(&adapter->pdev->dev,
"opcode %d-%d failed:status %d-%d\n",
opcode, subsystem, compl_status, extd_status);
+
+ if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ return extd_status;
}
}
done:
@@ -1195,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
if (lancer_chip(adapter)) {
req->hdr.version = 1;
- req->if_id = cpu_to_le16(adapter->if_handle);
} else if (BEx_chip(adapter)) {
if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
req->hdr.version = 2;
@@ -1203,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req->hdr.version = 2;
}
+ if (req->hdr.version > 0)
+ req->if_id = cpu_to_le16(adapter->if_handle);
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req->ulp_num = BE_ULP1_NUM;
req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1812,6 +1816,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ } else if (flags & BE_FLAGS_VLAN_PROMISC) {
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
+
+ if (value == ON)
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
} else {
struct netdev_hw_addr *ha;
int i = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d026226db88..108ca8abf0a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
MCC_STATUS_NOT_SUPPORTED = 66
};
+#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
+
#define CQE_STATUS_COMPL_MASK 0xFFFF
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc {
u8 acpi_params;
u8 wol_param;
u16 rsvd7;
- u32 rsvd8[3];
+ u32 rsvd8[7];
} __packed;
struct be_cmd_req_get_func_config {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 100b528b9bd..2c38cc40211 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
unsigned int eth_hdr_len;
struct iphdr *ip;
- /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
+ /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or less
* may cause a transmit stall on that port. So the work-around is to
- * pad such packets to a 36-byte length.
+ * pad short packets (<= 32 bytes) to a 36-byte length.
*/
- if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
+ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
if (skb_padto(skb, 36))
goto tx_drop;
skb->len = 36;
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
status = be_cmd_vlan_config(adapter, adapter->if_handle,
vids, num, 1, 0);
- /* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
- goto set_vlan_promisc;
+ /* Set to VLAN promisc mode as setting VLAN filter failed */
+ if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ goto set_vlan_promisc;
+ dev_err(&adapter->pdev->dev,
+ "Setting HW VLAN filtering failed.\n");
+ } else {
+ if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
+ /* hw VLAN filtering re-enabled. */
+ status = be_cmd_rx_filter(adapter,
+ BE_FLAGS_VLAN_PROMISC, OFF);
+ if (!status) {
+ dev_info(&adapter->pdev->dev,
+ "Disabling VLAN Promiscuous mode.\n");
+ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+ dev_info(&adapter->pdev->dev,
+ "Re-Enabling HW VLAN filtering\n");
+ }
+ }
}
return status;
set_vlan_promisc:
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- NULL, 0, 1, 1);
+ dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+
+ status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
+ if (!status) {
+ dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
+ dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
+ adapter->flags |= BE_FLAGS_VLAN_PROMISC;
+ } else
+ dev_err(&adapter->pdev->dev,
+ "Failed to enable VLAN Promiscuous mode.\n");
return status;
}
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = -EINVAL;
- goto ret;
- }
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = -EINVAL;
- goto ret;
- }
-
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->vf = vf;
vi->tx_rate = vf_cfg->tx_rate;
- vi->vlan = vf_cfg->vlan_tag;
- vi->qos = 0;
+ vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
+ vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status = 0;
if (!sriov_enabled(adapter))
return -EPERM;
- if (vf >= adapter->num_vfs || vlan > 4095)
+ if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
- if (vlan) {
- if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+ if (vlan || qos) {
+ vlan |= qos << VLAN_PRIO_SHIFT;
+ if (vf_cfg->vlan_tag != vlan) {
/* If this is new value, program it. Else skip. */
- adapter->vf_cfg[vf].vlan_tag = vlan;
-
- status = be_cmd_set_hsw_config(adapter, vlan,
- vf + 1, adapter->vf_cfg[vf].if_handle, 0);
+ vf_cfg->vlan_tag = vlan;
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+ vf_cfg->if_handle, 0);
}
} else {
/* Reset Transparent Vlan Tagging. */
- adapter->vf_cfg[vf].vlan_tag = 0;
- vlan = adapter->vf_cfg[vf].def_vid;
+ vf_cfg->vlan_tag = 0;
+ vlan = vf_cfg->def_vid;
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- adapter->vf_cfg[vf].if_handle, 0);
+ vf_cfg->if_handle, 0);
}
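
The vlan_tag value cached in vf_cfg above packs the 12-bit VLAN id and the 3-bit 802.1p priority into one 16-bit word, which is why be_get_vf_config() splits it back apart with VLAN_VID_MASK and VLAN_PRIO_SHIFT. A minimal stand-alone sketch of that packing, using the standard 0x0fff / 13 values from <linux/if_vlan.h> (the vid/qos numbers are made up for illustration):

#include <stdio.h>

#define VLAN_VID_MASK   0x0fff  /* 12-bit VLAN id, as in <linux/if_vlan.h> */
#define VLAN_PRIO_SHIFT 13      /* 3-bit priority sits above the CFI bit */

int main(void)
{
	unsigned int vid = 100, qos = 5;                      /* illustrative values */
	unsigned int tag = vid | (qos << VLAN_PRIO_SHIFT);    /* pack, as be_set_vf_vlan() does */

	/* unpack, as be_get_vf_config() does */
	printf("tag=0x%04x vid=%u qos=%u\n",
	       tag, tag & VLAN_VID_MASK, tag >> VLAN_PRIO_SHIFT);
	return 0;                                             /* prints: tag=0xa064 vid=100 qos=5 */
}
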
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
if (adapter->function_mode & FLEX10_MODE)
res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else if (adapter->function_mode & UMC_ENABLED)
+ res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
else
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
res->max_mcast_mac = BE_MAX_MC;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c4eaadeb572..9fbe4dda7a0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -88,6 +88,7 @@
#include <asm/io.h>
#include <asm/reg.h>
+#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
}
}
-static void gfar_detect_errata(struct gfar_private *priv)
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
- struct device *dev = &priv->ofdev->dev;
unsigned int pvr = mfspr(SPRN_PVR);
unsigned int svr = mfspr(SPRN_SVR);
unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_76;
- /* MPC8313 and MPC837x all rev */
- if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
- priv->errata |= GFAR_ERRATA_A002;
+ /* MPC8313 Rev < 2.0 */
+ if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+ priv->errata |= GFAR_ERRATA_12;
+}
- /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
- if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
- (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+ unsigned int svr = mfspr(SPRN_SVR);
+
+ if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
priv->errata |= GFAR_ERRATA_12;
+ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+ priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+ struct device *dev = &priv->ofdev->dev;
+
+ /* no plans to fix */
+ priv->errata |= GFAR_ERRATA_A002;
+
+ if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+ __gfar_detect_errata_85xx(priv);
+ else /* non-mpc85xx parts, i.e. e300 core based */
+ __gfar_detect_errata_83xx(priv);
if (priv->errata)
dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
/* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
- if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+ if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
return 0;
/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908a..e006a09ba89 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
err = -ENODEV;
etsects->caps = ptp_gianfar_caps;
- etsects->cksel = DEFAULT_CKSEL;
+
+ if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
+ etsects->cksel = DEFAULT_CKSEL;
if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0c524fa9f81..cfef7fc32cd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
- memcpy(details, cmd_details,
- sizeof(struct i40e_asq_cmd_details));
+ *details = *cmd_details;
/* If the cmd_details are defined copy the cookie. The
* cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
/* if the desc is available copy the temp desc to the right place */
- memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
+ *desc_on_ring = *desc;
/* if buff is not NULL assume indirect command */
if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
/* if ready, copy the desc back to temp */
if (i40e_asq_done(hw)) {
- memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
+ *desc = *desc_on_ring;
if (buff != NULL)
memcpy(buff, dma_buff->va, buff_size);
retval = le16_to_cpu(desc->retval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c21df7bc3b1..1e4ea134975 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
/* save link status information */
if (link)
- memcpy(link, hw_link_info, sizeof(struct i40e_link_status));
+ *link = *hw_link_info;
/* flag cleared so helper functions don't call AQ again */
hw->phy.get_link_info = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 601d482694e..221aa479501 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
mem->size = ALIGN(size, alignment);
mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
&mem->pa, GFP_KERNEL);
- if (mem->va)
- return 0;
+ if (!mem->va)
+ return -ENOMEM;
- return -ENOMEM;
+ return 0;
}
/**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
mem->size = size;
mem->va = kzalloc(size, GFP_KERNEL);
- if (mem->va)
- return 0;
+ if (!mem->va)
+ return -ENOMEM;
- return -ENOMEM;
+ return 0;
}
/**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
{
int ret = -ENOMEM;
- int i = 0;
- int j = 0;
+ int i, j;
if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
/* start the linear search with an imperfect hint */
i = pile->search_hint;
- while (i < pile->num_entries && ret < 0) {
+ while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
pile->search_hint = i + j;
+ break;
} else {
/* not enough, so skip over it and continue looking */
i += j;
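
For context, i40e_get_lump() is a first-fit allocator over a flat tracking array: it walks pile->list from search_hint, counts consecutive free entries, claims the run when it is long enough, and the hunk above adds the previously missing break so the search stops after a successful claim. A simplified sketch of the same search, assuming a hypothetical 16-bit list with one valid bit per entry rather than the driver's real structures:

#define PILE_VALID_BIT 0x8000	/* illustrative stand-in for I40E_PILE_VALID_BIT */

/* Return the first index of a run of 'needed' free slots, claiming it, or -1. */
int get_lump(unsigned short *list, int num_entries, int hint,
	     int needed, unsigned short id)
{
	int i = hint, j;

	while (i < num_entries) {
		if (list[i] & PILE_VALID_BIT) {		/* slot already taken, skip it */
			i++;
			continue;
		}
		for (j = 0; i + j < num_entries && j < needed; j++)
			if (list[i + j] & PILE_VALID_BIT)
				break;			/* run interrupted */
		if (j == needed) {			/* long enough: claim the run */
			for (j = 0; j < needed; j++)
				list[i + j] = id | PILE_VALID_BIT;
			return i;			/* the added 'break' avoids rescanning */
		}
		i += j;					/* too short, jump past the gap */
	}
	return -1;
}
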
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
- i40e_status ret = 0;
+ i40e_status aq_ret = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- ret = i40e_aq_remove_macvlan(&pf->hw,
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw,
vsi->seid, del_list, num_del,
NULL);
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (ret)
+ if (aq_ret)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
- ret,
+ aq_ret,
pf->hw.aq.asq_last_status);
}
}
if (num_del) {
- ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
del_list, num_del, NULL);
num_del = 0;
- if (ret)
+ if (aq_ret)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ aq_ret, pf->hw.aq.asq_last_status);
}
kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
- ret = i40e_aq_add_macvlan(&pf->hw,
- vsi->seid,
- add_list,
- num_add,
- NULL);
+ aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add,
+ NULL);
num_add = 0;
- if (ret)
+ if (aq_ret)
break;
memset(add_list, 0, sizeof(*add_list));
}
}
if (num_add) {
- ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add, NULL);
+ aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
num_add = 0;
}
kfree(add_list);
add_list = NULL;
- if (add_happened && (!ret)) {
+ if (add_happened && (!aq_ret)) {
/* do nothing */;
- } else if (add_happened && (ret)) {
+ } else if (add_happened && (aq_ret)) {
dev_info(&pf->pdev->dev,
"add filter failed, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ aq_ret, pf->hw.aq.asq_last_status);
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
!test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
- ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_multipromisc,
- NULL);
- if (ret)
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (aq_ret)
dev_info(&pf->pdev->dev,
"set multi promisc failed, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ aq_ret, pf->hw.aq.asq_last_status);
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
- ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_promisc,
- NULL);
- if (ret)
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
* i40e_vsi_kill_vlan - Remove vsi membership for given vlan
* @vsi: the vsi being configured
* @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ *
+ * Return: 0 on success or negative otherwise
**/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
* i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
* @netdev: network interface to be adjusted
* @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
**/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- int ret;
+ int ret = 0;
if (vid > 4095)
- return 0;
+ return -EINVAL;
+
+ netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
- netdev_info(vsi->netdev, "adding %pM vid=%d\n",
- netdev->dev_addr, vid);
/* If the network stack called us with vid = 0, we should
* indicate to i40e_vsi_add_vlan() that we want to receive
* any traffic (i.e. with any vlan tag, or untagged)
*/
ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
- if (!ret) {
- if (vid < VLAN_N_VID)
- set_bit(vid, vsi->active_vlans);
- }
+ if (!ret && (vid < VLAN_N_VID))
+ set_bit(vid, vsi->active_vlans);
- return 0;
+ return ret;
}
/**
* i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
* @netdev: network interface to be adjusted
* @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
**/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- netdev_info(vsi->netdev, "removing %pM vid=%d\n",
- netdev->dev_addr, vid);
+ netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
+
/* return code is ignored as there is nothing a user
* can do about failure to remove and a log message was
- * already printed from another function
+ * already printed from the other function
*/
i40e_vsi_kill_vlan(vsi, vid);
clear_bit(vid, vsi->active_vlans);
+
return 0;
}
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
* @vsi: the vsi being adjusted
* @vid: the vlan id to set as a PVID
**/
-i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ i40e_status aq_ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (ret) {
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
"%s: update vsi failed, aq_err=%d\n",
__func__, vsi->back->hw.aq.asq_last_status);
+ return -ENOENT;
}
- return ret;
+ return 0;
}
/**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
- int num_tc = 0, i;
+ u8 num_tc = 0;
+ int i;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
/* Traffic class index starts from zero so
* increment to return the actual count
*/
- num_tc++;
-
- return num_tc;
+ return num_tc + 1;
}
/**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
u32 tc_bw_max;
- int ret;
int i;
/* Get the VSI level BW configuration */
- ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
- if (ret) {
+ aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi bw config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
- return ret;
+ aq_ret, pf->hw.aq.asq_last_status);
+ return -EINVAL;
}
/* Get the VSI level BW configuration per TC */
- ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
- &bw_ets_config,
- NULL);
- if (ret) {
+ aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+ NULL);
+ if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
- return ret;
+ aq_ret, pf->hw.aq.asq_last_status);
+ return -EINVAL;
}
if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
/* 3 bits out of 4 for each TC */
vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
}
- return ret;
+
+ return 0;
}
/**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
- u8 enabled_tc,
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- int i, ret = 0;
+ i40e_status aq_ret;
+ int i;
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
- ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
- &bw_data, NULL);
- if (ret) {
+ aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+ NULL);
+ if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
"%s: AQ command Config VSI BW allocation per TC failed = %d\n",
__func__, vsi->back->hw.aq.asq_last_status);
- return ret;
+ return -EINVAL;
}
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
vsi->info.qs_handle[i] = bw_data.qs_handles[i];
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 48cbc833b05..151e00cad11 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
}
+ } else if (hw->phy.type == e1000_phy_82580) {
+ /* enable MII loopback */
+ igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
}
/* add small delay to avoid loopback test failure */
@@ -2652,6 +2655,8 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
+ memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 7fb5677451f..2c210ec35d5 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
-
- mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
static void mib_counters_timer_wrapper(unsigned long _mp)
{
struct mv643xx_eth_private *mp = (void *)_mp;
-
mib_counters_update(mp);
+ mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
mp->int_mask |= INT_TX_END_0 << i;
}
+ add_timer(&mp->mib_counters_timer);
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
if (!ppdev)
return -ENOMEM;
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ppdev->dev.of_node = pnp;
ret = platform_device_add_resources(ppdev, &res, 1);
if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->mib_counters_timer.data = (unsigned long)mp;
mp->mib_counters_timer.function = mib_counters_timer_wrapper;
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
- add_timer(&mp->mib_counters_timer);
spin_lock_init(&mp->mib_counters_lock);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1a9c4f6269e..ecc7f7b696b 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
+ struct skge_element ee;
struct sk_buff *nskb;
nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
if (!nskb)
goto resubmit;
- skb = e->skb;
+ ee = *e;
+
+ skb = ee.skb;
prefetch(skb->data);
if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
}
pci_unmap_single(skge->hw->pdev,
- dma_unmap_addr(e, mapaddr),
- dma_unmap_len(e, maplen),
+ dma_unmap_addr(&ee, mapaddr),
+ dma_unmap_len(&ee, maplen),
PCI_DMA_FROMDEVICE);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f62..afe2efa69c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
put_page(page);
return -ENOMEM;
}
- page_alloc->size = PAGE_SIZE << order;
+ page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->offset = frag_info->frag_align;
+ page_alloc->page_offset = frag_info->frag_align;
/* Not doing get_page() for each frag is a big win
* on asymmetric workloads.
*/
- atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+ atomic_set(&page->_count,
+ page_alloc->page_size / frag_info->frag_stride);
return 0;
}
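
The comment above relies on a single up-front reference grab: a page of page_size bytes holds page_size / frag_stride fragments, and _count is set to that quotient so every fragment release drops exactly one reference without a per-fragment get_page(). A rough worked example of the arithmetic (the sizes are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int page_size   = 4096 << 2;	/* e.g. an order-2 page: 16 KiB */
	unsigned int frag_stride = 2048;	/* hypothetical per-fragment stride */

	/* One reference per fragment the page can hold, taken in one shot
	 * instead of a get_page() per fragment. */
	printf("initial page refcount = %u\n", page_size / frag_stride);	/* -> 8 */
	return 0;
}
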
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++) {
frag_info = &priv->frag_info[i];
page_alloc[i] = ring_alloc[i];
- page_alloc[i].offset += frag_info->frag_stride;
- if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+ page_alloc[i].page_offset += frag_info->frag_stride;
+
+ if (page_alloc[i].page_offset + frag_info->frag_stride <=
+ ring_alloc[i].page_size)
continue;
+
if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
goto out;
}
for (i = 0; i < priv->num_frags; i++) {
frags[i] = ring_alloc[i];
- dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
ring_alloc[i] = page_alloc[i];
rx_desc->data[i].addr = cpu_to_be64(dma);
}
@@ -117,7 +121,7 @@ out:
frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].size, PCI_DMA_FROMDEVICE);
+ page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
int i)
{
const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
- if (frags[i].offset + frag_info->frag_stride > frags[i].size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
- PCI_DMA_FROMDEVICE);
+ if (next_frag_end > frags[i].page_size)
+ dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+ PCI_DMA_FROMDEVICE);
if (frags[i].page)
put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
page_alloc = &ring->page_alloc[i];
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
- while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ while (page_alloc->page_offset + frag_info->frag_stride <
+ page_alloc->page_size) {
put_page(page_alloc->page);
- page_alloc->offset += frag_info->frag_stride;
+ page_alloc->page_offset += frag_info->frag_stride;
}
page_alloc->page = NULL;
}
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
/* Save page reference in skb */
__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
- skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb_frags_rx[nr].page_offset = frags[nr].page_offset;
skb->truesize += frag_info->frag_stride;
frags[nr].page = NULL;
}
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].page_offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
+ frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306..bf06e3610d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
- u32 offset;
- u32 size;
+ u32 page_offset;
+ u32 page_size;
};
struct mlx4_en_tx_ring {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5472cbd3402..6ca30739625 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
return 0;
}
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+ int csum)
{
block->token = token;
- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ if (csum) {
+ block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+ sizeof(block->data) - 2);
+ block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ }
}
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
struct mlx5_cmd_mailbox *next = msg->next;
while (next) {
- calc_block_sig(next->buf, token);
+ calc_block_sig(next->buf, token, csum);
next = next->next;
}
}
-static void set_signature(struct mlx5_cmd_work_ent *ent)
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
- calc_chain_sig(ent->in, ent->token);
- calc_chain_sig(ent->out, ent->token);
+ calc_chain_sig(ent->in, ent->token, csum);
+ calc_chain_sig(ent->out, ent->token, csum);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
lay->type = MLX5_PCI_CMD_XPORT;
lay->token = ent->token;
lay->status_own = CMD_OWNER_HW;
- if (!cmd->checksum_disabled)
- set_signature(ent);
+ set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ktime_get_ts(&ent->ts1);
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
block = next->buf;
- if (xor8_buf(block, sizeof(*block)) != 0xff)
- return -EINVAL;
memcpy(to, block->data, copy);
to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_map;
}
+ cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
- case MLX5_CMD_STAT_LIM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 443cc4d7b02..2231d93cc7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
+ snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- name, eq);
+ eq->name, eq);
if (err)
goto err_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b47739b0b5f..bc0f5fb66e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
struct mlx5_cmd_set_hca_cap_mbox_out set_out;
- struct mlx5_profile *prof = dev->profile;
u64 flags;
- int csum = 1;
int err;
memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
sizeof(set_ctx->hca_cap));
- if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
- csum = !!prof->cmdif_csum;
- flags = be64_to_cpu(set_ctx->hca_cap.flags);
- if (csum)
- flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
- else
- flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
- set_ctx->hca_cap.flags = cpu_to_be64(flags);
- }
-
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
+ flags = be64_to_cpu(query_out->hca_cap.flags);
+ /* disable checksum */
+ flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+ set_ctx->hca_cap.flags = cpu_to_be64(flags);
memset(&set_out, 0, sizeof(set_out));
set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
if (err)
goto query_ex;
- if (!csum)
- dev->cmd.checksum_disabled = 1;
-
query_ex:
kfree(query_out);
kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 3a2408d4482..7b12acf210f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
__be64 pas[0];
};
+enum {
+ MAX_RECLAIM_TIME_MSECS = 5000,
+};
+
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int err;
int i;
+ if (nclaimed)
+ *nclaimed = 0;
+
memset(&in, 0, sizeof(in));
outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
- unsigned long end = jiffies + msecs_to_jiffies(5000);
+ unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
struct fw_page *fwp;
struct rb_node *p;
+ int nclaimed = 0;
int err;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
return err;
}
+ if (nclaimed)
+ end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
}
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
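
The loop above keeps a rolling deadline: it starts with MAX_RECLAIM_TIME_MSECS of budget and pushes the deadline forward whenever reclaim_pages() reports progress, so the warning only fires after a full window with no pages returned. A hedged user-space sketch of that pattern, with time() standing in for jiffies and two hypothetical callbacks in place of the driver's rb-tree walk:

#include <stdbool.h>
#include <time.h>

#define MAX_RECLAIM_TIME_SECS 5		/* stands in for MAX_RECLAIM_TIME_MSECS */

/* pages_remaining() and do_reclaim() are hypothetical callbacks standing in
 * for the driver's rb-tree walk and its reclaim_pages() call. */
bool reclaim_all(int (*pages_remaining)(void), int (*do_reclaim)(void))
{
	time_t end = time(NULL) + MAX_RECLAIM_TIME_SECS;

	while (pages_remaining()) {
		int nclaimed = do_reclaim();

		if (nclaimed < 0)
			return false;	/* hard error, give up as the driver does */
		if (nclaimed > 0)	/* progress: push the deadline forward */
			end = time(NULL) + MAX_RECLAIM_TIME_SECS;
		if (time(NULL) > end)
			return false;	/* a full window passed with no progress */
	}
	return true;
}
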
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 83c2091c9c2..ea54d95e5b9 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
netdev_err(ndev, "irq_of_parse_and_map failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto irq_map_fail;
}
priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->tx_desc_base == NULL)
+ if (priv->tx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->rx_desc_base == NULL)
+ if (priv->rx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->tx_buf_base)
+ if (!priv->tx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->rx_buf_base)
+ if (!priv->rx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
platform_set_drvdata(pdev, ndev);
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
init_fail:
netdev_err(ndev, "init failed\n");
moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+ free_netdev(ndev);
return ret;
}
@@ -543,7 +553,7 @@ static const struct of_device_id moxart_mac_match[] = {
{ }
};
-struct __initdata platform_driver moxart_mac_driver = {
+static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
.remove = moxart_remove,
.driver = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 4d7ad0074d1..ff83a9fcd4c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -665,7 +665,7 @@ static int qlcnic_set_channels(struct net_device *dev,
return err;
}
- if (channel->tx_count) {
+ if (qlcnic_82xx_check(adapter) && channel->tx_count) {
err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
if (err)
return err;
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
};
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .set_dump = qlcnic_set_dump,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c4c5023e1fd..9e61eb86745 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
usleep_range(10000, 11000);
+ if (!adapter->fw_work.work.func)
+ return;
+
cancel_delayed_work_sync(&adapter->fw_work);
}
@@ -2254,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
adapter->ahw->revision_id = pdev->revision;
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_start_firmware(adapter);
if (err) {
- dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
- goto err_out_free_hw;
+ dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
+ "\t\tIf reboot doesn't help, try flashing the card\n");
+ goto err_out_maintenance_mode;
}
qlcnic_get_multiq_capability(adapter);
@@ -2392,6 +2396,9 @@ err_out_disable_msi:
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
err_out_free_netdev:
free_netdev(netdev);
@@ -2408,6 +2415,22 @@ err_out_disable_pdev:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
+
+err_out_maintenance_mode:
+ netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ err = register_netdev(netdev);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+ qlcnic_clr_all_drv_state(adapter, 0);
+ goto err_out_free_hw;
+ }
+
+ pci_set_drvdata(pdev, adapter);
+ qlcnic_add_sysfs(adapter);
+
+ return 0;
}
static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2541,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ u32 state;
int err;
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+ netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+
+ return -EIO;
+ }
+
netif_carrier_off(netdev);
err = qlcnic_attach(adapter);
@@ -3228,6 +3259,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
return;
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+ netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+ __func__);
+ qlcnic_api_unlock(adapter);
+
+ return;
+ }
if (state == QLCNIC_DEV_READY) {
QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
@@ -3613,11 +3651,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
u8 max_hw = QLCNIC_MAX_TX_RINGS;
u32 max_allowed;
- if (!qlcnic_82xx_check(adapter)) {
- netdev_err(netdev, "No Multi TX-Q support\n");
- return -EINVAL;
- }
-
if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
return -EINVAL;
@@ -3657,8 +3690,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
u8 max_hw = adapter->ahw->max_rx_ques;
u32 max_allowed;
- if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
- !qlcnic_use_msi) {
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
netdev_err(netdev, "No RSS support in INT-x mode\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 330d9a8774a..686f460b150 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
/* After disabling SRIOV re-init the driver in default mode
configure opmode based on op_mode of function
*/
- if (qlcnic_83xx_configure_opmode(adapter))
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ rtnl_unlock();
return -EIO;
+ }
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
+ rtnl_unlock();
return 0;
}
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
return -EIO;
}
+ rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
__qlcnic_up(adapter, netdev);
error:
+ rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c6165d05cc1..019f4377307 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 state;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ return;
+
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
+
if (device_create_file(dev, &dev_attr_beacon))
dev_info(dev, "failed to create beacon sysfs entry");
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 state;
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ return;
+
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0..6bc5db70392 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
int i;
if (!mpi_coredump) {
- netif_err(qdev, drv, qdev->ndev, "No memory available\n");
- return -ENOMEM;
+ netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+ return -EINVAL;
}
/* Try to get the spinlock, but don't worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e24..7ad146080c3 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
return;
}
- if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5cd831ebfa8..b57c278d3b4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = 0x00000001,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 9f18ae984f9..21f9ad6392e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -444,6 +444,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -498,44 +510,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
(1ULL << EF10_STAT_rx_length_error))
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
- static const unsigned long hunt_40g_stat_mask[] = {
- STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
- HUNT_40G_EXTRA_STAT_MASK)
- };
- static const unsigned long hunt_10g_only_stat_mask[] = {
- STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
- HUNT_10G_ONLY_STAT_MASK)
- };
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK ( \
+ (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+ u64 raw_mask = HUNT_COMMON_STAT_MASK;
u32 port_caps = efx_mcdi_phy_get_caps(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- return hunt_40g_stat_mask;
+ raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
- return hunt_10g_only_stat_mask;
+ raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+ raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+ return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+ u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+ mask[0] = raw_mask;
+#else
+ mask[0] = raw_mask & 0xffffffff;
+ mask[1] = raw_mask >> 32;
+#endif
}
static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+ efx_ef10_get_stat_mask(efx, mask);
return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
- efx_ef10_stat_mask(efx), names);
+ mask, names);
}
static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
__le64 generation_start, generation_end;
u64 *stats = nic_data->stats;
__le64 *dma_stats;
+ efx_ef10_get_stat_mask(efx, mask);
+
dma_stats = efx->stats_buffer.addr;
nic_data = efx->nic_data;
@@ -543,8 +583,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
- efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
stats, efx->stats_buffer.addr, false);
+ rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
@@ -563,12 +604,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
- const unsigned long *mask = efx_ef10_stat_mask(efx);
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
size_t stats_count = 0, index;
int retry;
+ efx_ef10_get_stat_mask(efx, mask);
+
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 128d7cdf9eb..366c8e3e378 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -27,10 +27,10 @@
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 20ms for the status word to be set.
+ * via these mechanisms then wait 250ms for the status word to be set.
*/
#define MCDI_STATUS_DELAY_US 100
-#define MCDI_STATUS_DELAY_COUNT 200
+#define MCDI_STATUS_DELAY_COUNT 2500
#define MCDI_STATUS_SLEEP_MS \
(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
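
The bumped constants still combine the same way: the driver polls every MCDI_STATUS_DELAY_US microseconds, up to MCDI_STATUS_DELAY_COUNT times, so MCDI_STATUS_SLEEP_MS now evaluates to 100 * 2500 / 1000 = 250 ms (previously 100 * 200 / 1000 = 20 ms). A trivial check of that arithmetic:

#include <stdio.h>

#define MCDI_STATUS_DELAY_US	100
#define MCDI_STATUS_DELAY_COUNT	2500
#define MCDI_STATUS_SLEEP_MS \
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

int main(void)
{
	printf("%d ms\n", MCDI_STATUS_SLEEP_MS);	/* prints: 250 ms */
	return 0;
}
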
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
} else {
int count;
- /* Nobody was waiting for an MCDI request, so trigger a reset */
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
-
/* Consume the status word since efx_mcdi_rpc_finish() won't */
for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
udelay(MCDI_STATUS_DELAY_US);
}
mcdi->new_epoch = true;
+
+ /* Nobody was waiting for an MCDI request, so trigger a reset */
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
}
spin_unlock(&mcdi->iface_lock);
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
size_t outlen;
int rc;
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ /* We currently assume we have control of the external link
+ * and are completely trusted by firmware. Abort probing
+ * if that's not true for this function.
+ */
+ if (driver_operating &&
+ outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+ (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+ netif_err(efx, probe, efx->net_dev,
+ "This driver version only supports one function per port\n");
+ return -ENODEV;
+ }
+
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
return 0;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index b5cf62492f8..e0a63ddb7a6 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2574,8 +2574,58 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
-#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
-#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
#define MC_CMD_MAC_NSTATS 0x61 /* enum */
@@ -5065,6 +5115,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
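
The new PM/RXDP statistics above are only meaningful when the PM_AND_RXDP_COUNTERS capability (LBN 27 in the first flags word) is advertised by GET_CAPABILITIES, so a consumer would test that bit before trusting the extra counters. A minimal sketch of such a test; the helper name is ours, not the driver's:

#include <stdbool.h>
#include <stdint.h>

#define PM_AND_RXDP_COUNTERS_LBN 27  /* bit number within flags word 1 */

static bool has_pm_and_rxdp_counters(uint32_t flags1)
{
	return (flags1 >> PM_AND_RXDP_COUNTERS_LBN) & 1;
}
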
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index e7dbd2dd202..9826594c8a4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -469,8 +469,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
- * of this array must be at least the number of set bits in the
- * first @count bits of @mask.
+ * of this array must be at least @count.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
@@ -503,11 +502,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
if (accumulate)
- *stats += val;
+ stats[index] += val;
else
- *stats = val;
+ stats[index] = val;
}
-
- ++stats;
}
}
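
With the nic.c change, @stats is indexed directly by each statistic's position instead of being advanced as a packed array, so disabled entries simply keep their previous values. A simplified userspace sketch of that indexed update; the mask is reduced to one 64-bit word and the DMA buffer to plain u64 slots, unlike the real descriptor-driven reads:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void update_stats(uint64_t mask, uint64_t *stats,
			 const uint64_t *dma_buf, size_t count,
			 bool accumulate)
{
	size_t index;

	for (index = 0; index < count && index < 64; index++) {
		if (!((mask >> index) & 1))
			continue;              /* disabled: leave stats[index] alone */
		if (accumulate)
			stats[index] += dma_buf[index];
		else
			stats[index] = dma_buf[index];
	}
}
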
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index fda29d39032..890bbbe8320 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -386,6 +386,18 @@ enum {
EF10_STAT_rx_align_error,
EF10_STAT_rx_length_error,
EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_rx_pm_trunc_bb_overflow,
+ EF10_STAT_rx_pm_discard_bb_overflow,
+ EF10_STAT_rx_pm_trunc_vfifo_full,
+ EF10_STAT_rx_pm_discard_vfifo_full,
+ EF10_STAT_rx_pm_trunc_qbb,
+ EF10_STAT_rx_pm_discard_qbb,
+ EF10_STAT_rx_pm_discard_mapping,
+ EF10_STAT_rx_dp_q_disabled_packets,
+ EF10_STAT_rx_dp_di_dropped_packets,
+ EF10_STAT_rx_dp_streaming_packets,
+ EF10_STAT_rx_dp_emerg_fetch,
+ EF10_STAT_rx_dp_emerg_wait,
EF10_STAT_COUNT
};
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5730fe2445a..98eedb90cdc 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] = {
void __iomem *__ioaddr = ioaddr; \
if (__len >= 2 && (unsigned long)__ptr & 2) { \
__len -= 2; \
- SMC_outw(*(u16 *)__ptr, ioaddr, \
- DATA_REG(lp)); \
+ SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
__ptr += 2; \
} \
if (SMC_CAN_USE_DATACS && lp->datacs) \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] = {
SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
if (__len & 2) { \
__ptr += (__len & ~3); \
- SMC_outw(*((u16 *)__ptr), ioaddr, \
- DATA_REG(lp)); \
+ SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
} \
} else if (SMC_16BIT(lp)) \
SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 79974e31187..cc3ce557e4a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -639,13 +639,6 @@ void cpsw_rx_handler(void *token, int len, int status)
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
- u32 rx, tx, rx_thresh;
-
- rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
- rx = __raw_readl(&priv->wr_regs->rx_stat);
- tx = __raw_readl(&priv->wr_regs->tx_stat);
- if (!rx_thresh && !rx && !tx)
- return IRQ_NONE;
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
@@ -1169,9 +1162,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
}
}
+ napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- napi_enable(&priv->napi);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
@@ -1771,8 +1764,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
- if (!of_property_read_u32(node, "dual_emac", &prop))
- data->dual_emac = prop;
+ if (of_property_read_bool(node, "dual_emac"))
+ data->dual_emac = 1;
/*
* Populate all the child nodes here...
@@ -1782,7 +1775,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
pr_warn("Doesn't have any child node\n");
- for_each_node_by_name(slave_node, "slave") {
+ for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
u32 phyid;
@@ -1791,6 +1784,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct device_node *mdio_node;
struct platform_device *mdio;
+ /* Not a slave child node, continue */
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
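
for_each_node_by_name() walks the whole device tree, so it could match "slave" nodes belonging to some other device; for_each_child_of_node() plus the name check above restricts the walk to this cpsw node's own children. A standalone analogue of that filtering, with a node layout invented purely for the demo:

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	const struct node *parent;
};

int main(void)
{
	const struct node cpsw  = { "ethernet", NULL };
	const struct node other = { "spi", NULL };
	const struct node nodes[] = {
		{ "slave", &cpsw }, { "slave", &cpsw },
		{ "mdio",  &cpsw }, { "slave", &other },
	};
	size_t i, slaves = 0;

	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		if (nodes[i].parent != &cpsw)        /* children of cpsw only */
			continue;
		if (strcmp(nodes[i].name, "slave"))  /* keep nodes named "slave" */
			continue;
		slaves++;
	}
	printf("slave children of cpsw: %zu\n", slaves); /* prints 2 */
	return 0;
}
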
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 67df09ea9d0..6a32ef9d63a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
- }
- if (!netdev_mc_empty(ndev)) {
+ } else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c8f088ab5fd..bdf697b184a 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.5.0"
+#define DRV_VERSION "1.5.1"
#define DRV_RELDATE "2010-10-09"
#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
if (unlikely(vlan_tx_tag_present(skb))) {
- rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+ u16 vid_pcp = vlan_tx_tag_get(skb);
+
+ /* drop CFI/DEI bit, register needs VID and PCP */
+ vid_pcp = (vid_pcp & VLAN_VID_MASK) |
+ ((vid_pcp & VLAN_PRIO_MASK) >> 1);
+ rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
/* request tagging */
rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
}
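
The 802.1Q TCI carries PCP in bits 15:13, CFI/DEI in bit 12 and the VID in bits 11:0; the Rhine register wants the priority packed directly above the VID, so the priority field is shifted down by one and the CFI bit disappears. A runnable check of that repack, using the standard 802.1Q mask values:

#include <assert.h>
#include <stdint.h>

#define VLAN_VID_MASK  0x0fff  /* VID: bits 11:0  */
#define VLAN_PRIO_MASK 0xe000  /* PCP: bits 15:13 */

int main(void)
{
	uint16_t tci = 0xb064;  /* PCP = 5, CFI = 1, VID = 0x064 */
	uint16_t vid_pcp = (tci & VLAN_VID_MASK) |
			   ((tci & VLAN_PRIO_MASK) >> 1);

	assert(vid_pcp == 0x5064); /* PCP now in bits 14:12, CFI dropped */
	return 0;
}
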
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b88121f240c..0029148077a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ /* Init descriptor indexes */
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_next = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
return 0;
out: