author     Johannes Berg <johannes@sipsolutions.net>    2013-06-17 22:44:02 +0200
committer  David S. Miller <davem@davemloft.net>        2013-06-17 16:04:34 -0700
commit     ab69bde6b2e9c37456eeb0051a185446336aef9f (patch)
tree       240e8f8f1ce05b182ab5c6693622cd962968b468 /drivers
parent     93725cbd22ed716bea8dc479b4925d40a4dbd0c6 (diff)
alx: add a simple AR816x/AR817x device driver
This is a very simple driver, based on the original vendor driver that
Qualcomm/Atheros published/submitted previously, but reworked to make the
code saner. However, it also lost a number of features (TSO/GSO, VLAN
acceleration and multi-queue support) in the process, as well as debugging
support features I didn't have any use for. The only thing I left is
checksum offload.

More features can obviously be added, but this seemed like a good start
for having a driver in mainline at all.

Johannes Stezenbach has verified that the driver works on AR8161, I have
an AR8171 myself. The E2200 device ID I found on github in somebody's
repository.

Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig        |   18
-rw-r--r--  drivers/net/ethernet/atheros/Makefile       |    1
-rw-r--r--  drivers/net/ethernet/atheros/alx/Makefile   |    3
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h      |  114
-rw-r--r--  drivers/net/ethernet/atheros/alx/ethtool.c  |  272
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.c       | 1226
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.h       |  499
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c     | 1625
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h      |  810
9 files changed, 4568 insertions, 0 deletions
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cff..ad6aa1e9834 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
To compile this driver as a module, choose M here. The module
will be called atl1c.
+config ALX
+ tristate "Qualcomm Atheros AR816x/AR817x support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MDIO
+ help
+ This driver supports the Qualcomm Atheros L1F ethernet adapter,
+ i.e. the following chipsets:
+
+ 1969:1091 - AR8161 Gigabit Ethernet
+ 1969:1090 - AR8162 Fast Ethernet
+ 1969:10A1 - AR8171 Gigabit Ethernet
+ 1969:10A0 - AR8172 Fast Ethernet
+
+ To compile this driver as a module, choose M here. The module
+ will be called alx.
+
endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576f..5cf1c65bbce 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 00000000000..5901fa407d5
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 00000000000..50b3ae2b143
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include "hw.h"
+
+#define ALX_WATCHDOG_TIME (5 * HZ)
+
+struct alx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(size);
+};
+
+struct alx_rx_queue {
+ struct alx_rrd *rrd;
+ dma_addr_t rrd_dma;
+
+ struct alx_rfd *rfd;
+ dma_addr_t rfd_dma;
+
+ struct alx_buffer *bufs;
+
+ u16 write_idx, read_idx;
+ u16 rrd_read_idx;
+};
+#define ALX_RX_ALLOC_THRESH 32
+
+struct alx_tx_queue {
+ struct alx_txd *tpd;
+ dma_addr_t tpd_dma;
+ struct alx_buffer *bufs;
+ u16 write_idx, read_idx;
+};
+
+#define ALX_DEFAULT_TX_WORK 128
+
+enum alx_device_quirks {
+ ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
+};
+
+struct alx_priv {
+ struct net_device *dev;
+
+ struct alx_hw hw;
+
+ /* all descriptor memory */
+ struct {
+ dma_addr_t dma;
+ void *virt;
+ int size;
+ } descmem;
+
+ /* protect int_mask updates */
+ spinlock_t irq_lock;
+ u32 int_mask;
+
+ int tx_ringsz;
+ int rx_ringsz;
+ int rxbuf_size;
+
+ struct napi_struct napi;
+ struct alx_tx_queue txq;
+ struct alx_rx_queue rxq;
+
+ struct work_struct link_check_wk;
+ struct work_struct reset_wk;
+
+ u16 msg_enable;
+
+ bool msi;
+};
+
+extern const struct ethtool_ops alx_ethtool_ops;
+extern const char alx_drv_name[];
+
+#endif
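
A minimal sketch, assuming the standard DMA-mapping API, of how the
DEFINE_DMA_UNMAP_ADDR/DEFINE_DMA_UNMAP_LEN fields in struct alx_buffer above
are typically filled and later consumed; the example_* helpers are
hypothetical and not part of this patch (the real ring handling lives in
main.c):

static int example_map_tx_buf(struct device *dmadev, struct alx_buffer *buf,
			      struct sk_buff *skb)
{
	dma_addr_t dma;

	/* map the skb head and remember address/length for the later unmap */
	dma = dma_map_single(dmadev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, dma))
		return -ENOMEM;

	buf->skb = skb;
	dma_unmap_addr_set(buf, dma, dma);
	dma_unmap_len_set(buf, size, skb->len);
	return 0;
}

static void example_unmap_tx_buf(struct device *dmadev, struct alx_buffer *buf)
{
	/* read back what was stored above; the accessors collapse to 0 on
	 * configurations that don't track unmap state
	 */
	dma_unmap_single(dmadev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, size), DMA_TO_DEVICE);
	buf->skb = NULL;
}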
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 00000000000..6fa2aec2bc8
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#include "alx.h"
+#include "reg.h"
+#include "hw.h"
+
+
+static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ ecmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause;
+ if (alx_hw_giga(hw))
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_TP;
+ if (hw->adv_cfg & ADVERTISED_Autoneg)
+ ecmd->advertising |= hw->adv_cfg;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ if (hw->adv_cfg & ADVERTISED_Autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
+ if (hw->flowctrl & ALX_FC_RX) {
+ ecmd->advertising |= ADVERTISED_Pause;
+
+ if (!(hw->flowctrl & ALX_FC_TX))
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ } else if (hw->flowctrl & ALX_FC_TX) {
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ }
+ }
+
+ if (hw->link_speed != SPEED_UNKNOWN) {
+ ethtool_cmd_speed_set(ecmd,
+ hw->link_speed - hw->link_speed % 10);
+ ecmd->duplex = hw->link_speed % 10;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ u32 adv_cfg;
+
+ ASSERT_RTNL();
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+ return -EINVAL;
+ adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+ } else {
+ int speed = ethtool_cmd_speed(ecmd);
+
+ switch (speed + ecmd->duplex) {
+ case SPEED_10 + DUPLEX_HALF:
+ adv_cfg = ADVERTISED_10baseT_Half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ adv_cfg = ADVERTISED_10baseT_Full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ adv_cfg = ADVERTISED_100baseT_Half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ adv_cfg = ADVERTISED_100baseT_Full;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ hw->adv_cfg = adv_cfg;
+ return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+}
+
+static void alx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (hw->flowctrl & ALX_FC_ANEG &&
+ hw->adv_cfg & ADVERTISED_Autoneg)
+ pause->autoneg = AUTONEG_ENABLE;
+ else
+ pause->autoneg = AUTONEG_DISABLE;
+
+ if (hw->flowctrl & ALX_FC_TX)
+ pause->tx_pause = 1;
+ else
+ pause->tx_pause = 0;
+
+ if (hw->flowctrl & ALX_FC_RX)
+ pause->rx_pause = 1;
+ else
+ pause->rx_pause = 0;
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ int err = 0;
+ bool reconfig_phy = false;
+ u8 fc = 0;
+
+ if (pause->tx_pause)
+ fc |= ALX_FC_TX;
+ if (pause->rx_pause)
+ fc |= ALX_FC_RX;
+ if (pause->autoneg)
+ fc |= ALX_FC_ANEG;
+
+ ASSERT_RTNL();
+
+ /* restart auto-neg for auto-mode */
+ if (hw->adv_cfg & ADVERTISED_Autoneg) {
+ if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
+ reconfig_phy = true;
+ if (fc & hw->flowctrl & ALX_FC_ANEG &&
+ (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+ reconfig_phy = true;
+ }
+
+ if (reconfig_phy) {
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
+ return err;
+ }
+
+ /* flow control on mac */
+ if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+ alx_cfg_mac_flowcontrol(hw, fc);
+
+ hw->flowctrl = fc;
+
+ return 0;
+}
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ return alx->msg_enable;
+}
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ alx->msg_enable = data;
+}
+
+static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+
+ hw->sleep_ctrl = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
+
+ device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
+
+ return 0;
+}
+
+static void alx_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+const struct ethtool_ops alx_ethtool_ops = {
+ .get_settings = alx_get_settings,
+ .set_settings = alx_set_settings,
+ .get_pauseparam = alx_get_pauseparam,
+ .set_pauseparam = alx_set_pauseparam,
+ .get_drvinfo = alx_get_drvinfo,
+ .get_msglevel = alx_get_msglevel,
+ .set_msglevel = alx_set_msglevel,
+ .get_wol = alx_get_wol,
+ .set_wol = alx_set_wol,
+ .get_link = ethtool_op_get_link,
+};
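
alx_get_settings() above (and alx_start_mac() later in hw.c) relies on the
driver's packed link-speed convention: hw->link_speed stores SPEED_* +
DUPLEX_*. Because DUPLEX_HALF is 0, DUPLEX_FULL is 1 and every SPEED_* value
is a multiple of ten, the two parts can be recovered with simple arithmetic.
A small illustrative sketch, not part of the patch:

static void example_split_link_speed(int link_speed)
{
	if (link_speed == SPEED_UNKNOWN) {
		pr_info("link down\n");
		return;
	}

	/* e.g. SPEED_1000 + DUPLEX_FULL == 1001 */
	pr_info("link up, %d Mbps, %s duplex\n",
		link_speed - link_speed % 10,
		link_speed % 10 == DUPLEX_FULL ? "full" : "half");
}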
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 00000000000..220a16ad0e4
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+#include "reg.h"
+#include "hw.h"
+
+static inline bool alx_is_rev_a(u8 rev)
+{
+ return rev == ALX_REV_A0 || rev == ALX_REV_A1;
+}
+
+static int alx_wait_mdio_idle(struct alx_hw *hw)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MDIO);
+ if (!(val & ALX_MDIO_BUSY))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+ u16 reg, u16 *phy_data)
+{
+ u32 val, clk_sel;
+ int err;
+
+ *phy_data = 0;
+
+ /* use slow clock when it's in hibernation status */
+ clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+ ALX_MDIO_CLK_SEL_25MD4 :
+ ALX_MDIO_CLK_SEL_25MD128;
+
+ if (ext) {
+ val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+ reg << ALX_MDIO_EXTN_REG_SHIFT;
+ alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+ val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
+ ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
+ } else {
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ reg << ALX_MDIO_REG_SHIFT |
+ ALX_MDIO_START | ALX_MDIO_OP_READ;
+ }
+ alx_write_mem32(hw, ALX_MDIO, val);
+
+ err = alx_wait_mdio_idle(hw);
+ if (err)
+ return err;
+ val = alx_read_mem32(hw, ALX_MDIO);
+ *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
+ return 0;
+}
+
+static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+ u16 reg, u16 phy_data)
+{
+ u32 val, clk_sel;
+
+ /* use slow clock when it's in hibernation status */
+ clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+ ALX_MDIO_CLK_SEL_25MD4 :
+ ALX_MDIO_CLK_SEL_25MD128;
+
+ if (ext) {
+ val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+ reg << ALX_MDIO_EXTN_REG_SHIFT;
+ alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ phy_data << ALX_MDIO_DATA_SHIFT |
+ ALX_MDIO_START | ALX_MDIO_MODE_EXT;
+ } else {
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ reg << ALX_MDIO_REG_SHIFT |
+ phy_data << ALX_MDIO_DATA_SHIFT |
+ ALX_MDIO_START;
+ }
+ alx_write_mem32(hw, ALX_MDIO, val);
+
+ return alx_wait_mdio_idle(hw);
+}
+
+static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+ return alx_read_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+ return alx_write_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+ return alx_read_phy_core(hw, true, dev, reg, pdata);
+}
+
+static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+ return alx_write_phy_core(hw, true, dev, reg, data);
+}
+
+static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+ int err;
+
+ err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+ if (err)
+ return err;
+
+ return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
+}
+
+static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+ int err;
+
+ err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+ if (err)
+ return err;
+
+ return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
+}
+
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_reg(hw, reg, phy_data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_reg(hw, reg, phy_data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_ext(hw, dev, reg, pdata);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_ext(hw, dev, reg, data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_dbg(hw, reg, pdata);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_dbg(hw, reg, data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static u16 alx_get_phy_config(struct alx_hw *hw)
+{
+ u32 val;
+ u16 phy_val;
+
+ val = alx_read_mem32(hw, ALX_PHY_CTRL);
+ /* phy in reset */
+ if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
+ return ALX_DRV_PHY_UNKNOWN;
+
+ val = alx_read_mem32(hw, ALX_DRV);
+ val = ALX_GET_FIELD(val, ALX_DRV_PHY);
+ if (ALX_DRV_PHY_UNKNOWN == val)
+ return ALX_DRV_PHY_UNKNOWN;
+
+ alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
+ if (ALX_PHY_INITED == phy_val)
+ return val;
+
+ return ALX_DRV_PHY_UNKNOWN;
+}
+
+static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
+{
+ u32 read;
+ int i;
+
+ for (i = 0; i < ALX_SLD_MAX_TO; i++) {
+ read = alx_read_mem32(hw, reg);
+ if ((read & wait) == 0) {
+ if (val)
+ *val = read;
+ return true;
+ }
+ mdelay(1);
+ }
+
+ return false;
+}
+
+static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
+{
+ u32 mac0, mac1;
+
+ mac0 = alx_read_mem32(hw, ALX_STAD0);
+ mac1 = alx_read_mem32(hw, ALX_STAD1);
+
+ /* addr should be big-endian */
+ *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
+ *(__be16 *)addr = cpu_to_be16(mac1);
+
+ return is_valid_ether_addr(addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+ u32 val;
+
+ /* try to get it from register first */
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+
+ /* try to load from efuse */
+ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
+ return -EIO;
+ alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
+ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
+ return -EIO;
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+
+ /* try to load from flash/eeprom (if present) */
+ val = alx_read_mem32(hw, ALX_EFLD);
+ if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
+ if (!alx_wait_reg(hw, ALX_EFLD,
+ ALX_EFLD_STAT | ALX_EFLD_START, &val))
+ return -EIO;
+ alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
+ if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
+ return -EIO;
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+ }
+
+ return -EIO;
+}
+
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
+{
+ u32 val;
+
+ /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
+ val = be32_to_cpu(*(__be32 *)(addr + 2));
+ alx_write_mem32(hw, ALX_STAD0, val);
+ val = be16_to_cpu(*(__be16 *)addr);
+ alx_write_mem32(hw, ALX_STAD1, val);
+}
+
+static void alx_enable_osc(struct alx_hw *hw)
+{
+ u32 val;
+
+ /* rising edge */
+ val = alx_read_mem32(hw, ALX_MISC);
+ alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+}
+
+static void alx_reset_osc(struct alx_hw *hw, u8 rev)
+{
+ u32 val, val2;
+
+ /* clear Internal OSC settings, switching OSC by hw itself */
+ val = alx_read_mem32(hw, ALX_MISC3);
+ alx_write_mem32(hw, ALX_MISC3,
+ (val & ~ALX_MISC3_25M_BY_SW) |
+ ALX_MISC3_25M_NOTO_INTNL);
+
+ /* the 25M clock from the chipset may be unstable for 1s after PERST is
+ * de-asserted; the driver must re-calibrate before entering sleep for WoL
+ */
+ val = alx_read_mem32(hw, ALX_MISC);
+ if (rev >= ALX_REV_B0) {
+ /* restore over current protection def-val,
+ * this val could be reset by MAC-RST
+ */
+ ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
+ /* a 0->1 change will update the internal val of osc */
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ alx_write_mem32(hw, ALX_MISC, val);
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+ /* hw will automatically disable the OSC after calibration */
+ val2 = alx_read_mem32(hw, ALX_MSIC2);
+ val2 &= ~ALX_MSIC2_CALB_START;
+ alx_write_mem32(hw, ALX_MSIC2, val2);
+ alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
+ } else {
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ /* disable isolate for rev A devices */
+ if (alx_is_rev_a(rev))
+ val &= ~ALX_MISC_ISO_EN;
+
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+ alx_write_mem32(hw, ALX_MISC, val);
+ }
+
+ udelay(20);
+}
+
+static int alx_stop_mac(struct alx_hw *hw)
+{
+ u32 rxq, txq, val;
+ u16 i;
+
+ rxq = alx_read_mem32(hw, ALX_RXQ0);
+ alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
+ txq = alx_read_mem32(hw, ALX_TXQ0);
+ alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
+
+ udelay(40);
+
+ hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+ for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MAC_STS);
+ if (!(val & ALX_MAC_STS_IDLE))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int alx_reset_mac(struct alx_hw *hw)
+{
+ u32 val, pmctrl;
+ int i, ret;
+ u8 rev;
+ bool a_cr;
+
+ pmctrl = 0;
+ rev = alx_hw_revision(hw);
+ a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
+
+ /* disable all interrupts, RXQ/TXQ */
+ alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
+ alx_write_mem32(hw, ALX_IMR, 0);
+ alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+
+ ret = alx_stop_mac(hw);
+ if (ret)
+ return ret;
+
+ /* mac reset workaround */
+ alx_write_mem32(hw, ALX_RFD_PIDX, 1);
+
+ /* dis l0s/l1 before mac reset */
+ if (a_cr) {
+ pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+ alx_write_mem32(hw, ALX_PMCTRL,
+ pmctrl & ~(ALX_PMCTRL_L1_EN |
+ ALX_PMCTRL_L0S_EN));
+ }
+
+ /* reset whole mac safely */
+ val = alx_read_mem32(hw, ALX_MASTER);
+ alx_write_mem32(hw, ALX_MASTER,
+ val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
+
+ /* make sure it's real idle */
+ udelay(10);
+ for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_RFD_PIDX);
+ if (val == 0)
+ break;
+ udelay(10);
+ }
+ for (; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MASTER);
+ if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
+ break;
+ udelay(10);
+ }
+ if (i == ALX_DMA_MAC_RST_TO)
+ return -EIO;
+ udelay(10);
+
+ if (a_cr) {
+ alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
+ /* restore l0s / l1 */
+ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+ alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+ }
+
+ alx_reset_osc(hw, rev);
+
+ /* clear Internal OSC settings, switching OSC by hw itself,
+ * disable isolate for rev A devices
+ */
+ val = alx_read_mem32(hw, ALX_MISC3);
+ alx_write_mem32(hw, ALX_MISC3,
+ (val & ~ALX_MISC3_25M_BY_SW) |
+ ALX_MISC3_25M_NOTO_INTNL);
+ val = alx_read_mem32(hw, ALX_MISC);
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ if (alx_is_rev_a(rev))
+ val &= ~ALX_MISC_ISO_EN;
+ alx_write_mem32(hw, ALX_MISC, val);
+ udelay(20);
+
+ /* driver control speed/duplex, hash-alg */
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+ val = alx_read_mem32(hw, ALX_SERDES);
+ alx_write_mem32(hw, ALX_SERDES,
+ val | ALX_SERDES_MACCLK_SLWDWN |
+ ALX_SERDES_PHYCLK_SLWDWN);
+
+ return 0;
+}
+
+void alx_reset_phy(struct alx_hw *hw)
+{
+ int i;
+ u32 val;
+ u16 phy_val;
+
+ /* (DSP)reset PHY core */
+ val = alx_read_mem32(hw, ALX_PHY_CTRL);
+ val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
+ ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
+ ALX_PHY_CTRL_CLS);
+ val |= ALX_PHY_CTRL_RST_ANALOG;
+
+ val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
+ alx_write_mem32(hw, ALX_PHY_CTRL, val);
+ udelay(10);
+ alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
+
+ for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
+ udelay(10);
+
+ /* phy power saving & hib */
+ alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
+ ALX_SYSMODCTRL_IECHOADJ_DEF);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
+ ALX_VDRVBIAS_DEF);
+
+ /* EEE advertisement */
+ val = alx_read_mem32(hw, ALX_LPI_CTRL);
+ alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
+
+ /* phy power saving */
+ alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+ phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
+ /* rtl8139c, 120m issue */
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
+ ALX_MIIEXT_NLP78_120M_DEF);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
+ ALX_MIIEXT_S3DIG10_DEF);
+
+ if (hw->lnk_patch) {
+ /* Turn off half amplitude */
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+ phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
+ /* Turn off Green feature */
+ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+ phy_val | ALX_GREENCFG2_BP_GREEN);
+ /* Turn off half Bias */
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+ phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
+ }
+
+ /* set phy interrupt mask */
+ alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
+}
+
+#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+void alx_reset_pcie(struct alx_hw *hw)
+{
+ u8 rev = alx_hw_revision(hw);
+ u32 val;
+ u16 val16;
+
+ /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+ pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
+ if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
+ val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
+ }
+
+ /* clear WoL setting/status */
+ val = alx_read_mem32(hw, ALX_WOL0);
+ alx_write_mem32(hw, ALX_WOL0, 0);
+
+ val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+ alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
+
+ /* mask some pcie error bits */
+ val = alx_read_mem32(hw, ALX_UE_SVRT);
+ val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
+ alx_write_mem32(hw, ALX_UE_SVRT, val);
+
+ /* wol 25M & pclk */
+ val = alx_read_mem32(hw, ALX_MASTER);
+ if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
+ if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+ (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
+ alx_write_mem32(hw, ALX_MASTER,
+ val | ALX_MASTER_PCLKSEL_SRDS |
+ ALX_MASTER_WAKEN_25M);
+ } else {
+ if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+ (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
+ alx_write_mem32(hw, ALX_MASTER,
+ (val & ~ALX_MASTER_PCLKSEL_SRDS) |
+ ALX_MASTER_WAKEN_25M);
+ }
+
+ /* ASPM setting */
+ alx_enable_aspm(hw, true, true);
+
+ udelay(10);
+}
+
+void alx_start_mac(struct alx_hw *hw)
+{
+ u32 mac, txq, rxq;
+
+ rxq = alx_read_mem32(hw, ALX_RXQ0);
+ alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
+ txq = alx_read_mem32(hw, ALX_TXQ0);
+ alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
+
+ mac = hw->rx_ctrl;
+ if (hw->link_speed % 10 == DUPLEX_FULL)
+ mac |= ALX_MAC_CTRL_FULLD;
+ else
+ mac &= ~ALX_MAC_CTRL_FULLD;
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+ hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+ ALX_MAC_CTRL_SPEED_10_100);
+ mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
+ hw->rx_ctrl = mac;
+ alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+}
+
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
+{
+ if (fc & ALX_FC_RX)
+ hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
+ else
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
+
+ if (fc & ALX_FC_TX)
+ hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
+ else
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+ u32 pmctrl;
+ u8 rev = alx_hw_revision(hw);
+
+ pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
+ ALX_PMCTRL_LCKDET_TIMER_DEF);
+ pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
+ ALX_PMCTRL_L1_CLKSW_EN |
+ ALX_PMCTRL_L1_SRDSRX_PWD;
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
+ pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
+ ALX_PMCTRL_L1_SRDSPLL_EN |
+ ALX_PMCTRL_L1_BUFSRX_EN |
+ ALX_PMCTRL_SADLY_EN |
+ ALX_PMCTRL_HOTRST_WTEN|
+ ALX_PMCTRL_L0S_EN |
+ ALX_PMCTRL_L1_EN |
+ ALX_PMCTRL_ASPM_FCEN |
+ ALX_PMCTRL_TXL1_AFTER_L0S |
+ ALX_PMCTRL_RXL1_AFTER_L0S);
+ if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
+ pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
+
+ if (l0s_en)
+ pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
+ if (l1_en)
+ pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
+
+ alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+}
+
+
+static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
+{
+ u32 cfg = 0;
+
+ if (ethadv_cfg & ADVERTISED_Autoneg) {
+ cfg |= ALX_DRV_PHY_AUTO;
+ if (ethadv_cfg & ADVERTISED_10baseT_Half)
+ cfg |= ALX_DRV_PHY_10;
+ if (ethadv_cfg & ADVERTISED_10baseT_Full)
+ cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_100baseT_Half)
+ cfg |= ALX_DRV_PHY_100;
+ if (ethadv_cfg & ADVERTISED_100baseT_Full)
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_1000baseT_Half)
+ cfg |= ALX_DRV_PHY_1000;
+ if (ethadv_cfg & ADVERTISED_1000baseT_Full)
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_Pause)
+ cfg |= ADVERTISE_PAUSE_CAP;
+ if (ethadv_cfg & ADVERTISED_Asym_Pause)
+ cfg |= ADVERTISE_PAUSE_ASYM;
+ } else {
+ switch (ethadv_cfg) {
+ case ADVERTISED_10baseT_Half:
+ cfg |= ALX_DRV_PHY_10;
+ break;
+ case ADVERTISED_100baseT_Half:
+ cfg |= ALX_DRV_PHY_100;
+ break;
+ case ADVERTISED_10baseT_Full:
+ cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+ break;
+ case ADVERTISED_100baseT_Full:
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ break;
+ }
+ }
+
+ return cfg;
+}
+
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
+{
+ u16 adv, giga, cr;
+ u32 val;
+ int err = 0;
+
+ alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
+ val = alx_read_mem32(hw, ALX_DRV);
+ ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
+
+ if (ethadv & ADVERTISED_Autoneg) {
+ adv = ADVERTISE_CSMA;
+ adv |= ethtool_adv_to_mii_adv_t(ethadv);
+
+ if (flowctrl & ALX_FC_ANEG) {
+ if (flowctrl & ALX_FC_RX) {
+ adv |= ADVERTISED_Pause;
+ if (!(flowctrl & ALX_FC_TX))
+ adv |= ADVERTISED_Asym_Pause;
+ } else if (flowctrl & ALX_FC_TX) {
+ adv |= ADVERTISED_Asym_Pause;
+ }
+ }
+ giga = 0;
+ if (alx_hw_giga(hw))
+ giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
+
+ cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+ if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
+ alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
+ alx_write_phy_reg(hw, MII_BMCR, cr))
+ err = -EBUSY;
+ } else {
+ cr = BMCR_RESET;
+ if (ethadv == ADVERTISED_100baseT_Half ||
+ ethadv == ADVERTISED_100baseT_Full)
+ cr |= BMCR_SPEED100;
+ if (ethadv == ADVERTISED_10baseT_Full ||
+ ethadv == ADVERTISED_100baseT_Full)
+ cr |= BMCR_FULLDPLX;
+
+ err = alx_write_phy_reg(hw, MII_BMCR, cr);
+ }
+
+ if (!err) {
+ alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
+ val |= ethadv_to_hw_cfg(hw, ethadv);
+ }
+
+ alx_write_mem32(hw, ALX_DRV, val);
+
+ return err;
+}
+
+
+void alx_post_phy_link(struct alx_hw *hw)
+{
+ u16 phy_val, len, agc;
+ u8 revid = alx_hw_revision(hw);
+ bool adj_th = revid == ALX_REV_B0;
+ int speed;
+
+ if (hw->link_speed == SPEED_UNKNOWN)
+ speed = SPEED_UNKNOWN;
+ else
+ speed = hw->link_speed - hw->link_speed % 10;
+
+ if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
+ return;
+
+ /* 1000BT/AZ, wrong cable length */
+ if (speed != SPEED_UNKNOWN) {
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
+ &phy_val);
+ len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
+ agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
+
+ if ((speed == SPEED_1000 &&
+ (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
+ (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
+ (speed == SPEED_100 &&
+ (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
+ (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+ ALX_AZ_ANADECT_LONG);
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val | ALX_AFE_10BT_100M_TH);
+ } else {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+ ALX_AZ_ANADECT_DEF);
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
+ ALX_MIIEXT_AFE, &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val & ~ALX_AFE_10BT_100M_TH);
+ }
+
+ /* threshold adjust */
+ if (adj_th && hw->lnk_patch) {
+ if (speed == SPEED_100) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+ ALX_MSE16DB_UP);
+ } else if (speed == SPEED_1000) {
+ /*
+ * Giga link threshold, raise the tolerance of
+ * noise 50%
+ */
+ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+ &phy_val);
+ ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+ ALX_MSE20DB_TH_HI);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+ phy_val);
+ }
+ }
+ } else {
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val & ~ALX_AFE_10BT_100M_TH);
+
+ if (adj_th && hw->lnk_patch) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+ ALX_MSE16DB_DOWN);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
+ ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+ ALX_MSE20DB_TH_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
+ }
+ }
+}
+
+
+/* NOTE:
+ * 1. phy link must be established before calling this function
+ * 2. the wol options (pattern, magic, link, etc.) must already be configured.
+ */
+int alx_pre_suspend(struct alx_hw *hw, int speed)
+{
+ u32 master, mac, phy, val;
+ int err = 0;
+
+ master = alx_read_mem32(hw, ALX_MASTER);
+ master &= ~ALX_MASTER_PCLKSEL_SRDS;
+ mac = hw->rx_ctrl;
+ /* 10/100 half */
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
+ mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+
+ phy = alx_read_mem32(hw, ALX_PHY_CTRL);
+ phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
+ phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
+ ALX_PHY_CTRL_HIB_EN;
+
+ /* without any activity */
+ if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
+ err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+ if (err)
+ return err;
+ phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
+ } else {
+ if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
+ mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
+ if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
+ mac |= ALX_MAC_CTRL_TX_EN;
+ if (speed % 10 == DUPLEX_FULL)
+ mac |= ALX_MAC_CTRL_FULLD;
+ if (speed >= SPEED_1000)
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+ ALX_MAC_CTRL_SPEED_1000);
+ phy |= ALX_PHY_CTRL_DSPRST_OUT;
+ err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
+ ALX_MIIEXT_S3DIG10,
+ ALX_MIIEXT_S3DIG10_SL);
+ if (err)
+ return err;
+ }
+
+ alx_enable_osc(hw);
+ hw->rx_ctrl = mac;
+ alx_write_mem32(hw, ALX_MASTER, master);
+ alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+ alx_write_mem32(hw, ALX_PHY_CTRL, phy);
+
+ /* set val of PDLL D3PLLOFF */
+ val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+ val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
+ alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
+
+ return 0;
+}
+
+bool alx_phy_configured(struct alx_hw *hw)
+{
+ u32 cfg, hw_cfg;
+
+ cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
+ cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
+ hw_cfg = alx_get_phy_config(hw);
+
+ if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
+ return false;
+
+ return cfg == hw_cfg;
+}
+
+int alx_get_phy_link(struct alx_hw *hw, int *speed)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u16 bmsr, giga;
+ int err;
+
+ err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+ if (err)
+ return err;
+
+ err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+ if (err)
+ return err;
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ *speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ /* speed/duplex result is saved in PHY Specific Status Register */
+ err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
+ if (err)
+ return err;
+
+ if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
+ goto wrong_speed;
+
+ switch (giga & ALX_GIGA_PSSR_SPEED) {
+ case ALX_GIGA_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case ALX_GIGA_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case ALX_GIGA_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ goto wrong_speed;
+ }
+
+ *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ return 1;
+
+wrong_speed:
+ dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
+ return -EINVAL;
+}
+
+int alx_clear_phy_intr(struct alx_hw *hw)
+{
+ u16 isr;
+
+ /* clear interrupt status by reading it */
+ return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
+}
+
+int alx_config_wol(struct alx_hw *hw)
+{
+ u32 wol = 0;
+ int err = 0;
+
+ /* turn on magic packet event */
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+ wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
+
+ /* turn on link up event */
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
+ wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
+ /* only link up can wake up */
+ err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
+ }
+ alx_write_mem32(hw, ALX_WOL0, wol);
+
+ return err;
+}
+
+void alx_disable_rss(struct alx_hw *hw)
+{
+ u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
+
+ ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
+ alx_write_mem32(hw, ALX_RXQ0, ctrl);
+}
+
+void alx_configure_basic(struct alx_hw *hw)
+{
+ u32 val, raw_mtu, max_payload;
+ u16 val16;
+ u8 chip_rev = alx_hw_revision(hw);
+
+ alx_set_macaddr(hw, hw->mac_addr);
+
+ alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
+
+ /* idle timeout to switch clk_125M */
+ if (chip_rev >= ALX_REV_B0)
+ alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
+ ALX_IDLE_DECISN_TIMER_DEF);
+
+ alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
+
+ val = alx_read_mem32(hw, ALX_MASTER);
+ val |= ALX_MASTER_IRQMOD2_EN |
+ ALX_MASTER_IRQMOD1_EN |
+ ALX_MASTER_SYSALVTIMER_EN;
+ alx_write_mem32(hw, ALX_MASTER, val);
+ alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
+ (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
+ /* intr re-trig timeout */
+ alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
+ /* tpd threshold to trig int */
+ alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
+ alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
+
+ raw_mtu = hw->mtu + ETH_HLEN;
+ alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
+ if (raw_mtu > ALX_MTU_JUMBO_TH)
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
+
+ if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
+ val = (raw_mtu + 8 + 7) >> 3;
+ else
+ val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
+ alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
+
+ max_payload = pcie_get_readrq(hw->pdev) >> 8;
+ /*
+ * if BIOS had changed the default dma read max length,
+ * restore it to default value
+ */
+ if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
+ pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
+
+ val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
+ ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
+ ALX_TXQ0_SUPT_IPOPT |
+ ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
+ alx_write_mem32(hw, ALX_TXQ0, val);
+ val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
+ ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
+ ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
+ ALX_HQTPD_BURST_EN;
+ alx_write_mem32(hw, ALX_HQTPD, val);
+
+ /* rxq, flow control */
+ val = alx_read_mem32(hw, ALX_SRAM5);
+ val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
+ if (val > ALX_SRAM_RXF_LEN_8K) {
+ val16 = ALX_MTU_STD_ALGN >> 3;
+ val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
+ } else {
+ val16 = ALX_MTU_STD_ALGN >> 3;
+ val = (val - ALX_MTU_STD_ALGN) >> 3;
+ }
+ alx_write_mem32(hw, ALX_RXQ2,
+ val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
+ val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
+ val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
+ ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
+ ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
+ ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
+ ALX_RXQ0_IPV6_PARSE_EN;
+
+ if (alx_hw_giga(hw))
+ ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
+ ALX_RXQ0_ASPM_THRESH_100M);
+
+ alx_write_mem32(hw, ALX_RXQ0, val);
+
+ val = alx_read_mem32(hw, ALX_DMA);
+ val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
+ ALX_DMA_RREQ_PRI_DATA |
+ max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
+ ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
+ ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
+ (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
+ alx_write_mem32(hw, ALX_DMA, val);
+
+ /* default multi-tx-q weights */
+ val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
+ 4 << ALX_WRR_PRI0_SHIFT |
+ 4 << ALX_WRR_PRI1_SHIFT |
+ 4 << ALX_WRR_PRI2_SHIFT |
+ 4 << ALX_WRR_PRI3_SHIFT;
+ alx_write_mem32(hw, ALX_WRR, val);
+}
+
+static inline u32 alx_speed_to_ethadv(int speed)
+{
+ switch (speed) {
+ case SPEED_1000 + DUPLEX_FULL:
+ return ADVERTISED_1000baseT_Full;
+ case SPEED_100 + DUPLEX_FULL:
+ return ADVERTISED_100baseT_Full;
+ case SPEED_100 + DUPLEX_HALF:
+ return ADVERTISED_100baseT_Half;
+ case SPEED_10 + DUPLEX_FULL:
+ return ADVERTISED_10baseT_Full;
+ case SPEED_10 + DUPLEX_HALF:
+ return ADVERTISED_10baseT_Half;
+ default:
+ return 0;
+ }
+}
+
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
+{
+ int i, err, spd;
+ u16 lpa;
+
+ err = alx_get_phy_link(hw, &spd);
+ if (err < 0)
+ return err;
+
+ if (spd == SPEED_UNKNOWN)
+ return 0;
+
+ err = alx_read_phy_reg(hw, MII_LPA, &lpa);
+ if (err)
+ return err;
+
+ if (!(lpa & LPA_LPACK)) {
+ *speed = spd;
+ return 0;
+ }
+
+ if (lpa & LPA_10FULL)
+ *speed = SPEED_10 + DUPLEX_FULL;
+ else if (lpa & LPA_10HALF)
+ *speed = SPEED_10 + DUPLEX_HALF;
+ else if (lpa & LPA_100FULL)
+ *speed = SPEED_100 + DUPLEX_FULL;
+ else
+ *speed = SPEED_100 + DUPLEX_HALF;
+
+ if (*speed != spd) {
+ err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+ if (err)
+ return err;
+ err = alx_setup_speed_duplex(hw,
+ alx_speed_to_ethadv(*speed) |
+ ADVERTISED_Autoneg,
+ ALX_FC_ANEG | ALX_FC_RX |
+ ALX_FC_TX);
+ if (err)
+ return err;
+
+ /* wait for linkup */
+ for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+ int speed2;
+
+ msleep(100);
+
+ err = alx_get_phy_link(hw, &speed2);
+ if (err < 0)
+ return err;
+ if (speed2 != SPEED_UNKNOWN)
+ break;
+ }
+ if (i == ALX_MAX_SETUP_LNK_CYCLE)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+bool alx_get_phy_info(struct alx_hw *hw)
+{
+ u16 devs1, devs2;
+
+ if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
+ alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
+ return false;
+
+ /* since we don't have a PMA/PMD status2 register, we can't
+ * use the mdio45_probe function for prtad and mmds.
+ * use fixed MMD3 to get the mmds.
+ */
+ if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
+ alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
+ return false;
+ hw->mdio.mmds = devs1 | devs2 << 16;
+
+ return true;
+}
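
Taken together, the helpers in this file form the whole hardware bring-up
path. The sketch below shows one plausible ordering of the calls, as an
editor-supplied illustration only; the real probe/open sequence lives in
main.c (not shown in this hunk) and error handling is abbreviated:

static int example_hw_bringup(struct alx_hw *hw)
{
	int err, speed;

	alx_reset_pcie(hw);			/* PCIe-level fixups first */
	err = alx_reset_mac(hw);		/* then reset the MAC block */
	if (err)
		return err;
	alx_reset_phy(hw);			/* and the PHY core */

	/* advertise according to adv_cfg/flowctrl and (re)start autoneg */
	err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
	if (err)
		return err;

	/* once the PHY reports link, apply the link tweaks and start the MAC */
	err = alx_get_phy_link(hw, &speed);
	if (err < 0)
		return err;
	if (speed != SPEED_UNKNOWN) {
		hw->link_speed = speed;
		alx_post_phy_link(hw);
		alx_start_mac(hw);
	}
	return 0;
}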
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 00000000000..65e723d2172
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_HW_H_
+#define ALX_HW_H_
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include "reg.h"
+
+/* Transmit Packet Descriptor, contains 4 32-bit words.
+ *
+ * 31 16 0
+ * +----------------+----------------+
+ * | vlan-tag | buf length |
+ * +----------------+----------------+
+ * | Word 1 |
+ * +----------------+----------------+
+ * | Word 2: buf addr lo |
+ * +----------------+----------------+
+ * | Word 3: buf addr hi |
+ * +----------------+----------------+
+ *
+ * Word 2 and 3 combine to form a 64-bit buffer address
+ *
+ * Word 1 has three forms, depending on the state of bit 8/12/13:
+ * if bit8 == '1', the definition is just for custom checksum offload.
+ * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
+ * for the skb is special for LSO V2: Word 2 becomes the total skb length
+ * and Word 3 is meaningless.
+ * in any other case, the definition is for a general skb or ip/tcp/udp
+ * checksum or LSO(TSO) offload.
+ *
+ * Here is the depiction:
+ *
+ * 0-+ 0-+
+ * 1 | 1 |
+ * 2 | 2 |
+ * 3 | Payload offset 3 | L4 header offset
+ * 4 | (7:0) 4 | (7:0)
+ * 5 | 5 |
+ * 6 | 6 |
+ * 7-+ 7-+
+ * 8 Custom csum enable = 1 8 Custom csum enable = 0
+ * 9 General IPv4 checksum 9 General IPv4 checksum
+ * 10 General TCP checksum 10 General TCP checksum
+ * 11 General UDP checksum 11 General UDP checksum
+ * 12 Large Send Segment enable 12 Large Send Segment enable
+ * 13 Large Send Segment type 13 Large Send Segment type
+ * 14 VLAN tagged 14 VLAN tagged
+ * 15 Insert VLAN tag 15 Insert VLAN tag
+ * 16 IPv4 packet 16 IPv4 packet
+ * 17 Ethernet frame type 17 Ethernet frame type
+ * 18-+ 18-+
+ * 19 | 19 |
+ * 20 | 20 |
+ * 21 | Custom csum offset 21 |
+ * 22 | (25:18) 22 |
+ * 23 | 23 | MSS (30:18)
+ * 24 | 24 |
+ * 25-+ 25 |
+ * 26-+ 26 |
+ * 27 | 27 |
+ * 28 | Reserved 28 |
+ * 29 | 29 |
+ * 30-+ 30-+
+ * 31 End of packet 31 End of packet
+ */
+struct alx_txd {
+ __le16 len;
+ __le16 vlan_tag;
+ __le32 word1;
+ union {
+ __le64 addr;
+ struct {
+ __le32 pkt_len;
+ __le32 resvd;
+ } l;
+ } adrl;
+} __packed;
+
+/* tpd word 1 */
+#define TPD_CXSUMSTART_MASK 0x00FF
+#define TPD_CXSUMSTART_SHIFT 0
+#define TPD_L4HDROFFSET_MASK 0x00FF
+#define TPD_L4HDROFFSET_SHIFT 0
+#define TPD_CXSUM_EN_MASK 0x0001
+#define TPD_CXSUM_EN_SHIFT 8
+#define TPD_IP_XSUM_MASK 0x0001
+#define TPD_IP_XSUM_SHIFT 9
+#define TPD_TCP_XSUM_MASK 0x0001
+#define TPD_TCP_XSUM_SHIFT 10
+#define TPD_UDP_XSUM_MASK 0x0001
+#define TPD_UDP_XSUM_SHIFT 11
+#define TPD_LSO_EN_MASK 0x0001
+#define TPD_LSO_EN_SHIFT 12
+#define TPD_LSO_V2_MASK 0x0001
+#define TPD_LSO_V2_SHIFT 13
+#define TPD_VLTAGGED_MASK 0x0001
+#define TPD_VLTAGGED_SHIFT 14
+#define TPD_INS_VLTAG_MASK 0x0001
+#define TPD_INS_VLTAG_SHIFT 15
+#define TPD_IPV4_MASK 0x0001
+#define TPD_IPV4_SHIFT 16
+#define TPD_ETHTYPE_MASK 0x0001
+#define TPD_ETHTYPE_SHIFT 17
+#define TPD_CXSUMOFFSET_MASK 0x00FF
+#define TPD_CXSUMOFFSET_SHIFT 18
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 18
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 31
+
+#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
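
A short sketch of how the TPD word1 bits above could be assembled for an
IPv4/TCP checksum-offload packet; illustrative only, the example_* helper is
hypothetical and the real TX path is in main.c:

static void example_fill_txd_csum(struct alx_txd *txd, dma_addr_t addr, u16 len)
{
	u32 word1 = 0;

	word1 |= 1U << TPD_IP_XSUM_SHIFT;	/* offload the IPv4 header csum */
	word1 |= 1U << TPD_TCP_XSUM_SHIFT;	/* offload the TCP csum */
	word1 |= 1U << TPD_IPV4_SHIFT;		/* IPv4 frame */
	word1 |= 1U << TPD_EOP_SHIFT;		/* last descriptor of the packet */

	txd->len = cpu_to_le16(len);
	txd->vlan_tag = 0;
	txd->word1 = cpu_to_le32(word1);
	txd->adrl.addr = cpu_to_le64(addr);
}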
+
+/* Receive Free Descriptor */
+struct alx_rfd {
+ __le64 addr; /* data buffer address, length is
+ * declared in register --- every
+ * buffer has the same size
+ */
+} __packed;
+
+/* Receive Return Descriptor, contains 4 32-bit words.
+ *
+ * 31 16 0
+ * +----------------+----------------+
+ * | Word 0 |
+ * +----------------+----------------+
+ * | Word 1: RSS Hash value |
+ * +----------------+----------------+
+ * | Word 2 |
+ * +----------------+----------------+
+ * | Word 3 |
+ * +----------------+----------------+
+ *
+ * Word 0 depiction & Word 2 depiction:
+ *
+ * 0--+ 0--+
+ * 1 | 1 |
+ * 2 | 2 |
+ * 3 | 3 |
+ * 4 | 4 |
+ * 5 | 5 |
+ * 6 | 6 |
+ * 7 | IP payload checksum 7 | VLAN tag
+ * 8 | (15:0) 8 | (15:0)
+ * 9 | 9 |
+ * 10 | 10 |
+ * 11 | 11 |
+ * 12 | 12 |
+ * 13 | 13 |
+ * 14 | 14 |
+ * 15-+ 15-+
+ * 16-+ 16-+
+ * 17 | Number of RFDs 17 |
+ * 18 | (19:16) 18 |
+ * 19-+ 19 | Protocol ID
+ * 20-+ 20 | (23:16)
+ * 21 | 21 |
+ * 22 | 22 |
+ * 23 | 23-+
+ * 24 | 24 | Reserved
+ * 25 | Start index of RFD-ring 25-+
+ * 26 | (31:20) 26 | RSS Q-num (27:25)
+ * 27 | 27-+
+ * 28 | 28-+
+ * 29 | 29 | RSS Hash algorithm
+ * 30 | 30 | (31:28)
+ * 31-+ 31-+
+ *
+ * Word 3 depiction:
+ *
+ * 0--+
+ * 1 |
+ * 2 |
+ * 3 |
+ * 4 |
+ * 5 |
+ * 6 |
+ * 7 | Packet length (include FCS)
+ * 8 | (13:0)
+ * 9 |
+ * 10 |
+ * 11 |
+ * 12 |
+ * 13-+
+ * 14 L4 Header checksum error
+ * 15 IPv4 checksum error
+ * 16 VLAN tagged
+ * 17-+
+ * 18 | Protocol ID (19:17)
+ * 19-+
+ * 20 Receive error summary
+ * 21 FCS(CRC) error
+ * 22 Frame alignment error
+ * 23 Truncated packet
+ * 24 Runt packet
+ * 25 Incomplete packet due to insufficient rx-desc
+ * 26 Broadcast packet
+ * 27 Multicast packet
+ * 28 Ethernet type (EII or 802.3)
+ * 29 FIFO overflow
+ * 30 Length error (for 802.3, length field mismatch with actual len)
+ * 31 Updated, indicate to driver that this RRD is refreshed.
+ */
+struct alx_rrd {
+ __le32 word0;
+ __le32 rss_hash;
+ __le32 word2;
+ __le32 word3;
+} __packed;
+
+/* rrd word 0 */
+#define RRD_XSUM_MASK 0xFFFF
+#define RRD_XSUM_SHIFT 0
+#define RRD_NOR_MASK 0x000F
+#define RRD_NOR_SHIFT 16
+#define RRD_SI_MASK 0x0FFF
+#define RRD_SI_SHIFT 20
+
+/* rrd word 2 */
+#define RRD_VLTAG_MASK 0xFFFF
+#define RRD_VLTAG_SHIFT 0
+#define RRD_PID_MASK 0x00FF
+#define RRD_PID_SHIFT 16
+/* non-ip packet */
+#define RRD_PID_NONIP 0
+/* ipv4(only) */
+#define RRD_PID_IPV4 1
+/* tcp/ipv6 */
+#define RRD_PID_IPV6TCP 2
+/* tcp/ipv4 */
+#define RRD_PID_IPV4TCP 3
+/* udp/ipv6 */
+#define RRD_PID_IPV6UDP 4
+/* udp/ipv4 */
+#define RRD_PID_IPV4UDP 5
+/* ipv6(only) */
+#define RRD_PID_IPV6 6
+/* LLDP packet */
+#define RRD_PID_LLDP 7
+/* 1588 packet */
+#define RRD_PID_1588 8
+#define RRD_RSSQ_MASK 0x0007
+#define RRD_RSSQ_SHIFT 25
+#define RRD_RSSALG_MASK 0x000F
+#define RRD_RSSALG_SHIFT 28
+#define RRD_RSSALG_TCPV6 0x1
+#define RRD_RSSALG_IPV6 0x2
+#define RRD_RSSALG_TCPV4 0x4
+#define RRD_RSSALG_IPV4 0x8
+
+/* rrd word 3 */
+#define RRD_PKTLEN_MASK 0x3FFF
+#define RRD_PKTLEN_SHIFT 0
+#define RRD_ERR_L4_MASK 0x0001
+#define RRD_ERR_L4_SHIFT 14
+#define RRD_ERR_IPV4_MASK 0x0001
+#define RRD_ERR_IPV4_SHIFT 15
+#define RRD_VLTAGGED_MASK 0x0001
+#define RRD_VLTAGGED_SHIFT 16
+#define RRD_OLD_PID_MASK 0x0007
+#define RRD_OLD_PID_SHIFT 17
+#define RRD_ERR_RES_MASK 0x0001
+#define RRD_ERR_RES_SHIFT 20
+#define RRD_ERR_FCS_MASK 0x0001
+#define RRD_ERR_FCS_SHIFT 21
+#define RRD_ERR_FAE_MASK 0x0001
+#define RRD_ERR_FAE_SHIFT 22
+#define RRD_ERR_TRUNC_MASK 0x0001
+#define RRD_ERR_TRUNC_SHIFT 23
+#define RRD_ERR_RUNT_MASK 0x0001
+#define RRD_ERR_RUNT_SHIFT 24
+#define RRD_ERR_ICMP_MASK 0x0001
+#define RRD_ERR_ICMP_SHIFT 25
+#define RRD_BCAST_MASK 0x0001
+#define RRD_BCAST_SHIFT 26
+#define RRD_MCAST_MASK 0x0001
+#define RRD_MCAST_SHIFT 27
+#define RRD_ETHTYPE_MASK 0x0001
+#define RRD_ETHTYPE_SHIFT 28
+#define RRD_ERR_FIFOV_MASK 0x0001
+#define RRD_ERR_FIFOV_SHIFT 29
+#define RRD_ERR_LEN_MASK 0x0001
+#define RRD_ERR_LEN_SHIFT 30
+#define RRD_UPDATED_MASK 0x0001
+#define RRD_UPDATED_SHIFT 31
+
+
+#define ALX_MAX_SETUP_LNK_CYCLE 50
+
+/* for FlowControl */
+#define ALX_FC_RX 0x01
+#define ALX_FC_TX 0x02
+#define ALX_FC_ANEG 0x04
+
+/* for sleep control */
+#define ALX_SLEEP_WOL_PHY 0x00000001
+#define ALX_SLEEP_WOL_MAGIC 0x00000002
+#define ALX_SLEEP_CIFS 0x00000004
+#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
+ ALX_SLEEP_WOL_MAGIC | \
+ ALX_SLEEP_CIFS)
+
+/* for RSS hash type */
+#define ALX_RSS_HASH_TYPE_IPV4 0x1
+#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
+#define ALX_RSS_HASH_TYPE_IPV6 0x4
+#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
+#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
+ ALX_RSS_HASH_TYPE_IPV4_TCP | \
+ ALX_RSS_HASH_TYPE_IPV6 | \
+ ALX_RSS_HASH_TYPE_IPV6_TCP)
+#define ALX_DEF_RXBUF_SIZE 1536
+#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
+#define ALX_MAX_TSO_PKT_SIZE (7*1024)
+#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_FRAME_SIZE 68
+#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+#define ALX_MAX_RX_QUEUES 8
+#define ALX_MAX_TX_QUEUES 4
+#define ALX_MAX_HANDLED_INTRS 5
+
+#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
+ ALX_ISR_DMAW | \
+ ALX_ISR_DMAR | \
+ ALX_ISR_SMB | \
+ ALX_ISR_MANU | \
+ ALX_ISR_TIMER)
+
+#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
+ ALX_ISR_DMAW | ALX_ISR_DMAR)
+
+#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
+ ALX_ISR_TXF_UR | \
+ ALX_ISR_RFD_UR)
+
+#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
+ ALX_ISR_TX_Q1 | \
+ ALX_ISR_TX_Q2 | \
+ ALX_ISR_TX_Q3 | \
+ ALX_ISR_RX_Q0 | \
+ ALX_ISR_RX_Q1 | \
+ ALX_ISR_RX_Q2 | \
+ ALX_ISR_RX_Q3 | \
+ ALX_ISR_RX_Q4 | \
+ ALX_ISR_RX_Q5 | \
+ ALX_ISR_RX_Q6 | \
+ ALX_ISR_RX_Q7)
+
+/* maximum interrupt vectors for msix */
+#define ALX_MAX_MSIX_INTRS 16
+
+#define ALX_GET_FIELD(_data, _field) \
+ (((_data) >> _field ## _SHIFT) & _field ## _MASK)
+
+#define ALX_SET_FIELD(_data, _field, _value) do { \
+ (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
+ (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
+ } while (0)
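+
+/* Both helpers rely on the field's _MASK/_SHIFT pair defined above; for
+ * example, the RX path extracts the packet length from an RRD with
+ * ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN).
+ */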
+
+struct alx_hw {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+
+ /* current & permanent mac addr */
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ u16 mtu;
+ u16 imt;
+ u8 dma_chnl;
+ u8 max_dma_chnl;
+ /* TPD threshold to trigger an interrupt */
+ u32 ith_tpd;
+ u32 rx_ctrl;
+ u32 mc_hash[2];
+
+ u32 smb_timer;
+ /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
+ int link_speed;
+
+ /* auto-neg advertisement or force mode config */
+ u32 adv_cfg;
+ u8 flowctrl;
+
+ u32 sleep_ctrl;
+
+ spinlock_t mdio_lock;
+ struct mdio_if_info mdio;
+ u16 phy_id[2];
+
+ /* PHY link patch flag */
+ bool lnk_patch;
+};
+
+static inline int alx_hw_revision(struct alx_hw *hw)
+{
+ return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
+}
+
+static inline bool alx_hw_with_cr(struct alx_hw *hw)
+{
+ return hw->pdev->revision & 1;
+}
+
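+/* The gigabit chips (AR8161/AR8171) have odd PCI device IDs, the fast
+ * ethernet variants (AR8162/AR8172) even ones, so bit 0 of the device ID
+ * tells them apart.
+ */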
+static inline bool alx_hw_giga(struct alx_hw *hw)
+{
+ return hw->pdev->device & 1;
+}
+
+static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
+{
+ writeb(val, hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
+{
+ writew(val, hw->hw_addr + reg);
+}
+
+static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
+{
+ return readw(hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
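+/* read back from the device to flush posted PCI writes */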
+static inline void alx_post_write(struct alx_hw *hw)
+{
+ readl(hw->hw_addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+void alx_reset_phy(struct alx_hw *hw);
+void alx_reset_pcie(struct alx_hw *hw);
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
+void alx_post_phy_link(struct alx_hw *hw);
+int alx_pre_suspend(struct alx_hw *hw, int speed);
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
+int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_clear_phy_intr(struct alx_hw *hw);
+int alx_config_wol(struct alx_hw *hw);
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
+void alx_start_mac(struct alx_hw *hw);
+int alx_reset_mac(struct alx_hw *hw);
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
+bool alx_phy_configured(struct alx_hw *hw);
+void alx_configure_basic(struct alx_hw *hw);
+void alx_disable_rss(struct alx_hw *hw);
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
+bool alx_get_phy_info(struct alx_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 00000000000..418de8b1316
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip6_checksum.h>
+#include <linux/crc32.h>
+#include "alx.h"
+#include "hw.h"
+#include "reg.h"
+
+const char alx_drv_name[] = "alx";
+
+
+static void alx_free_txbuf(struct alx_priv *alx, int entry)
+{
+ struct alx_buffer *txb = &alx->txq.bufs[entry];
+
+ if (dma_unmap_len(txb, size)) {
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(txb, dma),
+ dma_unmap_len(txb, size),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(txb, size, 0);
+ }
+
+ if (txb->skb) {
+ dev_kfree_skb_any(txb->skb);
+ txb->skb = NULL;
+ }
+}
+
+static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct sk_buff *skb;
+ struct alx_buffer *cur_buf;
+ dma_addr_t dma;
+ u16 cur, next, count = 0;
+
+ next = cur = rxq->write_idx;
+ if (++next == alx->rx_ringsz)
+ next = 0;
+ cur_buf = &rxq->bufs[cur];
+
+ while (!cur_buf->skb && next != rxq->read_idx) {
+ struct alx_rfd *rfd = &rxq->rfd[cur];
+
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ if (!skb)
+ break;
+ dma = dma_map_single(&alx->hw.pdev->dev,
+ skb->data, alx->rxbuf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ /* Unfortunately, RX descriptor buffers must be 4-byte
+ * aligned, so we can't use IP alignment.
+ */
+ if (WARN_ON(dma & 3)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ cur_buf->skb = skb;
+ dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
+ dma_unmap_addr_set(cur_buf, dma, dma);
+ rfd->addr = cpu_to_le64(dma);
+
+ cur = next;
+ if (++next == alx->rx_ringsz)
+ next = 0;
+ cur_buf = &rxq->bufs[cur];
+ count++;
+ }
+
+ if (count) {
+ /* flush all updates before updating hardware */
+ wmb();
+ rxq->write_idx = cur;
+ alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
+ }
+
+ return count;
+}
+
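+/* Free TPD slots in the TX ring; one slot is always left unused so that
+ * a completely full ring can be distinguished from an empty one.
+ */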
+static inline int alx_tpd_avail(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+
+ if (txq->write_idx >= txq->read_idx)
+ return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+ return txq->read_idx - txq->write_idx - 1;
+}
+
+static bool alx_clean_tx_irq(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ u16 hw_read_idx, sw_read_idx;
+ unsigned int total_bytes = 0, total_packets = 0;
+ int budget = ALX_DEFAULT_TX_WORK;
+
+ sw_read_idx = txq->read_idx;
+ hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+
+ if (sw_read_idx != hw_read_idx) {
+ while (sw_read_idx != hw_read_idx && budget > 0) {
+ struct sk_buff *skb;
+
+ skb = txq->bufs[sw_read_idx].skb;
+ if (skb) {
+ total_bytes += skb->len;
+ total_packets++;
+ budget--;
+ }
+
+ alx_free_txbuf(alx, sw_read_idx);
+
+ if (++sw_read_idx == alx->tx_ringsz)
+ sw_read_idx = 0;
+ }
+ txq->read_idx = sw_read_idx;
+
+ netdev_completed_queue(alx->dev, total_packets, total_bytes);
+ }
+
+ if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
+ alx_tpd_avail(alx) > alx->tx_ringsz/4)
+ netif_wake_queue(alx->dev);
+
+ return sw_read_idx == hw_read_idx;
+}
+
+static void alx_schedule_link_check(struct alx_priv *alx)
+{
+ schedule_work(&alx->link_check_wk);
+}
+
+static void alx_schedule_reset(struct alx_priv *alx)
+{
+ schedule_work(&alx->reset_wk);
+}
+
+static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_rrd *rrd;
+ struct alx_buffer *rxb;
+ struct sk_buff *skb;
+ u16 length, rfd_cleaned = 0;
+
+ while (budget > 0) {
+ rrd = &rxq->rrd[rxq->rrd_read_idx];
+ if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+ break;
+ rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
+
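+ /* The RRD must reference the RFD we expect to consume next and
+ * describe exactly one buffer; anything else means the rings are
+ * out of sync, so schedule a reset.
+ */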
+ if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_SI) != rxq->read_idx ||
+ ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_NOR) != 1) {
+ alx_schedule_reset(alx);
+ return 0;
+ }
+
+ rxb = &rxq->bufs[rxq->read_idx];
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(rxb, dma),
+ dma_unmap_len(rxb, size),
+ DMA_FROM_DEVICE);
+ dma_unmap_len_set(rxb, size, 0);
+ skb = rxb->skb;
+ rxb->skb = NULL;
+
+ if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
+ rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
+ rrd->word3 = 0;
+ dev_kfree_skb_any(skb);
+ goto next_pkt;
+ }
+
+ length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
+ RRD_PKTLEN) - ETH_FCS_LEN;
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, alx->dev);
+
+ skb_checksum_none_assert(skb);
+ if (alx->dev->features & NETIF_F_RXCSUM &&
+ !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
+ cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
+ switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
+ RRD_PID)) {
+ case RRD_PID_IPV6UDP:
+ case RRD_PID_IPV4UDP:
+ case RRD_PID_IPV4TCP:
+ case RRD_PID_IPV6TCP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+ }
+
+ napi_gro_receive(&alx->napi, skb);
+ budget--;
+
+next_pkt:
+ if (++rxq->read_idx == alx->rx_ringsz)
+ rxq->read_idx = 0;
+ if (++rxq->rrd_read_idx == alx->rx_ringsz)
+ rxq->rrd_read_idx = 0;
+
+ if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+ rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
+ }
+
+ if (rfd_cleaned)
+ alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+ return budget > 0;
+}
+
+static int alx_poll(struct napi_struct *napi, int budget)
+{
+ struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+ struct alx_hw *hw = &alx->hw;
+ bool complete = true;
+ unsigned long flags;
+
+ complete = alx_clean_tx_irq(alx) &&
+ alx_clean_rx_irq(alx, budget);
+
+ if (!complete)
+ return 1;
+
+ napi_complete(&alx->napi);
+
+ /* re-enable RX/TX queue interrupts */
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+ alx_post_write(hw);
+
+ return 0;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+ struct alx_hw *hw = &alx->hw;
+ bool write_int_mask = false;
+
+ spin_lock(&alx->irq_lock);
+
+ /* ACK interrupt */
+ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+ intr &= alx->int_mask;
+
+ if (intr & ALX_ISR_FATAL) {
+ netif_warn(alx, hw, alx->dev,
+ "fatal interrupt 0x%x, resetting\n", intr);
+ alx_schedule_reset(alx);
+ goto out;
+ }
+
+ if (intr & ALX_ISR_ALERT)
+ netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
+
+ if (intr & ALX_ISR_PHY) {
+ /* Suppress further PHY interrupts: their source is internal
+ * to the PHY, and the main interrupt status can only be
+ * cleared after the PHY's internal status has been cleared
+ * (done from the link check worker).
+ */
+ alx->int_mask &= ~ALX_ISR_PHY;
+ write_int_mask = true;
+ alx_schedule_link_check(alx);
+ }
+
+ if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
+ napi_schedule(&alx->napi);
+ /* mask RX/TX queue interrupts; they are re-enabled when NAPI completes */
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+ write_int_mask = true;
+ }
+
+ if (write_int_mask)
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+
+ alx_write_mem32(hw, ALX_ISR, 0);
+
+ out:
+ spin_unlock(&alx->irq_lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msi(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+
+ return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
+}
+
+static irqreturn_t alx_intr_legacy(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+ u32 intr;
+
+ intr = alx_read_mem32(hw, ALX_ISR);
+
+ if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
+ return IRQ_NONE;
+
+ return alx_intr_handle(alx, intr);
+}
+
+static void alx_init_ring_ptrs(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+
+ alx->rxq.read_idx = 0;
+ alx->rxq.write_idx = 0;
+ alx->rxq.rrd_read_idx = 0;
+ alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
+ alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
+ alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
+ alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
+ alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
+
+ alx->txq.read_idx = 0;
+ alx->txq.write_idx = 0;
+ alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
+ alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
+
+ /* load these pointers into the chip */
+ alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
+}
+
+static void alx_free_txring_buf(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ int i;
+
+ if (!txq->bufs)
+ return;
+
+ for (i = 0; i < alx->tx_ringsz; i++)
+ alx_free_txbuf(alx, i);
+
+ memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
+ memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+ txq->write_idx = 0;
+ txq->read_idx = 0;
+
+ netdev_reset_queue(alx->dev);
+}
+
+static void alx_free_rxring_buf(struct alx_priv *alx)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_buffer *cur_buf;
+ u16 i;
+
+ if (!rxq->bufs)
+ return;
+
+ for (i = 0; i < alx->rx_ringsz; i++) {
+ cur_buf = rxq->bufs + i;
+ if (cur_buf->skb) {
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(cur_buf, dma),
+ dma_unmap_len(cur_buf, size),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(cur_buf->skb);
+ cur_buf->skb = NULL;
+ dma_unmap_len_set(cur_buf, size, 0);
+ dma_unmap_addr_set(cur_buf, dma, 0);
+ }
+ }
+
+ rxq->write_idx = 0;
+ rxq->read_idx = 0;
+ rxq->rrd_read_idx = 0;
+}
+
+static void alx_free_buffers(struct alx_priv *alx)
+{
+ alx_free_txring_buf(alx);
+ alx_free_rxring_buf(alx);
+}
+
+static int alx_reinit_rings(struct alx_priv *alx)
+{
+ alx_free_buffers(alx);
+
+ alx_init_ring_ptrs(alx);
+
+ if (!alx_refill_rx_ring(alx, GFP_KERNEL))
+ return -ENOMEM;
+
+ return 0;
+}
+
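+/* Hash a multicast address into the two 32-bit hash table registers:
+ * bit 31 of the CRC selects the register, bits 30..26 select the bit
+ * within it.
+ */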
+static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
+{
+ u32 crc32, bit, reg;
+
+ crc32 = ether_crc(ETH_ALEN, addr);
+ reg = (crc32 >> 31) & 0x1;
+ bit = (crc32 >> 26) & 0x1F;
+
+ mc_hash[reg] |= BIT(bit);
+}
+
+static void __alx_set_rx_mode(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ struct netdev_hw_addr *ha;
+ u32 mc_hash[2] = {};
+
+ if (!(netdev->flags & IFF_ALLMULTI)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ alx_add_mc_addr(hw, ha->addr, mc_hash);
+
+ alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
+ alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
+ }
+
+ hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
+ if (netdev->flags & IFF_PROMISC)
+ hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
+ if (netdev->flags & IFF_ALLMULTI)
+ hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_set_rx_mode(struct net_device *netdev)
+{
+ __alx_set_rx_mode(netdev);
+}
+
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+ netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+ alx_set_macaddr(hw, hw->mac_addr);
+
+ return 0;
+}
+
+static int alx_alloc_descriptors(struct alx_priv *alx)
+{
+ alx->txq.bufs = kcalloc(alx->tx_ringsz,
+ sizeof(struct alx_buffer),
+ GFP_KERNEL);
+ if (!alx->txq.bufs)
+ return -ENOMEM;
+
+ alx->rxq.bufs = kcalloc(alx->rx_ringsz,
+ sizeof(struct alx_buffer),
+ GFP_KERNEL);
+ if (!alx->rxq.bufs)
+ goto out_free;
+
+ /* physical tx/rx ring descriptors
+ *
+ * Allocate them as a single chunk because they must not cross a
+ * 4G boundary (hardware has a single register for high 32 bits
+ * of addresses only)
+ */
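+ /* The chunk is laid out as TPD ring, then RRD ring, then RFD ring;
+ * the per-ring DMA addresses computed below are programmed into the
+ * chip by alx_init_ring_ptrs().
+ */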
+ alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz +
+ sizeof(struct alx_rfd) * alx->rx_ringsz;
+ alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ &alx->descmem.dma,
+ GFP_KERNEL);
+ if (!alx->descmem.virt)
+ goto out_free;
+
+ alx->txq.tpd = (void *)alx->descmem.virt;
+ alx->txq.tpd_dma = alx->descmem.dma;
+
+ /* alignment requirement for next block */
+ BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+
+ alx->rxq.rrd =
+ (void *)((u8 *)alx->descmem.virt +
+ sizeof(struct alx_txd) * alx->tx_ringsz);
+ alx->rxq.rrd_dma = alx->descmem.dma +
+ sizeof(struct alx_txd) * alx->tx_ringsz;
+
+ /* alignment requirement for next block */
+ BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+
+ alx->rxq.rfd =
+ (void *)((u8 *)alx->descmem.virt +
+ sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz);
+ alx->rxq.rfd_dma = alx->descmem.dma +
+ sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz;
+
+ return 0;
+out_free:
+ kfree(alx->txq.bufs);
+ kfree(alx->rxq.bufs);
+ return -ENOMEM;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+ int err;
+
+ err = alx_alloc_descriptors(alx);
+ if (err)
+ return err;
+
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+
+ netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+
+ alx_reinit_rings(alx);
+ return 0;
+}
+
+static void alx_free_rings(struct alx_priv *alx)
+{
+ netif_napi_del(&alx->napi);
+ alx_free_buffers(alx);
+
+ kfree(alx->txq.bufs);
+ kfree(alx->rxq.bufs);
+
+ dma_free_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ alx->descmem.virt,
+ alx->descmem.dma);
+}
+
+static void alx_config_vector_mapping(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+ alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
+}
+
+static void alx_irq_enable(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ /* level-1 interrupt switch */
+ alx_write_mem32(hw, ALX_ISR, 0);
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ alx_post_write(hw);
+}
+
+static void alx_irq_disable(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+ alx_write_mem32(hw, ALX_IMR, 0);
+ alx_post_write(hw);
+
+ synchronize_irq(alx->hw.pdev->irq);
+}
+
+static int alx_request_irq(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+ u32 msi_ctrl;
+
+ msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
+
+ if (!pci_enable_msi(alx->hw.pdev)) {
+ alx->msi = true;
+
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
+ msi_ctrl | ALX_MSI_MASK_SEL_LINE);
+ err = request_irq(pdev->irq, alx_intr_msi, 0,
+ alx->dev->name, alx);
+ if (!err)
+ goto out;
+ /* fall back to legacy interrupt */
+ pci_disable_msi(alx->hw.pdev);
+ }
+
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
+ err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+ alx->dev->name, alx);
+out:
+ if (!err)
+ alx_config_vector_mapping(alx);
+ return err;
+}
+
+static void alx_free_irq(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+
+ free_irq(pdev->irq, alx);
+
+ if (alx->msi) {
+ pci_disable_msi(alx->hw.pdev);
+ alx->msi = false;
+ }
+}
+
+static int alx_identify_hw(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ int rev = alx_hw_revision(hw);
+
+ if (rev > ALX_REV_C0)
+ return -EINVAL;
+
+ hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
+
+ return 0;
+}
+
+static int alx_init_sw(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+
+ err = alx_identify_hw(alx);
+ if (err) {
+ dev_err(&pdev->dev, "unrecognized chip, aborting\n");
+ return err;
+ }
+
+ alx->hw.lnk_patch =
+ pdev->device == ALX_DEV_ID_AR8161 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
+ pdev->subsystem_device == 0x0091 &&
+ pdev->revision == 0;
+
+ hw->smb_timer = 400;
+ hw->mtu = alx->dev->mtu;
+ alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
+ alx->tx_ringsz = 256;
+ alx->rx_ringsz = 512;
+ hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
+ hw->imt = 200;
+ alx->int_mask = ALX_ISR_MISC;
+ hw->dma_chnl = hw->max_dma_chnl;
+ hw->ith_tpd = alx->tx_ringsz / 3;
+ hw->link_speed = SPEED_UNKNOWN;
+ hw->adv_cfg = ADVERTISED_Autoneg |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
+
+ hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
+ ALX_MAC_CTRL_MHASH_ALG_HI5B |
+ ALX_MAC_CTRL_BRD_EN |
+ ALX_MAC_CTRL_PCRCE |
+ ALX_MAC_CTRL_CRCE |
+ ALX_MAC_CTRL_RXFC_EN |
+ ALX_MAC_CTRL_TXFC_EN |
+ 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+
+ return err;
+}
+
+
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return features;
+}
+
+static void alx_netif_stop(struct alx_priv *alx)
+{
+ alx->dev->trans_start = jiffies;
+ if (netif_carrier_ok(alx->dev)) {
+ netif_carrier_off(alx->dev);
+ netif_tx_disable(alx->dev);
+ napi_disable(&alx->napi);
+ }
+}
+
+static void alx_halt(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_netif_stop(alx);
+ hw->link_speed = SPEED_UNKNOWN;
+
+ alx_reset_mac(hw);
+
+ /* disable l0s/l1 */
+ alx_enable_aspm(hw, false, false);
+ alx_irq_disable(alx);
+ alx_free_buffers(alx);
+}
+
+static void alx_configure(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_configure_basic(hw);
+ alx_disable_rss(hw);
+ __alx_set_rx_mode(alx->dev);
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_activate(struct alx_priv *alx)
+{
+ /* hardware settings were lost, restore them */
+ alx_reinit_rings(alx);
+ alx_configure(alx);
+
+ /* clear old interrupts */
+ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+ alx_irq_enable(alx);
+
+ alx_schedule_link_check(alx);
+}
+
+static void alx_reinit(struct alx_priv *alx)
+{
+ ASSERT_RTNL();
+
+ alx_halt(alx);
+ alx_activate(alx);
+}
+
+static int alx_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((max_frame < ALX_MIN_FRAME_SIZE) ||
+ (max_frame > ALX_MAX_FRAME_SIZE))
+ return -EINVAL;
+
+ if (netdev->mtu == mtu)
+ return 0;
+
+ netdev->mtu = mtu;
+ alx->hw.mtu = mtu;
+ alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
+ ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
+ netdev_update_features(netdev);
+ if (netif_running(netdev))
+ alx_reinit(alx);
+ return 0;
+}
+
+static void alx_netif_start(struct alx_priv *alx)
+{
+ netif_tx_wake_all_queues(alx->dev);
+ napi_enable(&alx->napi);
+ netif_carrier_on(alx->dev);
+}
+
+static int __alx_open(struct alx_priv *alx, bool resume)
+{
+ int err;
+
+ if (!resume)
+ netif_carrier_off(alx->dev);
+
+ err = alx_alloc_rings(alx);
+ if (err)
+ return err;
+
+ alx_configure(alx);
+
+ err = alx_request_irq(alx);
+ if (err)
+ goto out_free_rings;
+
+ /* clear old interrupts */
+ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+ alx_irq_enable(alx);
+
+ if (!resume)
+ netif_tx_start_all_queues(alx->dev);
+
+ alx_schedule_link_check(alx);
+ return 0;
+
+out_free_rings:
+ alx_free_rings(alx);
+ return err;
+}
+
+static void __alx_stop(struct alx_priv *alx)
+{
+ alx_halt(alx);
+ alx_free_irq(alx);
+ alx_free_rings(alx);
+}
+
+static const char *alx_speed_desc(u16 speed)
+{
+ switch (speed) {
+ case SPEED_1000 + DUPLEX_FULL:
+ return "1 Gbps Full";
+ case SPEED_100 + DUPLEX_FULL:
+ return "100 Mbps Full";
+ case SPEED_100 + DUPLEX_HALF:
+ return "100 Mbps Half";
+ case SPEED_10 + DUPLEX_FULL:
+ return "10 Mbps Full";
+ case SPEED_10 + DUPLEX_HALF:
+ return "10 Mbps Half";
+ default:
+ return "Unknown speed";
+ }
+}
+
+static void alx_check_link(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ unsigned long flags;
+ int speed, old_speed;
+ int err;
+
+ /* clear PHY internal interrupt status, otherwise the main
+ * interrupt status will be asserted forever
+ */
+ alx_clear_phy_intr(hw);
+
+ err = alx_get_phy_link(hw, &speed);
+ if (err < 0)
+ goto reset;
+
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_PHY;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+ old_speed = hw->link_speed;
+
+ if (old_speed == speed)
+ return;
+ hw->link_speed = speed;
+
+ if (speed != SPEED_UNKNOWN) {
+ netif_info(alx, link, alx->dev,
+ "NIC Up: %s\n", alx_speed_desc(speed));
+ alx_post_phy_link(hw);
+ alx_enable_aspm(hw, true, true);
+ alx_start_mac(hw);
+
+ if (old_speed == SPEED_UNKNOWN)
+ alx_netif_start(alx);
+ } else {
+ /* link is now down */
+ alx_netif_stop(alx);
+ netif_info(alx, link, alx->dev, "Link Down\n");
+ err = alx_reset_mac(hw);
+ if (err)
+ goto reset;
+ alx_irq_disable(alx);
+
+ /* MAC reset causes all HW settings to be lost, restore all */
+ err = alx_reinit_rings(alx);
+ if (err)
+ goto reset;
+ alx_configure(alx);
+ alx_enable_aspm(hw, false, true);
+ alx_post_phy_link(hw);
+ alx_irq_enable(alx);
+ }
+
+ return;
+
+reset:
+ alx_schedule_reset(alx);
+}
+
+static int alx_open(struct net_device *netdev)
+{
+ return __alx_open(netdev_priv(netdev), false);
+}
+
+static int alx_stop(struct net_device *netdev)
+{
+ __alx_stop(netdev_priv(netdev));
+ return 0;
+}
+
+static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ struct alx_hw *hw = &alx->hw;
+ int err, speed;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __alx_stop(alx);
+
+#ifdef CONFIG_PM_SLEEP
+ err = pci_save_state(pdev);
+ if (err)
+ return err;
+#endif
+
+ err = alx_select_powersaving_speed(hw, &speed);
+ if (err)
+ return err;
+ err = alx_clear_phy_intr(hw);
+ if (err)
+ return err;
+ err = alx_pre_suspend(hw, speed);
+ if (err)
+ return err;
+ err = alx_config_wol(hw);
+ if (err)
+ return err;
+
+ *wol_en = false;
+ if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
+ netif_info(alx, wol, netdev,
+ "wol: ctrl=%X, speed=%X\n",
+ hw->sleep_ctrl, speed);
+ device_set_wakeup_enable(&pdev->dev, true);
+ *wol_en = true;
+ }
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+ int err;
+ bool wol_en;
+
+ err = __alx_shutdown(pdev, &wol_en);
+ if (!err) {
+ pci_wake_from_d3(pdev, wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ dev_err(&pdev->dev, "shutdown fail %d\n", err);
+ }
+}
+
+static void alx_link_check(struct work_struct *work)
+{
+ struct alx_priv *alx;
+
+ alx = container_of(work, struct alx_priv, link_check_wk);
+
+ rtnl_lock();
+ alx_check_link(alx);
+ rtnl_unlock();
+}
+
+static void alx_reset(struct work_struct *work)
+{
+ struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
+
+ rtnl_lock();
+ alx_reinit(alx);
+ rtnl_unlock();
+}
+
+static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
+{
+ u8 cso, css;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ cso = skb_checksum_start_offset(skb);
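+ /* the hardware stores checksum start/offset in 2-byte units (note
+ * the ">> 1" below), so an odd start offset cannot be expressed
+ */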
+ if (cso & 1)
+ return -EINVAL;
+
+ css = cso + skb->csum_offset;
+ first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
+ first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
+ first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
+
+ return 0;
+}
+
+static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ struct alx_txd *tpd, *first_tpd;
+ dma_addr_t dma;
+ int maplen, f, first_idx = txq->write_idx;
+
+ first_tpd = &txq->tpd[txq->write_idx];
+ tpd = first_tpd;
+
+ maplen = skb_headlen(skb);
+ dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ goto err_dma;
+
+ dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+ dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+ tpd->adrl.addr = cpu_to_le64(dma);
+ tpd->len = cpu_to_le16(maplen);
+
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+ tpd = &txq->tpd[txq->write_idx];
+
+ tpd->word1 = first_tpd->word1;
+
+ maplen = skb_frag_size(frag);
+ dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+ maplen, DMA_TO_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ goto err_dma;
+ dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+ dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+ tpd->adrl.addr = cpu_to_le64(dma);
+ tpd->len = cpu_to_le16(maplen);
+ }
+
+ /* last TPD, set EOP flag and store skb */
+ tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
+ txq->bufs[txq->write_idx].skb = skb;
+
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+
+ return 0;
+
+err_dma:
+ f = first_idx;
+ while (f != txq->write_idx) {
+ alx_free_txbuf(alx, f);
+ if (++f == alx->tx_ringsz)
+ f = 0;
+ }
+ return -ENOMEM;
+}
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_tx_queue *txq = &alx->txq;
+ struct alx_txd *first;
+ int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+
+ if (alx_tpd_avail(alx) < tpdreq) {
+ netif_stop_queue(alx->dev);
+ goto drop;
+ }
+
+ first = &txq->tpd[txq->write_idx];
+ memset(first, 0, sizeof(*first));
+
+ if (alx_tx_csum(skb, first))
+ goto drop;
+
+ if (alx_map_tx_skb(alx, skb) < 0)
+ goto drop;
+
+ netdev_sent_queue(alx->dev, skb->len);
+
+ /* flush updates before updating hardware */
+ wmb();
+ alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+
+ if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
+ netif_stop_queue(alx->dev);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void alx_tx_timeout(struct net_device *dev)
+{
+ struct alx_priv *alx = netdev_priv(dev);
+
+ alx_schedule_reset(alx);
+}
+
+static int alx_mdio_read(struct net_device *netdev,
+ int prtad, int devad, u16 addr)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ u16 val;
+ int err;
+
+ if (prtad != hw->mdio.prtad)
+ return -EINVAL;
+
+ if (devad == MDIO_DEVAD_NONE)
+ err = alx_read_phy_reg(hw, addr, &val);
+ else
+ err = alx_read_phy_ext(hw, devad, addr, &val);
+
+ if (err)
+ return err;
+ return val;
+}
+
+static int alx_mdio_write(struct net_device *netdev,
+ int prtad, int devad, u16 addr, u16 val)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (prtad != hw->mdio.prtad)
+ return -EINVAL;
+
+ if (devad == MDIO_DEVAD_NONE)
+ return alx_write_phy_reg(hw, addr, val);
+
+ return alx_write_phy_ext(hw, devad, addr, val);
+}
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ if (alx->msi)
+ alx_intr_msi(0, alx);
+ else
+ alx_intr_legacy(0, alx);
+}
+#endif
+
+static const struct net_device_ops alx_netdev_ops = {
+ .ndo_open = alx_open,
+ .ndo_stop = alx_stop,
+ .ndo_start_xmit = alx_start_xmit,
+ .ndo_set_rx_mode = alx_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = alx_set_mac_address,
+ .ndo_change_mtu = alx_change_mtu,
+ .ndo_do_ioctl = alx_ioctl,
+ .ndo_tx_timeout = alx_tx_timeout,
+ .ndo_fix_features = alx_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = alx_poll_controller,
+#endif
+};
+
+static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct alx_priv *alx;
+ struct alx_hw *hw;
+ bool phy_configured;
+ int bars, pm_cap, err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* The alx chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used for descriptors.
+ */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA config, aborting\n");
+ goto out_pci_disable;
+ }
+ }
+ }
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed(bars:%d)\n", bars);
+ goto out_pci_disable;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap == 0) {
+ dev_err(&pdev->dev,
+ "Can't find power management capability, aborting\n");
+ err = -EIO;
+ goto out_pci_release;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ goto out_pci_release;
+
+ netdev = alloc_etherdev(sizeof(*alx));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_pci_release;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ alx = netdev_priv(netdev);
+ alx->dev = netdev;
+ alx->hw.pdev = pdev;
+ alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
+ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
+ hw = &alx->hw;
+ pci_set_drvdata(pdev, alx);
+
+ hw->hw_addr = pci_ioremap_bar(pdev, 0);
+ if (!hw->hw_addr) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -EIO;
+ goto out_free_netdev;
+ }
+
+ netdev->netdev_ops = &alx_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+ netdev->irq = pdev->irq;
+ netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+
+ if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+
+ err = alx_init_sw(alx);
+ if (err) {
+ dev_err(&pdev->dev, "net device private data init failed\n");
+ goto out_unmap;
+ }
+
+ alx_reset_pcie(hw);
+
+ phy_configured = alx_phy_configured(hw);
+
+ if (!phy_configured)
+ alx_reset_phy(hw);
+
+ err = alx_reset_mac(hw);
+ if (err) {
+ dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
+ goto out_unmap;
+ }
+
+ /* setup link to put it in a known good starting state */
+ if (!phy_configured) {
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to configure PHY speed/duplex (err=%d)\n",
+ err);
+ goto out_unmap;
+ }
+ }
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+
+ if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
+ dev_warn(&pdev->dev,
+ "Invalid permanent address programmed, using random one\n");
+ eth_hw_addr_random(netdev);
+ memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
+ }
+
+ memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
+ memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
+
+ hw->mdio.prtad = 0;
+ hw->mdio.mmds = 0;
+ hw->mdio.dev = netdev;
+ hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
+ MDIO_SUPPORTS_C22 |
+ MDIO_EMULATE_C22;
+ hw->mdio.mdio_read = alx_mdio_read;
+ hw->mdio.mdio_write = alx_mdio_write;
+
+ if (!alx_get_phy_info(hw)) {
+ dev_err(&pdev->dev, "failed to identify PHY\n");
+ err = -EIO;
+ goto out_unmap;
+ }
+
+ INIT_WORK(&alx->link_check_wk, alx_link_check);
+ INIT_WORK(&alx->reset_wk, alx_reset);
+ spin_lock_init(&alx->hw.mdio_lock);
+ spin_lock_init(&alx->irq_lock);
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "register netdevice failed\n");
+ goto out_unmap;
+ }
+
+ device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
+
+ netdev_info(netdev,
+ "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
+ netdev->dev_addr);
+
+ return 0;
+
+out_unmap:
+ iounmap(hw->hw_addr);
+out_free_netdev:
+ free_netdev(netdev);
+out_pci_release:
+ pci_release_selected_regions(pdev, bars);
+out_pci_disable:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void alx_remove(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+
+ cancel_work_sync(&alx->link_check_wk);
+ cancel_work_sync(&alx->reset_wk);
+
+ /* restore permanent mac address */
+ alx_set_macaddr(hw, hw->perm_addr);
+
+ unregister_netdev(alx->dev);
+ iounmap(hw->hw_addr);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(alx->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int alx_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+ bool wol_en;
+
+ err = __alx_shutdown(pdev, &wol_en);
+ if (err) {
+ dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
+ return err;
+ }
+
+ if (wol_en) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int alx_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ hw->link_speed = SPEED_UNKNOWN;
+ alx->int_mask = ALX_ISR_MISC;
+
+ alx_reset_pcie(hw);
+ alx_reset_phy(hw);
+
+ err = alx_reset_mac(hw);
+ if (err) {
+ netif_err(alx, hw, alx->dev,
+ "resume:reset_mac fail %d\n", err);
+ return -EIO;
+ }
+
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+ if (err) {
+ netif_err(alx, hw, alx->dev,
+ "resume:setup_speed_duplex fail %d\n", err);
+ return -EIO;
+ }
+
+ if (netif_running(netdev)) {
+ err = __alx_open(alx, true);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+#endif
+
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "pci error detected\n");
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ alx_halt(alx);
+ }
+
+ if (state == pci_channel_io_perm_failure)
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ dev_info(&pdev->dev, "pci error slot reset\n");
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
+ goto out;
+ }
+
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ alx_reset_pcie(hw);
+ if (!alx_reset_mac(hw))
+ rc = PCI_ERS_RESULT_RECOVERED;
+out:
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+
+ dev_info(&pdev->dev, "pci error resume\n");
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ alx_activate(alx);
+ netif_device_attach(netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers alx_err_handlers = {
+ .error_detected = alx_pci_error_detected,
+ .slot_reset = alx_pci_error_slot_reset,
+ .resume = alx_pci_error_resume,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS (&alx_pm_ops)
+#else
+#define ALX_PM_OPS NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
+ {}
+};
+
+static struct pci_driver alx_driver = {
+ .name = alx_drv_name,
+ .id_table = alx_pci_tbl,
+ .probe = alx_probe,
+ .remove = alx_remove,
+ .shutdown = alx_shutdown,
+ .err_handler = &alx_err_handlers,
+ .driver.pm = ALX_PM_OPS,
+};
+
+module_pci_driver(alx_driver);
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION(
+ "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 00000000000..e4358c98bc4
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_REG_H
+#define ALX_REG_H
+
+#define ALX_DEV_ID_AR8161 0x1091
+#define ALX_DEV_ID_E2200 0xe091
+#define ALX_DEV_ID_AR8162 0x1090
+#define ALX_DEV_ID_AR8171 0x10A1
+#define ALX_DEV_ID_AR8172 0x10A0
+
+/* rev definition,
+ * bit(0): with xD support
+ * bit(1): with Card Reader function
+ * bit(7:2): real revision
+ */
+#define ALX_PCI_REVID_SHIFT 3
+#define ALX_REV_A0 0
+#define ALX_REV_A1 1
+#define ALX_REV_B0 2
+#define ALX_REV_C0 3
+
+#define ALX_DEV_CTRL 0x0060
+#define ALX_DEV_CTRL_MAXRRS_MIN 2
+
+#define ALX_MSIX_MASK 0x0090
+
+#define ALX_UE_SVRT 0x010C
+#define ALX_UE_SVRT_FCPROTERR BIT(13)
+#define ALX_UE_SVRT_DLPROTERR BIT(4)
+
+/* eeprom & flash load register */
+#define ALX_EFLD 0x0204
+#define ALX_EFLD_F_EXIST BIT(10)
+#define ALX_EFLD_E_EXIST BIT(9)
+#define ALX_EFLD_STAT BIT(5)
+#define ALX_EFLD_START BIT(0)
+
+/* eFuse load register */
+#define ALX_SLD 0x0218
+#define ALX_SLD_STAT BIT(12)
+#define ALX_SLD_START BIT(11)
+#define ALX_SLD_MAX_TO 100
+
+#define ALX_PDLL_TRNS1 0x1104
+#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
+
+#define ALX_PMCTRL 0x12F8
+#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
+/* bit30: L0s/L1 controlled by MAC based on throughput (setting in 15A0) */
+#define ALX_PMCTRL_ASPM_FCEN BIT(30)
+#define ALX_PMCTRL_SADLY_EN BIT(29)
+#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
+#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
+#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
+/* bit[23:20]: if the pm_request_l1 time exceeds this value, enter L0s instead of L1 */
+#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
+#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
+#define ALX_PMCTRL_L1REG_TO_DEF 0xF
+#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
+#define ALX_PMCTRL_L1_TIMER_MASK 0x7
+#define ALX_PMCTRL_L1_TIMER_SHIFT 16
+#define ALX_PMCTRL_L1_TIMER_16US 4
+#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
+#define ALX_PMCTRL_L0S_EN BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
+#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
+#define ALX_PMCTRL_L1_EN BIT(3)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER 0x1400
+/* bit12: 1: always select pclk from serdes, do not switch to 25M */
+#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
+/* bit11: irq moderation for rx */
+#define ALX_MASTER_IRQMOD2_EN BIT(11)
+/* bit10: irq moderation for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN BIT(10)
+#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
+#define ALX_MASTER_OOB_DIS BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M BIT(5)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST BIT(0)
+#define ALX_DMA_MAC_RST_TO 50
+
+#define ALX_IRQ_MODU_TIMER 0x1408
+#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
+#define ALX_IRQ_MODU_TIMER1_SHIFT 0
+
+#define ALX_PHY_CTRL 0x140C
+#define ALX_PHY_CTRL_100AB_EN BIT(17)
+/* bit14: affects MAC & PHY, puts them into a low power state */
+#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
+/* bit13: 1: pll always ON, 0: pll may be switched off in low power */
+#define ALX_PHY_CTRL_PLL_ON BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
+#define ALX_PHY_CTRL_HIB_EN BIT(10)
+#define ALX_PHY_CTRL_IDDQ BIT(7)
+#define ALX_PHY_CTRL_GATE_25M BIT(5)
+#define ALX_PHY_CTRL_LED_MODE BIT(2)
+/* bit0: take the DSP out of its reset state */
+#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO 80
+#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
+ ALX_PHY_CTRL_100AB_EN | \
+ ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS 0x1410
+#define ALX_MAC_STS_TXQ_BUSY BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
+#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
+ ALX_MAC_STS_RXQ_BUSY | \
+ ALX_MAC_STS_TXMAC_BUSY | \
+ ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO 0x1414
+#define ALX_MDIO_MODE_EXT BIT(30)
+#define ALX_MDIO_BUSY BIT(27)
+#define ALX_MDIO_CLK_SEL_MASK 0x7
+#define ALX_MDIO_CLK_SEL_SHIFT 24
+#define ALX_MDIO_CLK_SEL_25MD4 0
+#define ALX_MDIO_CLK_SEL_25MD128 7
+#define ALX_MDIO_START BIT(23)
+#define ALX_MDIO_SPRES_PRMBL BIT(22)
+/* bit21: 1: read, 0: write */
+#define ALX_MDIO_OP_READ BIT(21)
+#define ALX_MDIO_REG_MASK 0x1F
+#define ALX_MDIO_REG_SHIFT 16
+#define ALX_MDIO_DATA_MASK 0xFFFF
+#define ALX_MDIO_DATA_SHIFT 0
+#define ALX_MDIO_MAX_AC_TO 120
+
+#define ALX_MDIO_EXTN 0x1448
+#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
+#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
+#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
+#define ALX_MDIO_EXTN_REG_SHIFT 0
+
+#define ALX_SERDES 0x1424
+#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
+#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
+
+#define ALX_LPI_CTRL 0x1440
+#define ALX_LPI_CTRL_EN BIT(0)
+
+/* for B0+, bit[13..] for C0+ */
+#define ALX_HRTBT_EXT_CTRL 0x1AD0
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
+#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
+#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
+#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
+#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
+#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
+#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
+
+#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
+#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
+#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
+#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
+#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
+#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
+
+/* 1B8C ~ 1B94 for C0+ */
+#define ALX_SWOI_ACER_CTRL 0x1B8C
+#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0xFF
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
+#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0xFFF
+#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
+
+#define ALX_SWOI_IOAC_CTRL_2 0x1B90
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
+
+#define ALX_SWOI_IOAC_CTRL_3 0x1B94
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
+
+/* for B0 */
+#define ALX_IDLE_DECISN_TIMER 0x1474
+/* 1ms */
+#define ALX_IDLE_DECISN_TIMER_DEF 0x400
+
+#define ALX_MAC_CTRL 0x1480
+#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
+#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
+/* bit29: multicast hash algorithm, 1: legacy (high 5 CRC bits), 0: marvell (low 5 bits) */
+#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
+#define ALX_MAC_CTRL_BRD_EN BIT(26)
+#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
+#define ALX_MAC_CTRL_SPEED_MASK 0x3
+#define ALX_MAC_CTRL_SPEED_SHIFT 20
+#define ALX_MAC_CTRL_SPEED_10_100 1
+#define ALX_MAC_CTRL_SPEED_1000 2
+#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
+#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
+#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
+#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
+#define ALX_MAC_CTRL_PCRCE BIT(7)
+#define ALX_MAC_CTRL_CRCE BIT(6)
+#define ALX_MAC_CTRL_FULLD BIT(5)
+#define ALX_MAC_CTRL_RXFC_EN BIT(3)
+#define ALX_MAC_CTRL_TXFC_EN BIT(2)
+#define ALX_MAC_CTRL_RX_EN BIT(1)
+#define ALX_MAC_CTRL_TX_EN BIT(0)
+
+#define ALX_STAD0 0x1488
+#define ALX_STAD1 0x148C
+
+#define ALX_HASH_TBL0 0x1490
+#define ALX_HASH_TBL1 0x1494
+
+#define ALX_MTU 0x149C
+#define ALX_MTU_JUMBO_TH 1514
+#define ALX_MTU_STD_ALGN 1536
+
+#define ALX_SRAM5 0x1524
+#define ALX_SRAM_RXF_LEN_MASK 0xFFF
+#define ALX_SRAM_RXF_LEN_SHIFT 0
+#define ALX_SRAM_RXF_LEN_8K (8*1024)
+
+#define ALX_SRAM9 0x1534
+#define ALX_SRAM_LOAD_PTR BIT(0)
+
+#define ALX_RX_BASE_ADDR_HI 0x1540
+
+#define ALX_TX_BASE_ADDR_HI 0x1544
+
+#define ALX_RFD_ADDR_LO 0x1550
+#define ALX_RFD_RING_SZ 0x1560
+#define ALX_RFD_BUF_SZ 0x1564
+
+#define ALX_RRD_ADDR_LO 0x1568
+#define ALX_RRD_RING_SZ 0x1578
+
+/* pri3: highest, pri0: lowest */
+#define ALX_TPD_PRI3_ADDR_LO 0x14E4
+#define ALX_TPD_PRI2_ADDR_LO 0x14E0
+#define ALX_TPD_PRI1_ADDR_LO 0x157C
+#define ALX_TPD_PRI0_ADDR_LO 0x1580
+
+/* producer index is 16bit */
+#define ALX_TPD_PRI3_PIDX 0x1618
+#define ALX_TPD_PRI2_PIDX 0x161A
+#define ALX_TPD_PRI1_PIDX 0x15F0
+#define ALX_TPD_PRI0_PIDX 0x15F2
+
+/* consumer index is 16bit */
+#define ALX_TPD_PRI3_CIDX 0x161C
+#define ALX_TPD_PRI2_CIDX 0x161E
+#define ALX_TPD_PRI1_CIDX 0x15F4
+#define ALX_TPD_PRI0_CIDX 0x15F6
+
+#define ALX_TPD_RING_SZ 0x1584
+
+#define ALX_TXQ0 0x1590
+#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
+#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
+#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
+#define ALX_TXQ0_LSO_8023_EN BIT(7)
+#define ALX_TXQ0_MODE_ENHANCE BIT(6)
+#define ALX_TXQ0_EN BIT(5)
+#define ALX_TXQ0_SUPT_IPOPT BIT(4)
+#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
+#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
+#define ALX_TXQ_TPD_BURSTPREF_DEF 5
+
+#define ALX_TXQ1 0x1594
+/* bit11: drop packets whose length exceeds the RFD buffer size */
+#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
+#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
+
+#define ALX_RXQ0 0x15A0
+#define ALX_RXQ0_EN BIT(31)
+#define ALX_RXQ0_RSS_HASH_EN BIT(29)
+#define ALX_RXQ0_RSS_MODE_MASK 0x3
+#define ALX_RXQ0_RSS_MODE_SHIFT 26
+#define ALX_RXQ0_RSS_MODE_DIS 0
+#define ALX_RXQ0_RSS_MODE_MQMI 3
+#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
+#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
+#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
+#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
+#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
+#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
+#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
+#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
+#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
+#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
+#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
+#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
+#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV4_EN)
+#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
+#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
+#define ALX_RXQ0_ASPM_THRESH_100M 3
+
+#define ALX_RXQ2 0x15A8
+#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
+#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
+#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
+#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
+/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ * rx-packet(1522) + delay-of-link(64)
+ * = 3212.
+ */
+#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
+
+#define ALX_DMA 0x15C0
+#define ALX_DMA_RCHNL_SEL_MASK 0x3
+#define ALX_DMA_RCHNL_SEL_SHIFT 26
+#define ALX_DMA_WDLY_CNT_MASK 0xF
+#define ALX_DMA_WDLY_CNT_SHIFT 16
+#define ALX_DMA_WDLY_CNT_DEF 4
+#define ALX_DMA_RDLY_CNT_MASK 0x1F
+#define ALX_DMA_RDLY_CNT_SHIFT 11
+#define ALX_DMA_RDLY_CNT_DEF 15
+/* bit10: 0:tpd with pri, 1: data */
+#define ALX_DMA_RREQ_PRI_DATA BIT(10)
+#define ALX_DMA_RREQ_BLEN_MASK 0x7
+#define ALX_DMA_RREQ_BLEN_SHIFT 4
+#define ALX_DMA_RORDER_MODE_MASK 0x7
+#define ALX_DMA_RORDER_MODE_SHIFT 0
+#define ALX_DMA_RORDER_MODE_OUT 4
+
+#define ALX_WOL0 0x14A0
+#define ALX_WOL0_PME_LINK BIT(5)
+#define ALX_WOL0_LINK_EN BIT(4)
+#define ALX_WOL0_PME_MAGIC_EN BIT(3)
+#define ALX_WOL0_MAGIC_EN BIT(2)
+
+#define ALX_RFD_PIDX 0x15E0
+
+#define ALX_RFD_CIDX 0x15F8
+
+/* MIB */
+#define ALX_MIB_BASE 0x1700
+#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
+#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
+
+#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
+#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
+#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
+#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
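The MIB block at ALX_MIB_BASE is a run of 32-bit counters at consecutive 4-byte offsets, and the _BIN/_END macros bound the RX and TX sub-ranges. A minimal sketch of snapshotting the RX counters, assuming the caller supplies an array with (ALX_RX_STATS_END - ALX_RX_STATS_BIN) / 4 + 1 slots; names and helpers are placeholders, not the driver's own.

#include <linux/io.h>
#include <linux/types.h>

/* Snapshot the RX MIB counters into "stats" (sketch only). */
static void example_read_rx_mib(void __iomem *hw_addr, u32 *stats)
{
	u32 reg;
	int i = 0;

	for (reg = ALX_RX_STATS_BIN; reg <= ALX_RX_STATS_END; reg += 4)
		stats[i++] = readl(hw_addr + reg);
}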
+
+#define ALX_ISR 0x1600
+#define ALX_ISR_DIS BIT(31)
+#define ALX_ISR_RX_Q7 BIT(30)
+#define ALX_ISR_RX_Q6 BIT(29)
+#define ALX_ISR_RX_Q5 BIT(28)
+#define ALX_ISR_RX_Q4 BIT(27)
+#define ALX_ISR_PCIE_LNKDOWN BIT(26)
+#define ALX_ISR_RX_Q3 BIT(19)
+#define ALX_ISR_RX_Q2 BIT(18)
+#define ALX_ISR_RX_Q1 BIT(17)
+#define ALX_ISR_RX_Q0 BIT(16)
+#define ALX_ISR_TX_Q0 BIT(15)
+#define ALX_ISR_PHY BIT(12)
+#define ALX_ISR_DMAW BIT(10)
+#define ALX_ISR_DMAR BIT(9)
+#define ALX_ISR_TXF_UR BIT(8)
+#define ALX_ISR_TX_Q3 BIT(7)
+#define ALX_ISR_TX_Q2 BIT(6)
+#define ALX_ISR_TX_Q1 BIT(5)
+#define ALX_ISR_RFD_UR BIT(4)
+#define ALX_ISR_RXF_OV BIT(3)
+#define ALX_ISR_MANU BIT(2)
+#define ALX_ISR_TIMER BIT(1)
+#define ALX_ISR_SMB BIT(0)
+
+#define ALX_IMR 0x1604
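One plausible use of the ALX_ISR/ALX_IMR pair, sketched below: ALX_IMR selects which causes may raise an interrupt, the handler reads ALX_ISR, writes the pending causes back together with ALX_ISR_DIS to acknowledge them and hold off further assertions while they are serviced, then writes 0 to re-arm. The exact sequence the silicon expects is an assumption here, and hw_addr and the helper name are placeholders.

#include <linux/io.h>
#include <linux/types.h>

/* Acknowledge and service pending interrupt causes (sketch only). */
static void example_handle_intr(void __iomem *hw_addr)
{
	u32 status = readl(hw_addr + ALX_ISR);

	if (!status || (status & ALX_ISR_DIS))
		return;

	/* clear the asserted causes and mask the line while servicing */
	writel(status | ALX_ISR_DIS, hw_addr + ALX_ISR);

	if (status & (ALX_ISR_RX_Q0 | ALX_ISR_TX_Q0)) {
		/* clean the RX/TX rings here, e.g. by scheduling NAPI */
	}

	/* writing 0 re-enables interrupt generation */
	writel(0, hw_addr + ALX_ISR);
}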
+
+/* re-send the interrupt assert message if SW does not respond */
+#define ALX_INT_RETRIG 0x1608
+/* 40ms */
+#define ALX_INT_RETRIG_TO 20000
+
+#define ALX_SMB_TIMER 0x15C4
+
+#define ALX_TINT_TPD_THRSHLD 0x15C8
+
+#define ALX_TINT_TIMER 0x15CC
+
+#define ALX_CLK_GATE 0x1814
+#define ALX_CLK_GATE_RXMAC BIT(5)
+#define ALX_CLK_GATE_TXMAC BIT(4)
+#define ALX_CLK_GATE_RXQ BIT(3)
+#define ALX_CLK_GATE_TXQ BIT(2)
+#define ALX_CLK_GATE_DMAR BIT(1)
+#define ALX_CLK_GATE_DMAW BIT(0)
+#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
+ ALX_CLK_GATE_TXMAC | \
+ ALX_CLK_GATE_RXQ | \
+ ALX_CLK_GATE_TXQ | \
+ ALX_CLK_GATE_DMAR | \
+ ALX_CLK_GATE_DMAW)
+
+/* interop between drivers */
+#define ALX_DRV 0x1804
+#define ALX_DRV_PHY_AUTO BIT(28)
+#define ALX_DRV_PHY_1000 BIT(27)
+#define ALX_DRV_PHY_100 BIT(26)
+#define ALX_DRV_PHY_10 BIT(25)
+#define ALX_DRV_PHY_DUPLEX BIT(24)
+/* bit23: adv Pause */
+#define ALX_DRV_PHY_PAUSE BIT(23)
+/* bit22: adv Asym Pause */
+#define ALX_DRV_PHY_MASK 0xFF
+#define ALX_DRV_PHY_SHIFT 21
+#define ALX_DRV_PHY_UNKNOWN 0
+
+/* flag value indicating the PHY has been initialized */
+#define ALX_PHY_INITED 0x003F
+
+/* regs 0x1830 ~ 0x186C are for rev C0 and later: 16 bitmap patterns and wake packet detection */
+#define ALX_WOL_CTRL2 0x1830
+#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
+#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
+#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
+#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
+
+#define ALX_WOL_CTRL3 0x1834
+#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
+#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
+
+#define ALX_WOL_CTRL4 0x1838
+#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
+#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
+#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
+#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
+#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
+#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
+#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
+#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
+#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
+#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
+#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
+#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
+#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
+#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
+#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
+#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
+#define ALX_WOL_CTRL4_PT15_EN BIT(15)
+#define ALX_WOL_CTRL4_PT14_EN BIT(14)
+#define ALX_WOL_CTRL4_PT13_EN BIT(13)
+#define ALX_WOL_CTRL4_PT12_EN BIT(12)
+#define ALX_WOL_CTRL4_PT11_EN BIT(11)
+#define ALX_WOL_CTRL4_PT10_EN BIT(10)
+#define ALX_WOL_CTRL4_PT9_EN BIT(9)
+#define ALX_WOL_CTRL4_PT8_EN BIT(8)
+#define ALX_WOL_CTRL4_PT7_EN BIT(7)
+#define ALX_WOL_CTRL4_PT6_EN BIT(6)
+#define ALX_WOL_CTRL4_PT5_EN BIT(5)
+#define ALX_WOL_CTRL4_PT4_EN BIT(4)
+#define ALX_WOL_CTRL4_PT3_EN BIT(3)
+#define ALX_WOL_CTRL4_PT2_EN BIT(2)
+#define ALX_WOL_CTRL4_PT1_EN BIT(1)
+#define ALX_WOL_CTRL4_PT0_EN BIT(0)
+
+#define ALX_WOL_CTRL5 0x183C
+#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL6 0x1840
+#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL7 0x1844
+#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL8 0x1848
+#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
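Taken together, ALX_WOL_CTRL2..CTRL8 arm pattern-based wake: CTRL2 carries the global enables, CTRL4 the per-slot enable and match-status bits, and CTRL5..CTRL8 hold one byte of pattern length per slot (note that the length-field macros keep the CTRL5 prefix even for registers CTRL6..CTRL8). Below is a hedged sketch for slot 0, assuming the pattern data itself has already been stored behind the address programmed via ALX_WOL_CTRL3; the bit combinations are inferred from the register names only.

#include <linux/io.h>
#include <linux/types.h>

/* Arm wake-on-pattern for slot 0 (sketch; semantics assumed from names). */
static void example_arm_wol_pattern0(void __iomem *hw_addr, u8 pattern_len)
{
	u32 val;

	/* slot 0 length lives in the low byte of ALX_WOL_CTRL5 */
	val = readl(hw_addr + ALX_WOL_CTRL5);
	val &= ~(ALX_WOL_CTRL5_PT0_LEN_MASK << ALX_WOL_CTRL5_PT0_LEN_SHIFT);
	val |= (pattern_len & ALX_WOL_CTRL5_PT0_LEN_MASK) <<
	       ALX_WOL_CTRL5_PT0_LEN_SHIFT;
	writel(val, hw_addr + ALX_WOL_CTRL5);

	/* enable matching on slot 0 only */
	writel(ALX_WOL_CTRL4_PT0_EN, hw_addr + ALX_WOL_CTRL4);

	/* global pattern wake plus PME assertion on a match */
	writel(ALX_WOL_CTRL2_PTRN_EN | ALX_WOL_CTRL2_PME_PTRN_EN,
	       hw_addr + ALX_WOL_CTRL2);
}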
+
+#define ALX_ACER_FIXED_PTN0 0x1850
+#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
+#define ALX_ACER_FIXED_PTN0_SHIFT 0
+
+#define ALX_ACER_FIXED_PTN1 0x1854
+#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
+#define ALX_ACER_FIXED_PTN1_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM0 0x1858
+#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM0_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM1 0x185C
+#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM1_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM2 0x1860
+#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM2_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM3 0x1864
+#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM3_SHIFT 0
+
+#define ALX_ACER_MAGIC 0x1868
+#define ALX_ACER_MAGIC_EN BIT(31)
+#define ALX_ACER_MAGIC_PME_EN BIT(30)
+#define ALX_ACER_MAGIC_MATCH BIT(29)
+#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
+#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
+#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
+#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
+#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
+
+#define ALX_ACER_TIMER 0x186C
+#define ALX_ACER_TIMER_EN BIT(31)
+#define ALX_ACER_TIMER_PME_EN BIT(30)
+#define ALX_ACER_TIMER_MATCH BIT(29)
+#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
+#define ALX_ACER_TIMER_THRES_SHIFT 0
+#define ALX_ACER_TIMER_THRES_DEF 1
+
+/* RSS definitions */
+#define ALX_RSS_KEY0 0x14B0
+#define ALX_RSS_KEY1 0x14B4
+#define ALX_RSS_KEY2 0x14B8
+#define ALX_RSS_KEY3 0x14BC
+#define ALX_RSS_KEY4 0x14C0
+#define ALX_RSS_KEY5 0x14C4
+#define ALX_RSS_KEY6 0x14C8
+#define ALX_RSS_KEY7 0x14CC
+#define ALX_RSS_KEY8 0x14D0
+#define ALX_RSS_KEY9 0x14D4
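The ten key registers form one contiguous 40-byte window for the RSS hash key. A minimal sketch of loading a key, assuming little-endian packing of the key bytes (the byte order the hardware actually expects is not spelled out here):

#include <linux/io.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Load a 40-byte RSS hash key (sketch; byte order is an assumption). */
static void example_load_rss_key(void __iomem *hw_addr, const u8 key[40])
{
	int i;

	for (i = 0; i < 10; i++)
		writel(get_unaligned_le32(key + i * 4),
		       hw_addr + ALX_RSS_KEY0 + i * 4);
}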
+
+#define ALX_RSS_IDT_TBL0 0x1B00
+
+#define ALX_MSI_MAP_TBL1 0x15D0
+#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
+#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
+#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
+#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
+#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
+#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
+
+#define ALX_MSI_MAP_TBL2 0x15D8
+#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
+#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
+#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
+#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
+#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
+#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
+
+#define ALX_MSI_ID_MAP 0x15D4
+
+#define ALX_MSI_RETRANS_TIMER 0x1920
+/* bit16: 1:line,0:standard */
+#define ALX_MSI_MASK_SEL_LINE BIT(16)
+#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
+#define ALX_MSI_RETRANS_TM_SHIFT 0
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define ALX_WRR 0x1938
+#define ALX_WRR_PRI_MASK 0x3
+#define ALX_WRR_PRI_SHIFT 29
+#define ALX_WRR_PRI_RESTRICT_NONE 3
+#define ALX_WRR_PRI3_MASK 0x1F
+#define ALX_WRR_PRI3_SHIFT 24
+#define ALX_WRR_PRI2_MASK 0x1F
+#define ALX_WRR_PRI2_SHIFT 16
+#define ALX_WRR_PRI1_MASK 0x1F
+#define ALX_WRR_PRI1_SHIFT 8
+#define ALX_WRR_PRI0_MASK 0x1F
+#define ALX_WRR_PRI0_SHIFT 0
+
+#define ALX_HQTPD 0x193C
+#define ALX_HQTPD_BURST_EN BIT(31)
+#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
+#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
+#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
+
+#define ALX_MISC 0x19C0
+#define ALX_MISC_PSW_OCP_MASK 0x7
+#define ALX_MISC_PSW_OCP_SHIFT 21
+#define ALX_MISC_PSW_OCP_DEF 0x7
+#define ALX_MISC_ISO_EN BIT(12)
+#define ALX_MISC_INTNLOSC_OPEN BIT(3)
+
+#define ALX_MSIC2 0x19C8
+#define ALX_MSIC2_CALB_START BIT(0)
+
+#define ALX_MISC3 0x19CC
+/* bit 1: 1: the 25M clock is controlled by software */
+#define ALX_MISC3_25M_BY_SW BIT(1)
+/* bit0: 25M switch to intnl OSC */
+#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
+
+/* MSIX tbl in memory space */
+#define ALX_MSIX_ENTRY_BASE 0x2000
+
+/********************* PHY regs definition ***************************/
+
+/* PHY Specific Status Register */
+#define ALX_MII_GIGA_PSSR 0x11
+#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
+#define ALX_GIGA_PSSR_DPLX 0x2000
+#define ALX_GIGA_PSSR_SPEED 0xC000
+#define ALX_GIGA_PSSR_10MBS 0x0000
+#define ALX_GIGA_PSSR_100MBS 0x4000
+#define ALX_GIGA_PSSR_1000MBS 0x8000
+
+/* PHY Interrupt Enable Register */
+#define ALX_MII_IER 0x12
+#define ALX_IER_LINK_UP 0x0400
+#define ALX_IER_LINK_DOWN 0x0800
+
+/* PHY Interrupt Status Register */
+#define ALX_MII_ISR 0x13
+
+#define ALX_MII_DBG_ADDR 0x1D
+#define ALX_MII_DBG_DATA 0x1E
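The vendor debug-port registers in the section below (ALX_MIIDBG_*) are not mapped directly into MII space; they are reached indirectly by writing the debug register index to ALX_MII_DBG_ADDR and then exchanging the value through ALX_MII_DBG_DATA. A sketch of a read, using the generic mdiobus accessors as stand-ins for the driver's own MDIO routines:

#include <linux/phy.h>

/* Indirect read of a PHY debug-port register (sketch only). */
static int example_read_phy_dbg(struct mii_bus *bus, int phy_addr,
				u16 dbg_reg, u16 *val)
{
	int ret;

	ret = mdiobus_write(bus, phy_addr, ALX_MII_DBG_ADDR, dbg_reg);
	if (ret < 0)
		return ret;

	ret = mdiobus_read(bus, phy_addr, ALX_MII_DBG_DATA);
	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}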
+
+/***************************** debug port *************************************/
+
+#define ALX_MIIDBG_ANACTRL 0x00
+#define ALX_ANACTRL_DEF 0x02EF
+
+#define ALX_MIIDBG_SYSMODCTRL 0x04
+/* enable half bias */
+#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
+
+#define ALX_MIIDBG_SRDSYSMOD 0x05
+#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
+#define ALX_SRDSYSMOD_DEF 0x2C46
+
+#define ALX_MIIDBG_HIBNEG 0x0B
+#define ALX_HIBNEG_PSHIB_EN 0x8000
+#define ALX_HIBNEG_HIB_PSE 0x1000
+#define ALX_HIBNEG_DEF 0xBC40
+#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
+ ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
+
+#define ALX_MIIDBG_TST10BTCFG 0x12
+#define ALX_TST10BTCFG_DEF 0x4C04
+
+#define ALX_MIIDBG_AZ_ANADECT 0x15
+#define ALX_AZ_ANADECT_DEF 0x3220
+#define ALX_AZ_ANADECT_LONG 0x3210
+
+#define ALX_MIIDBG_MSE16DB 0x18
+#define ALX_MSE16DB_UP 0x05EA
+#define ALX_MSE16DB_DOWN 0x02EA
+
+#define ALX_MIIDBG_MSE20DB 0x1C
+#define ALX_MSE20DB_TH_MASK 0x7F
+#define ALX_MSE20DB_TH_SHIFT 2
+#define ALX_MSE20DB_TH_DEF 0x2E
+#define ALX_MSE20DB_TH_HI 0x54
+
+#define ALX_MIIDBG_AGC 0x23
+#define ALX_AGC_2_VGA_MASK 0x3FU
+#define ALX_AGC_2_VGA_SHIFT 8
+#define ALX_AGC_LONG1G_LIMT 40
+#define ALX_AGC_LONG100M_LIMT 44
+
+#define ALX_MIIDBG_LEGCYPS 0x29
+#define ALX_LEGCYPS_EN 0x8000
+#define ALX_LEGCYPS_DEF 0x129D
+
+#define ALX_MIIDBG_TST100BTCFG 0x36
+#define ALX_TST100BTCFG_DEF 0xE12C
+
+#define ALX_MIIDBG_GREENCFG 0x3B
+#define ALX_GREENCFG_DEF 0x7078
+
+#define ALX_MIIDBG_GREENCFG2 0x3D
+#define ALX_GREENCFG2_BP_GREEN 0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
+
+/******* dev 3 *********/
+#define ALX_MIIEXT_PCS 3
+
+#define ALX_MIIEXT_CLDCTRL3 0x8003
+#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
+
+#define ALX_MIIEXT_CLDCTRL5 0x8005
+#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
+
+#define ALX_MIIEXT_CLDCTRL6 0x8006
+#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
+#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
+#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
+#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
+
+#define ALX_MIIEXT_VDRVBIAS 0x8062
+#define ALX_VDRVBIAS_DEF 0x3
+
+/********* dev 7 **********/
+#define ALX_MIIEXT_ANEG 7
+
+#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
+#define ALX_LOCAL_EEEADV_1000BT 0x0004
+#define ALX_LOCAL_EEEADV_100BT 0x0002
+
+#define ALX_MIIEXT_AFE 0x801A
+#define ALX_AFE_10BT_100M_TH 0x0040
+
+#define ALX_MIIEXT_S3DIG10 0x8023
+/* bit 0: 1: bypass the 10BT RX FIFO, 0: original 10BT RX path */
+#define ALX_MIIEXT_S3DIG10_SL 0x0001
+#define ALX_MIIEXT_S3DIG10_DEF 0
+
+#define ALX_MIIEXT_NLP78 0x8027
+#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
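The "dev 3" and "dev 7" sections above are MDIO manageable devices (PCS and auto-negotiation), and registers such as 0x8003 or 0x3C live inside those devices, reached through the standard clause-22 MMD indirection: write the device address to MII register 13, the register number to register 14, switch register 13 to data mode, then access register 14. A sketch of a read, again with mdiobus accessors as stand-ins; the MMD constants are the standard IEEE 802.3 values, restated locally so the snippet is self-contained:

#include <linux/phy.h>

#define EXAMPLE_MII_MMD_CTRL	0x0d	/* clause-22 MMD access control */
#define EXAMPLE_MII_MMD_DATA	0x0e	/* clause-22 MMD access data */
#define EXAMPLE_MMD_CTRL_NOINCR	0x4000	/* data access, no post-increment */

/* Indirect read of an MMD extension register, e.g.
 * example_read_phy_ext(bus, addr, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, &v);
 * (sketch only)
 */
static int example_read_phy_ext(struct mii_bus *bus, int phy_addr,
				u8 devad, u16 reg, u16 *val)
{
	int ret;

	ret = mdiobus_write(bus, phy_addr, EXAMPLE_MII_MMD_CTRL, devad);
	if (ret < 0)
		return ret;
	ret = mdiobus_write(bus, phy_addr, EXAMPLE_MII_MMD_DATA, reg);
	if (ret < 0)
		return ret;
	ret = mdiobus_write(bus, phy_addr, EXAMPLE_MII_MMD_CTRL,
			    devad | EXAMPLE_MMD_CTRL_NOINCR);
	if (ret < 0)
		return ret;

	ret = mdiobus_read(bus, phy_addr, EXAMPLE_MII_MMD_DATA);
	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}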
+
+#endif