author     Linaro CI <ci_notify@linaro.org>    2022-06-14 21:41:23 +0000
committer  Linaro CI <ci_notify@linaro.org>    2022-06-14 21:41:23 +0000
commit     7c067d4d1c044047807fc349ac9281590d23fb6b (patch)
tree       f861a35e805b0ef64781cc9a3af927605cad8175
parent     4fb8aa57eaecad5cc266695e5cf74647ade7b102 (diff)
parent     d88e88fe7365d1766db258168fc5b56490bc0e2e (diff)

Merge remote-tracking branch 'sdx55-drivers/tracking-qcomlt-sdx55-drivers' into integration-linux-qcomlt
-rw-r--r--  MAINTAINERS                                         |    1
-rw-r--r--  drivers/bus/mhi/ep/main.c                           |   35
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c                  |   32
-rw-r--r--  drivers/edac/qcom_edac.c                            |  111
-rw-r--r--  drivers/net/Kconfig                                 |    9
-rw-r--r--  drivers/net/Makefile                                |    1
-rw-r--r--  drivers/net/mhi_ep_net.c                            |  331
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c             |    2
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c             |    2
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c     |  138
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c   |   54
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h        |    4
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c           |    2
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c           |    8
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c              |    2
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c         |    2
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c          |    2
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig              |   10
-rw-r--r--  drivers/pci/endpoint/functions/Makefile             |    1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c        |  436
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c        |    3
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c       |    4
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c                   |    3
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c                 |   40
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c                 |    8
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c                        |   64
-rw-r--r--  include/linux/pci-epc.h                             |   12
-rw-r--r--  include/linux/pci-epf.h                             |    6
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h                  |   35
29 files changed, 1184 insertions(+), 174 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 1fc9ead83d2a..326f53cb4d5f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12952,6 +12952,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
F: Documentation/ABI/stable/sysfs-bus-mhi
F: Documentation/mhi/
F: drivers/bus/mhi/
+F: drivers/net/mhi_*
F: include/linux/mhi.h
MICROBLAZE ARCHITECTURE
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 40109a79017a..59fe8633d4c9 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -973,11 +973,9 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
static void mhi_ep_reset_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state cur_state;
- int ret;
- mhi_ep_abort_transfer(mhi_cntrl);
+ mhi_ep_power_down(mhi_cntrl);
spin_lock_bh(&mhi_cntrl->state_lock);
/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
@@ -990,27 +988,8 @@ static void mhi_ep_reset_worker(struct work_struct *work)
* issue reset during shutdown also and we don't need to do re-init in
* that case.
*/
- if (cur_state == MHI_STATE_SYS_ERR) {
- mhi_ep_mmio_init(mhi_cntrl);
-
- /* Set AMSS EE before signaling ready state */
- mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
-
- /* All set, notify the host that we are ready */
- ret = mhi_ep_set_ready_state(mhi_cntrl);
- if (ret)
- return;
-
- dev_dbg(dev, "READY state notification sent to the host\n");
-
- ret = mhi_ep_enable(mhi_cntrl);
- if (ret) {
- dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
- return;
- }
-
- enable_irq(mhi_cntrl->irq);
- }
+ if (cur_state == MHI_STATE_SYS_ERR)
+ mhi_ep_power_up(mhi_cntrl);
}
/*
@@ -1089,11 +1068,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
- if (mhi_cntrl->enabled)
+ if (mhi_cntrl->enabled) {
mhi_ep_abort_transfer(mhi_cntrl);
-
- kfree(mhi_cntrl->mhi_event);
- disable_irq(mhi_cntrl->irq);
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+ }
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);
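
The reset worker now delegates to the public power-down/power-up pair instead of open-coding teardown and re-init. Condensed, the resulting flow is (a sketch assembled from the hunks above, with unchanged code elided):

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	/* Aborts transfers, frees the event rings, disables the IRQ */
	mhi_ep_power_down(mhi_cntrl);

	/* ... MMIO reset under state_lock, sampling cur_state (unchanged code) ... */

	/* Full re-init (MMIO init, READY notification, IRQ enable) only on SYS_ERR */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);
}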
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 468d1097a1ec..7991720e7b81 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -333,6 +333,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
+ bool read = false;
u32 cnt = 0;
int i;
@@ -423,7 +424,36 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (chan->dir == EDMA_DIR_WRITE) {
+ /****************************************************************
+ *
+ * Root Complex Endpoint
+ * +-----------------------+ +----------------------+
+ * | | TX CH | |
+ * | | | |
+ * | DEV_TO_MEM <-------------+ MEM_TO_DEV |
+ * | | | |
+ * | | | |
+ * | MEM_TO_DEV +-------------> DEV_TO_MEM |
+ * | | | |
+ * | | RX CH | |
+ * +-----------------------+ +----------------------+
+ *
+ * If eDMA is controlled by the Root complex, TX channel
+ * (EDMA_DIR_WRITE) is used for memory read (DEV_TO_MEM) and RX
+ * channel (EDMA_DIR_READ) is used for memory write (MEM_TO_DEV).
+ *
+ * If eDMA is controlled by the endpoint, RX channel
+ * (EDMA_DIR_READ) is used for memory read (DEV_TO_MEM) and TX
+ * channel (EDMA_DIR_WRITE) is used for memory write (MEM_TO_DEV).
+ *
+ ****************************************************************/
+
+ if ((dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ) ||
+ (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE))
+ read = true;
+
+ /* Program the source and destination addresses for DMA read/write */
+ if (read) {
burst->sar = src_addr;
if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
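
As the comment block spells out, the new test reduces to "every DMA_DEV_TO_MEM transfer is a read", whichever eDMA channel carries it; only the address programming that follows differs, and the else branch (not quoted here) swaps the roles of source and destination. A self-contained sketch of the mapping, with illustrative enum values standing in for the kernel's:

#include <stdbool.h>

enum edma_dir { EDMA_DIR_WRITE, EDMA_DIR_READ };		/* TX / RX channel */
enum dma_transfer_direction { DMA_MEM_TO_DEV, DMA_DEV_TO_MEM };

/*
 * Mirrors the condition added above: DEV_TO_MEM is a read on the RX
 * channel when the endpoint drives the eDMA, and on the TX channel when
 * the Root Complex drives it - a read in both cases.
 */
static bool is_read_transfer(enum dma_transfer_direction dir, enum edma_dir chan_dir)
{
	return (dir == DMA_DEV_TO_MEM && chan_dir == EDMA_DIR_READ) ||
	       (dir == DMA_DEV_TO_MEM && chan_dir == EDMA_DIR_WRITE);
}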
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 97a27e42dd61..4ee84a494936 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -21,30 +21,9 @@
#define TRP_SYN_REG_CNT 6
#define DRP_SYN_REG_CNT 8
-#define LLCC_COMMON_STATUS0 0x0003000c
#define LLCC_LB_CNT_MASK GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT 28
-/* Single & double bit syndrome register offsets */
-#define TRP_ECC_SB_ERR_SYN0 0x0002304c
-#define TRP_ECC_DB_ERR_SYN0 0x00020370
-#define DRP_ECC_SB_ERR_SYN0 0x0004204c
-#define DRP_ECC_DB_ERR_SYN0 0x00042070
-
-/* Error register offsets */
-#define TRP_ECC_ERROR_STATUS1 0x00020348
-#define TRP_ECC_ERROR_STATUS0 0x00020344
-#define DRP_ECC_ERROR_STATUS1 0x00042048
-#define DRP_ECC_ERROR_STATUS0 0x00042044
-
-/* TRP, DRP interrupt register offsets */
-#define DRP_INTERRUPT_STATUS 0x00041000
-#define TRP_INTERRUPT_0_STATUS 0x00020480
-#define DRP_INTERRUPT_CLEAR 0x00041008
-#define DRP_ECC_ERROR_CNTR_CLEAR 0x00040004
-#define TRP_INTERRUPT_0_CLEAR 0x00020484
-#define TRP_ECC_ERROR_CNTR_CLEAR 0x00020440
-
/* Mask and shift macros */
#define ECC_DB_ERR_COUNT_MASK GENMASK(4, 0)
#define ECC_DB_ERR_WAYS_MASK GENMASK(31, 16)
@@ -60,15 +39,6 @@
#define DRP_TRP_INT_CLEAR GENMASK(1, 0)
#define DRP_TRP_CNT_CLEAR GENMASK(1, 0)
-/* Config registers offsets*/
-#define DRP_ECC_ERROR_CFG 0x00040000
-
-/* Tag RAM, Data RAM interrupt register offsets */
-#define CMN_INTERRUPT_0_ENABLE 0x0003001c
-#define CMN_INTERRUPT_2_ENABLE 0x0003003c
-#define TRP_INTERRUPT_0_ENABLE 0x00020488
-#define DRP_INTERRUPT_ENABLE 0x0004100c
-
#define SB_ERROR_THRESHOLD 0x1
#define SB_ERROR_THRESHOLD_SHIFT 24
#define SB_DB_TRP_INTERRUPT_ENABLE 0x3
@@ -86,9 +56,6 @@ enum {
static const struct llcc_edac_reg_data edac_reg_data[] = {
[LLCC_DRAM_CE] = {
.name = "DRAM Single-bit",
- .synd_reg = DRP_ECC_SB_ERR_SYN0,
- .count_status_reg = DRP_ECC_ERROR_STATUS1,
- .ways_status_reg = DRP_ECC_ERROR_STATUS0,
.reg_cnt = DRP_SYN_REG_CNT,
.count_mask = ECC_SB_ERR_COUNT_MASK,
.ways_mask = ECC_SB_ERR_WAYS_MASK,
@@ -96,9 +63,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
[LLCC_DRAM_UE] = {
.name = "DRAM Double-bit",
- .synd_reg = DRP_ECC_DB_ERR_SYN0,
- .count_status_reg = DRP_ECC_ERROR_STATUS1,
- .ways_status_reg = DRP_ECC_ERROR_STATUS0,
.reg_cnt = DRP_SYN_REG_CNT,
.count_mask = ECC_DB_ERR_COUNT_MASK,
.ways_mask = ECC_DB_ERR_WAYS_MASK,
@@ -106,9 +70,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
[LLCC_TRAM_CE] = {
.name = "TRAM Single-bit",
- .synd_reg = TRP_ECC_SB_ERR_SYN0,
- .count_status_reg = TRP_ECC_ERROR_STATUS1,
- .ways_status_reg = TRP_ECC_ERROR_STATUS0,
.reg_cnt = TRP_SYN_REG_CNT,
.count_mask = ECC_SB_ERR_COUNT_MASK,
.ways_mask = ECC_SB_ERR_WAYS_MASK,
@@ -116,9 +77,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
[LLCC_TRAM_UE] = {
.name = "TRAM Double-bit",
- .synd_reg = TRP_ECC_DB_ERR_SYN0,
- .count_status_reg = TRP_ECC_ERROR_STATUS1,
- .ways_status_reg = TRP_ECC_ERROR_STATUS0,
.reg_cnt = TRP_SYN_REG_CNT,
.count_mask = ECC_DB_ERR_COUNT_MASK,
.ways_mask = ECC_DB_ERR_WAYS_MASK,
@@ -126,7 +84,7 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
};
-static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
+static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap)
{
u32 sb_err_threshold;
int ret;
@@ -135,31 +93,31 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg->cmn_interrupt_2_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, TRP_INTERRUPT_0_ENABLE,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg->trp_interrupt_0_enable,
SB_DB_TRP_INTERRUPT_ENABLE,
SB_DB_TRP_INTERRUPT_ENABLE);
if (ret)
return ret;
sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
- ret = regmap_write(llcc_bcast_regmap, DRP_ECC_ERROR_CFG,
+ ret = regmap_write(llcc_bcast_regmap, drv->edac_reg->drp_ecc_error_cfg,
sb_err_threshold);
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg->cmn_interrupt_2_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
return ret;
- ret = regmap_write(llcc_bcast_regmap, DRP_INTERRUPT_ENABLE,
+ ret = regmap_write(llcc_bcast_regmap, drv->edac_reg->drp_interrupt_enable,
SB_DB_DRP_INTERRUPT_ENABLE);
return ret;
}
@@ -173,24 +131,24 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
switch (err_type) {
case LLCC_DRAM_CE:
case LLCC_DRAM_UE:
- ret = regmap_write(drv->bcast_regmap, DRP_INTERRUPT_CLEAR,
+ ret = regmap_write(drv->bcast_regmap, drv->edac_reg->drp_interrupt_clear,
DRP_TRP_INT_CLEAR);
if (ret)
return ret;
- ret = regmap_write(drv->bcast_regmap, DRP_ECC_ERROR_CNTR_CLEAR,
+ ret = regmap_write(drv->bcast_regmap, drv->edac_reg->drp_ecc_error_cntr_clear,
DRP_TRP_CNT_CLEAR);
if (ret)
return ret;
break;
case LLCC_TRAM_CE:
case LLCC_TRAM_UE:
- ret = regmap_write(drv->bcast_regmap, TRP_INTERRUPT_0_CLEAR,
+ ret = regmap_write(drv->bcast_regmap, drv->edac_reg->trp_interrupt_0_clear,
DRP_TRP_INT_CLEAR);
if (ret)
return ret;
- ret = regmap_write(drv->bcast_regmap, TRP_ECC_ERROR_CNTR_CLEAR,
+ ret = regmap_write(drv->bcast_regmap, drv->edac_reg->trp_ecc_error_cntr_clear,
DRP_TRP_CNT_CLEAR);
if (ret)
return ret;
@@ -203,16 +161,53 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
return ret;
}
+struct qcom_llcc_syn_regs {
+ u32 synd_reg;
+ u32 count_status_reg;
+ u32 ways_status_reg;
+};
+
+static void get_reg_offsets(struct llcc_drv_data *drv, int err_type, struct qcom_llcc_syn_regs *syn_regs)
+{
+ const struct llcc_edac_reg *edac_reg = drv->edac_reg;
+
+ switch (err_type) {
+ case LLCC_DRAM_CE:
+ syn_regs->synd_reg = edac_reg->drp_ecc_sb_err_syn0;
+ syn_regs->count_status_reg = edac_reg->drp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg->drp_ecc_error_status0;
+ break;
+ case LLCC_DRAM_UE:
+ syn_regs->synd_reg = edac_reg->drp_ecc_db_err_syn0;
+ syn_regs->count_status_reg = edac_reg->drp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg->drp_ecc_error_status0;
+ break;
+ case LLCC_TRAM_CE:
+ syn_regs->synd_reg = edac_reg->trp_ecc_sb_err_syn0;
+ syn_regs->count_status_reg = edac_reg->trp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg->trp_ecc_error_status0;
+ break;
+ case LLCC_TRAM_UE:
+ syn_regs->synd_reg = edac_reg->trp_ecc_db_err_syn0;
+ syn_regs->count_status_reg = edac_reg->trp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg->trp_ecc_error_status0;
+ break;
+ }
+}
+
/* Dump Syndrome registers data for Tag RAM, Data RAM bit errors*/
static int
dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
{
struct llcc_edac_reg_data reg_data = edac_reg_data[err_type];
+ struct qcom_llcc_syn_regs regs = { };
int err_cnt, err_ways, ret, i;
u32 synd_reg, synd_val;
+ get_reg_offsets(drv, err_type, &regs);
+
for (i = 0; i < reg_data.reg_cnt; i++) {
- synd_reg = reg_data.synd_reg + (i * 4);
+ synd_reg = regs.synd_reg + (i * 4);
ret = regmap_read(drv->regmap, drv->offsets[bank] + synd_reg,
&synd_val);
if (ret)
@@ -223,7 +218,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
}
ret = regmap_read(drv->regmap,
- drv->offsets[bank] + reg_data.count_status_reg,
+ drv->offsets[bank] + regs.count_status_reg,
&err_cnt);
if (ret)
goto clear;
@@ -234,7 +229,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
reg_data.name, err_cnt);
ret = regmap_read(drv->regmap,
- drv->offsets[bank] + reg_data.ways_status_reg,
+ drv->offsets[bank] + regs.ways_status_reg,
&err_ways);
if (ret)
goto clear;
@@ -297,7 +292,7 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
/* Iterate over the banks and look for Tag RAM or Data RAM errors */
for (i = 0; i < drv->num_banks; i++) {
ret = regmap_read(drv->regmap,
- drv->offsets[i] + DRP_INTERRUPT_STATUS,
+ drv->offsets[i] + drv->edac_reg->drp_interrupt_status,
&drp_error);
if (!ret && (drp_error & SB_ECC_ERROR)) {
@@ -313,7 +308,7 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
irq_rc = IRQ_HANDLED;
ret = regmap_read(drv->regmap,
- drv->offsets[i] + TRP_INTERRUPT_0_STATUS,
+ drv->offsets[i] + drv->edac_reg->trp_interrupt_0_status,
&trp_error);
if (!ret && (trp_error & SB_ECC_ERROR)) {
@@ -340,7 +335,7 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
int ecc_irq;
int rc;
- rc = qcom_llcc_core_setup(llcc_driv_data->bcast_regmap);
+ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
if (rc)
return rc;
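
The offsets formerly hardcoded in this file now live in a per-SoC llcc_edac_reg table hung off llcc_drv_data (the declaration lands in include/linux/soc/qcom/llcc-qcom.h, which is in the diffstat but not quoted here). Judging by the fields this driver dereferences and the common_edac_reg/sm8450_edac_reg initializers added in llcc-qcom.c below, the structure is approximately:

struct llcc_edac_reg {
	/* LLCC TRP registers */
	u32 trp_ecc_error_status0;
	u32 trp_ecc_error_status1;
	u32 trp_ecc_sb_err_syn0;
	u32 trp_ecc_db_err_syn0;
	u32 trp_ecc_error_cntr_clear;
	u32 trp_interrupt_0_status;
	u32 trp_interrupt_0_clear;
	u32 trp_interrupt_0_enable;

	/* LLCC Common registers */
	u32 cmn_status0;
	u32 cmn_interrupt_0_enable;
	u32 cmn_interrupt_2_enable;

	/* LLCC DRP registers */
	u32 drp_ecc_error_cfg;
	u32 drp_ecc_error_cntr_clear;
	u32 drp_interrupt_status;
	u32 drp_interrupt_clear;
	u32 drp_interrupt_enable;
	u32 drp_ecc_error_status0;
	u32 drp_ecc_error_status1;
	u32 drp_ecc_sb_err_syn0;
	u32 drp_ecc_db_err_syn0;
};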
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b2a4f998c180..18e26769385f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -452,6 +452,15 @@ config MHI_NET
QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
Say Y or M.
+config MHI_EP_NET
+ tristate "MHI Endpoint network driver"
+ depends on MHI_BUS_EP
+ help
+ This is the network driver for the MHI bus implementation in endpoint
+ devices. It is used to provide the network interface for QCOM modems
+ such as SDX55.
+ Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3f1192d3c52d..c81dd49bf6e1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
+obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
#
# Networking Drivers
diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
new file mode 100644
index 000000000000..5891d17bf4e4
--- /dev/null
+++ b/drivers/net/mhi_ep_net.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI Endpoint Network driver
+ *
+ * Based on drivers/net/mhi_net.c
+ *
+ * Copyright (c) 2022, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+
+struct mhi_ep_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_ep_net_dev {
+ struct mhi_ep_device *mdev;
+ struct net_device *ndev;
+ struct mhi_ep_net_stats stats;
+ struct workqueue_struct *xmit_wq;
+ struct work_struct xmit_work;
+ struct sk_buff_head tx_buffers;
+ spinlock_t tx_lock; /* Lock for protecting tx_buffers */
+ u32 mru;
+};
+
+static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
+ struct mhi_ep_net_dev, xmit_work);
+ struct mhi_ep_device *mdev = mhi_ep_netdev->mdev;
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+ int ret;
+
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(mhi_ep_netdev->ndev);
+ return;
+ }
+
+ __skb_queue_head_init(&q);
+
+ spin_lock_bh(&mhi_ep_netdev->tx_lock);
+ skb_queue_splice_init(&mhi_ep_netdev->tx_buffers, &q);
+ spin_unlock_bh(&mhi_ep_netdev->tx_lock);
+
+ while ((skb = __skb_dequeue(&q))) {
+ ret = mhi_ep_queue_skb(mdev, skb);
+ if (ret) {
+ kfree_skb(skb);
+ goto exit_drop;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.tx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.tx_syncp);
+
+ /* Check if queue is empty */
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(mhi_ep_netdev->ndev);
+ break;
+ }
+
+ consume_skb(skb);
+ cond_resched();
+ }
+
+ return;
+
+exit_drop:
+ u64_stats_update_begin(&mhi_ep_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_ep_netdev->stats.tx_syncp);
+}
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+ spin_lock(&mhi_ep_netdev->tx_lock);
+ skb_queue_tail(&mhi_ep_netdev->tx_buffers, skb);
+ spin_unlock(&mhi_ep_netdev->tx_lock);
+
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_ep_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_ep_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_ep_netdev->stats.rx_errors);
+ } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_ep_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_ep_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_ep_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_ep_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_ep_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_ep_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_RAWIP;
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_ep_netdev_ops;
+ ndev->mtu = MHI_EP_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+static void mhi_ep_net_ul_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_ep_netdev->ndev;
+ struct sk_buff *skb;
+ size_t size;
+
+ size = mhi_ep_netdev->mru ? mhi_ep_netdev->mru : READ_ONCE(ndev->mtu);
+
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ return;
+ }
+
+ skb_copy_to_linear_data(skb, mhi_res->buf_addr, mhi_res->bytes_xferd);
+ skb->dev = mhi_ep_netdev->ndev;
+
+ if (unlikely(mhi_res->transaction_status)) {
+ switch (mhi_res->transaction_status) {
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the UL channel */
+ dev_kfree_skb_any(skb);
+ return;
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ }
+ } else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.rx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ netif_rx(skb);
+ }
+}
+
+static void mhi_ep_net_dl_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ if (unlikely(mhi_res->transaction_status == -ENOTCONN))
+ return;
+
+ /* Since we got enough buffers to queue, wake the queue if stopped */
+ if (netif_queue_stopped(mhi_ep_netdev->ndev)) {
+ netif_wake_queue(mhi_ep_netdev->ndev);
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+ }
+}
+
+static int mhi_ep_net_newlink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev;
+ int ret;
+
+ mhi_ep_netdev = netdev_priv(ndev);
+
+ dev_set_drvdata(&mhi_dev->dev, mhi_ep_netdev);
+ mhi_ep_netdev->ndev = ndev;
+ mhi_ep_netdev->mdev = mhi_dev;
+ mhi_ep_netdev->mru = mhi_dev->mhi_cntrl->mru;
+
+ skb_queue_head_init(&mhi_ep_netdev->tx_buffers);
+ spin_lock_init(&mhi_ep_netdev->tx_lock);
+
+ u64_stats_init(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_ep_netdev->stats.tx_syncp);
+
+ mhi_ep_netdev->xmit_wq = alloc_workqueue("mhi_ep_net_xmit_wq", WQ_HIGHPRI, 0);
+ if (!mhi_ep_netdev->xmit_wq)
+ return -ENOMEM;
+ INIT_WORK(&mhi_ep_netdev->xmit_work, mhi_ep_net_dev_process_queue_packets);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ destroy_workqueue(mhi_ep_netdev->xmit_wq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhi_ep_net_dellink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ destroy_workqueue(mhi_ep_netdev->xmit_wq);
+ free_netdev(ndev);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static int mhi_ep_net_probe(struct mhi_ep_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct net_device *ndev;
+ int ret;
+
+ ndev = alloc_netdev(sizeof(struct mhi_ep_net_dev), (const char *)id->driver_data,
+ NET_NAME_PREDICTABLE, mhi_ep_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ ret = mhi_ep_net_newlink(mhi_dev, ndev);
+ if (ret) {
+ free_netdev(ndev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhi_ep_net_remove(struct mhi_ep_device *mhi_dev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_ep_net_dellink(mhi_dev, mhi_ep_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_ep_net_id_table[] = {
+ /* Software data PATH (from modem CPU) */
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_ep_net_id_table);
+
+static struct mhi_ep_driver mhi_ep_net_driver = {
+ .probe = mhi_ep_net_probe,
+ .remove = mhi_ep_net_remove,
+ .dl_xfer_cb = mhi_ep_net_dl_callback,
+ .ul_xfer_cb = mhi_ep_net_ul_callback,
+ .id_table = mhi_ep_net_id_table,
+ .driver = {
+ .name = "mhi_ep_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_ep_driver(mhi_ep_net_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("MHI Endpoint Network driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dfcdeb432dc8..0919c96dcdbd 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return pp->irq;
/* MSI IRQ is muxed */
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 467c8d1cd7e4..4f2010bd9cd7 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -292,7 +292,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
}
pp->ops = &exynos_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0eda8236c125..fb2bf4bf5ba0 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -636,6 +636,63 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
+static int dw_pcie_iatu_config(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+ void *addr;
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_kcalloc(dev,
+ BITS_TO_LONGS(pci->num_ib_windows),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ return -ENOMEM;
+
+ ep->ob_window_map = devm_kcalloc(dev,
+ BITS_TO_LONGS(pci->num_ob_windows),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ return -ENOMEM;
+
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ ep->outbound_addr = addr;
+
+ return 0;
+}
+
+static int dw_pcie_ep_func_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epc *epc = ep->epc;
+ struct device *dev = pci->dev;
+ u8 func_no;
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ return 0;
+}
+
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -643,7 +700,22 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
unsigned int nbars;
u8 hdr_type;
u32 reg;
- int i;
+ int ret, i;
+
+ if (ep->core_init_notifier) {
+ ret = dw_pcie_iatu_config(ep);
+ if (ret)
+ return ret;
+ }
+
+ if (ep->core_init_notifier) {
+ ret = dw_pcie_ep_func_init(ep);
+ if (ret)
+ return ret;
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+ }
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -677,8 +749,6 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
- void *addr;
- u8 func_no;
struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -686,7 +756,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
+
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ ep->core_init_notifier = true;
+ }
INIT_LIST_HEAD(&ep->func_list);
@@ -708,7 +783,11 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
}
}
- dw_pcie_iatu_detect(pci);
+ if (!ep->core_init_notifier) {
+ ret = dw_pcie_iatu_config(ep);
+ if (ret)
+ return ret;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
@@ -717,26 +796,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ib_windows),
- sizeof(long),
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
-
- ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ob_windows),
- sizeof(long),
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
-
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
-
if (pci->link_gen < 1)
pci->link_gen = of_pci_get_max_link_speed(np);
@@ -753,23 +812,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
+ if (!ep->core_init_notifier) {
+ ret = dw_pcie_ep_func_init(ep);
+ if (ret)
+ return ret;
- list_add_tail(&ep_func->list, &ep->func_list);
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
}
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
@@ -784,12 +835,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
}
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
+ if (!ep->core_init_notifier)
+ return dw_pcie_ep_init_complete(ep);
- return dw_pcie_ep_init_complete(ep);
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
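
Taken together, these hunks split endpoint initialization on epc_features->core_init_notifier: controllers whose core can be reset after probe defer iATU configuration, per-function capability discovery and the ep_init() callback to dw_pcie_ep_init_complete(), which the glue driver triggers once the core is up. A condensed view of the resulting control flow (a sketch of the hunks above, unchanged code elided):

int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	const struct pci_epc_features *epc_features;
	int ret;

	if (ep->ops->get_features) {
		epc_features = ep->ops->get_features(ep);
		if (epc_features->core_init_notifier)
			ep->core_init_notifier = true;
	}

	/* ... resource and EPC setup ... */

	if (!ep->core_init_notifier) {
		/* Core already up: configure windows and functions now */
		ret = dw_pcie_iatu_config(ep);
		ret = dw_pcie_ep_func_init(ep);	/* followed by ep->ops->ep_init(ep) */
		/* ... */
		return dw_pcie_ep_init_complete(ep);
	}

	/* Otherwise dw_pcie_ep_init_complete() redoes these steps on CORE_INIT */
	return 0;
}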
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 9979302532b7..e1a067229057 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -257,8 +257,11 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
static void dw_pcie_free_msi(struct pcie_port *pp)
{
- if (pp->msi_irq)
- irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ if (pp->msi_irq[ctrl])
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl], NULL, NULL);
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
@@ -368,12 +371,37 @@ int dw_pcie_host_init(struct pcie_port *pp)
for (ctrl = 0; ctrl < num_ctrls; ctrl++)
pp->irq_mask[ctrl] = ~0;
- if (!pp->msi_irq) {
- pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
- if (pp->msi_irq < 0) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
+ if (!pp->msi_irq[0]) {
+ int irq = platform_get_irq_byname_optional(pdev, "msi");
+
+ if (irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ }
+ pp->msi_irq[0] = irq;
+ }
+
+ if (pp->has_split_msi_irq) {
+ char irq_name[] = "msiXXX";
+ int irq;
+
+ for (ctrl = 1; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl])
+ continue;
+
+ snprintf(irq_name, sizeof(irq_name), "msi%d", ctrl + 1);
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq == -ENXIO) {
+ num_ctrls = ctrl;
+ pp->num_vectors = num_ctrls * MAX_MSI_IRQS_PER_CTRL;
+ dev_warn(dev, "Limiting number of MSI IRQs to %d\n", pp->num_vectors);
+ break;
+ }
+ if (irq < 0)
+ return irq;
+
+ pp->msi_irq[ctrl] = irq;
}
}
@@ -383,10 +411,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
- if (pp->msi_irq > 0)
- irq_set_chained_handler_and_data(pp->msi_irq,
- dw_chained_msi_isr,
- pp);
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr,
+ pp);
+ }
ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7d6e9b7576be..e34076320632 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -179,6 +179,7 @@ struct dw_pcie_host_ops {
struct pcie_port {
bool has_msi_ctrl:1;
+ bool has_split_msi_irq:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
@@ -187,7 +188,7 @@ struct pcie_port {
u32 io_size;
int irq;
const struct dw_pcie_host_ops *ops;
- int msi_irq;
+ int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
u16 msi_msg;
@@ -242,6 +243,7 @@ struct dw_pcie_ep {
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
+ bool core_init_notifier;
};
struct dw_pcie_ops {
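
msi_irq becoming a MAX_MSI_CTRLS-sized array pairs with the split-IRQ handling added in pcie-designware-host.c: each MSI controller services a fixed block of vectors, and a glue driver with one interrupt line per controller opts in the way pcie-qcom.c does below. The arithmetic, assuming the pre-existing constants in this header (values shown for orientation, not part of this diff):

#define MAX_MSI_IRQS		256
#define MAX_MSI_IRQS_PER_CTRL	32
#define MAX_MSI_CTRLS		(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)	/* 8 */

/* Glue driver opt-in (see pcie-qcom.c below): */
pp->num_vectors = MAX_MSI_IRQS;
pp->has_split_msi_irq = true;

/*
 * The platform then provides IRQs named "msi", "msi2", ..., "msi8".
 * If "msiN" is absent (-ENXIO), dw_pcie_host_init() trims num_vectors
 * to the controllers actually wired up.
 */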
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 1ac29a6eef22..297e6e926c00 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -338,7 +338,7 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
int ret;
pp->ops = &keembay_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = keembay_pcie_setup_msi_irq(pcie);
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index ec99116ad05c..9f92d53da81a 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -518,9 +518,11 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ pci_epc_linkdown(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
dev_dbg(dev, "Received BME event. Link is enabled!\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ pci_epc_bme_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -664,12 +666,6 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
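
These two calls are the producer side of the new notifier events: the global IRQ thread translates PARF interrupt status bits into EPC-level notifications, which the EPC core fans out to function drivers. The full path through this series, condensed:

/* pcie-qcom-ep.c (above): signal the events */
pci_epc_linkdown(pci->ep.epc);		/* on PARF_INT_ALL_LINK_DOWN */
pci_epc_bme_notify(pci->ep.epc);	/* on PARF_INT_ALL_BME */

/* pci-epc-core.c (below): fan out on the notifier chain */
blocking_notifier_call_chain(&epc->notifier, LINK_DOWN, NULL);
blocking_notifier_call_chain(&epc->notifier, BME, NULL);

/* pci-epf-mhi.c (below): react in the EPF notifier callback */
case LINK_DOWN:
	mhi_ep_power_down(mhi_cntrl);	/* tear the MHI stack down */
	break;
case BME:
	mhi_ep_power_up(mhi_cntrl);	/* host enabled bus mastering */
	break;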
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 2ea13750b492..06125ebf31b3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1592,6 +1592,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
+ pp->has_split_msi_irq = true;
pcie->pci = pci;
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 1569e82b5568..cc7776833810 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cc2678490162..7056072637ab 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -2262,7 +2262,7 @@ static void tegra194_pcie_shutdown(struct platform_device *pdev)
disable_irq(pcie->pci.pp.irq);
if (IS_ENABLED(CONFIG_PCI_MSI))
- disable_irq(pcie->pci.pp.msi_irq);
+ disable_irq(pcie->pci.pp.msi_irq[0]);
tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..93497fb70e31 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,13 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_MHI
+ tristate "PCI Endpoint driver for MHI bus"
+ depends on PCI_ENDPOINT && MHI_BUS_EP
+ help
+ Enable this configuration option to enable the PCI Endpoint
+ driver for Modem Host Interface (MHI) bus found in Qualcomm
+ modems such as SDX55.
+
+ If in doubt, say "N" to disable Endpoint driver for MHI bus.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..eee99b2e9103 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_MHI) += pci-epf-mhi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
new file mode 100644
index 000000000000..a8119841a252
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI EPF driver for MHI Endpoint devices
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#define MHI_VERSION_1_0 0x01000000
+
+struct pci_epf_mhi_ep_info {
+ const struct mhi_ep_cntrl_config *config;
+ struct pci_epf_header *epf_header;
+ enum pci_barno bar_num;
+ u32 epf_flags;
+ u32 msi_count;
+ u32 mru;
+};
+
+#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_TO_DEVICE, \
+ }
+
+#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_FROM_DEVICE, \
+ }
+
+static const struct mhi_ep_channel_config mhi_v1_channels[] = {
+ MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_UL(36, "IP_SW0"),
+ MHI_EP_CHANNEL_CONFIG_DL(37, "IP_SW0"),
+};
+
+static const struct mhi_ep_cntrl_config mhi_v1_config = {
+ .max_channels = 128,
+ .num_channels = ARRAY_SIZE(mhi_v1_channels),
+ .ch_cfg = mhi_v1_channels,
+ .mhi_version = MHI_VERSION_1_0,
+};
+
+static struct pci_epf_header sdx55_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
+ .subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sdx55_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sdx55_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+};
+
+struct pci_epf_mhi {
+ const struct pci_epf_mhi_ep_info *info;
+ struct mhi_ep_cntrl mhi_cntrl;
+ struct pci_epf *epf;
+ struct mutex lock;
+ void __iomem *mmio;
+ resource_size_t mmio_phys;
+ enum pci_notify_event event;
+ u32 mmio_size;
+ int irq;
+ bool mhi_registered;
+};
+
+static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t *phys_ptr, void __iomem **virt, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf_mhi->epf->epc;
+ size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+ void __iomem *virt_addr;
+ phys_addr_t phys_addr;
+ int ret;
+
+ virt_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, size + offset);
+ if (!virt_addr)
+ return -ENOMEM;
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, pci_addr - offset, size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, phys_addr, virt_addr, size + offset);
+
+ return ret;
+ }
+
+ *phys_ptr = phys_addr + offset;
+ *virt = virt_addr + offset;
+
+ return 0;
+}
+
+static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t phys_addr, void __iomem *virt_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+ size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr - offset);
+ pci_epc_mem_free_addr(epc, phys_addr - offset, virt_addr - offset, size + offset);
+}
+
+static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ /*
+ * Vector is incremented by 1 here as the DWC core will decrement it before
+ * writing to iATU.
+ */
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, vector + 1);
+}
+
+static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void __iomem *to,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf_mhi->epf->epc;
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ size_t offset = from % 0x1000;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ tre_buf = pci_epc_mem_alloc_addr(epc, &tre_phys, size + offset);
+ if (!tre_buf) {
+ mutex_unlock(&epf_mhi->lock);
+ return -ENOMEM;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, tre_phys, from - offset,
+ size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+ memcpy_fromio(to, tre_buf + offset, size);
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, tre_phys);
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ return 0;
+}
+
+static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl, void __iomem *from, u64 to,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf_mhi->epf->epc;
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ size_t offset = to % 0x1000;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ tre_buf = pci_epc_mem_alloc_addr(epc, &tre_phys, size + offset);
+ if (!tre_buf) {
+ mutex_unlock(&epf_mhi->lock);
+ return -ENOMEM;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, tre_phys, to - offset,
+ size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+ memcpy_toio(tre_buf + offset, from, size);
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, tre_phys);
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ return 0;
+}
+
+static int pci_epf_mhi_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ switch (val) {
+ case CORE_INIT:
+ epf_bar->phys_addr = epf_mhi->mmio_phys;
+ epf_bar->size = epf_mhi->mmio_size;
+ epf_bar->barno = info->bar_num;
+ epf_bar->flags = info->epf_flags;
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Failed to set BAR: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ order_base_2(info->msi_count));
+ if (ret) {
+ dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Failed to set Configuration header: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ break;
+ case LINK_UP:
+ mhi_cntrl->mmio = epf_mhi->mmio;
+ mhi_cntrl->irq = epf_mhi->irq;
+ mhi_cntrl->mru = info->mru;
+
+ /* Assign the struct dev of PCI EP as MHI controller device */
+ mhi_cntrl->cntrl_dev = epc->dev.parent;
+ mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
+ mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
+ mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+ mhi_cntrl->read_from_host = pci_epf_mhi_read_from_host;
+ mhi_cntrl->write_to_host = pci_epf_mhi_write_to_host;
+
+ /* Register the MHI EP controller */
+ ret = mhi_ep_register_controller(mhi_cntrl, info->config);
+ if (ret) {
+ dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ epf_mhi->mhi_registered = true;
+ break;
+ case LINK_DOWN:
+ if (epf_mhi->mhi_registered) {
+ mhi_ep_power_down(mhi_cntrl);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+
+ break;
+ case BME:
+ /* Power up the MHI EP stack if link is up and stack is in power down state */
+ if (!mhi_cntrl->enabled && epf_mhi->mhi_registered) {
+ ret = mhi_ep_power_up(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ return NOTIFY_BAD;
+ }
+ }
+
+ break;
+ default:
+ dev_err(&epf->dev, "Invalid MHI EP notifier event: %lu\n", val);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int pci_epf_mhi_bind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ struct platform_device *pdev = to_platform_device(epc->dev.parent);
+ struct device *dev = &epf->dev;
+ struct resource *res;
+ int ret;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ /* Get MMIO base address from Endpoint controller */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res)
+ return -ENODEV;
+
+ epf_mhi->mmio_phys = res->start;
+ epf_mhi->mmio_size = resource_size(res);
+
+ epf_mhi->mmio = ioremap_wc(epf_mhi->mmio_phys, epf_mhi->mmio_size);
+ if (!epf_mhi->mmio)
+ return -ENOMEM;
+
+ ret = platform_get_irq_byname(pdev, "doorbell");
+ if (ret < 0) {
+ dev_err(dev, "Failed to get Doorbell IRQ\n");
+ iounmap(epf_mhi->mmio);
+ return ret;
+ }
+
+ epf_mhi->irq = ret;
+
+ epf->nb.notifier_call = pci_epf_mhi_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+
+ return 0;
+}
+
+static void pci_epf_mhi_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ pci_epc_unregister_notifier(epc, &epf->nb);
+
+ /*
+ * Forcefully power down the MHI EP stack. The only way to bring it back to a
+ * working state after a subsequent bind is a BME event from the host.
+ */
+ if (epf_mhi->mhi_registered) {
+ mhi_ep_power_down(mhi_cntrl);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+
+ iounmap(epf_mhi->mmio);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
+static int pci_epf_mhi_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
+{
+ struct pci_epf_mhi_ep_info *info = (struct pci_epf_mhi_ep_info *) id->driver_data;
+ struct pci_epf_mhi *epf_mhi;
+ struct device *dev = &epf->dev;
+
+ epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
+ if (!epf_mhi)
+ return -ENOMEM;
+
+ epf->header = info->epf_header;
+ epf_mhi->info = info;
+ epf_mhi->epf = epf;
+
+ mutex_init(&epf_mhi->lock);
+
+ epf_set_drvdata(epf, epf_mhi);
+
+ return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
+ {
+ .name = "sdx55", .driver_data = (kernel_ulong_t) &sdx55_info,
+ },
+ {},
+};
+
+static struct pci_epf_ops pci_epf_mhi_ops = {
+ .unbind = pci_epf_mhi_unbind,
+ .bind = pci_epf_mhi_bind,
+};
+
+static struct pci_epf_driver pci_epf_mhi_driver = {
+ .driver.name = "pci_epf_mhi",
+ .probe = pci_epf_mhi_probe,
+ .id_table = pci_epf_mhi_ids,
+ .ops = &pci_epf_mhi_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_mhi_init(void)
+{
+ return pci_epf_register_driver(&pci_epf_mhi_driver);
+}
+module_init(pci_epf_mhi_init);
+
+static void __exit pci_epf_mhi_exit(void)
+{
+ pci_epf_unregister_driver(&pci_epf_mhi_driver);
+}
+module_exit(pci_epf_mhi_exit);
+
+MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 9a00448c7e61..980b4ecf19a2 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -2075,11 +2075,12 @@ static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
/**
* epf_ntb_probe() - Probe NTB function driver
* @epf: NTB endpoint function device
+ * @id: NTB endpoint function device ID
*
* Probe NTB function driver when endpoint function bus detects a NTB
* endpoint function.
*/
-static int epf_ntb_probe(struct pci_epf *epf)
+static int epf_ntb_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
{
struct epf_ntb *ntb;
struct device *dev;
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5b833f00e980..d917a9d2be09 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -884,7 +884,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
epf_test->dma_supported = false;
- if (linkup_notifier) {
+ if (linkup_notifier || core_init_notifier) {
epf->nb.notifier_call = pci_epf_test_notifier;
pci_epc_register_notifier(epc, &epf->nb);
} else {
@@ -901,7 +901,7 @@ static const struct pci_epf_device_id pci_epf_test_ids[] = {
{},
};
-static int pci_epf_test_probe(struct pci_epf *epf)
+static int pci_epf_test_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
{
struct pci_epf_test *epf_test;
struct device *dev = &epf->dev;
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d4850bdd837f..2cfd5fd2794c 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -178,6 +178,9 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
if (kstrtobool(page, &start) < 0)
return -EINVAL;
+ if (WARN_ON_ONCE(start == epc_group->start))
+ return 0;
+
if (!start) {
pci_epc_stop(epc);
epc_group->start = 0;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 3bc9273d0a08..6ad9b38b63a9 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -693,11 +693,28 @@ void pci_epc_linkup(struct pci_epc *epc)
if (!epc || IS_ERR(epc))
return;
- atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
+ blocking_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_linkdown() - Notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ * @epc: the EPC device which has dropped the link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ */
+void pci_epc_linkdown(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ blocking_notifier_call_chain(&epc->notifier, LINK_DOWN, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkdown);
+
+/**
* pci_epc_init_notify() - Notify the EPF device that EPC device's core
* initialization is completed.
* @epc: the EPC device whose core initialization is completed
@@ -710,11 +727,28 @@ void pci_epc_init_notify(struct pci_epc *epc)
if (!epc || IS_ERR(epc))
return;
- atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
+ blocking_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
+ * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
+ * the BME event from the Root Complex
+ * @epc: the EPC device that received the BME event
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Bus
+ * Master Enable (BME) event from the Root Complex.
+ */
+void pci_epc_bme_notify(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ blocking_notifier_call_chain(&epc->notifier, BME, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
@@ -774,7 +808,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
mutex_init(&epc->lock);
INIT_LIST_HEAD(&epc->pci_epf);
- ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&epc->notifier);
device_initialize(&epc->dev);
epc->dev.class = pci_epc_class;
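
The switch from an atomic to a blocking notifier chain is what lets the new consumers sleep: atomic chains run their callbacks under rcu_read_lock() in atomic context, while blocking chains hold an rwsem and run in process context. The MHI EPF callback above takes a mutex and registers an MHI controller from its notifier, which would be illegal on an atomic chain. A minimal illustration (hypothetical callback, shown only to motivate the change):

static int my_epf_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret;

	/* Both of these may sleep - fine on a blocking chain, a bug on an atomic one */
	mutex_lock(&my_lock);
	ret = mhi_ep_register_controller(mhi_cntrl, config);	/* allocates memory */
	mutex_unlock(&my_lock);

	return ret ? NOTIFY_BAD : NOTIFY_OK;
}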
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 9ed556936f48..0882ac829e95 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -494,11 +494,13 @@ static const struct device_type pci_epf_type = {
};
static int
-pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+pci_epf_match_id(const struct pci_epf_device_id *id, struct pci_epf *epf)
{
while (id->name[0]) {
- if (strcmp(epf->name, id->name) == 0)
+ if (strcmp(epf->name, id->name) == 0) {
+ epf->id = id;
return true;
+ }
id++;
}
@@ -526,7 +528,7 @@ static int pci_epf_device_probe(struct device *dev)
epf->driver = driver;
- return driver->probe(epf);
+ return driver->probe(epf, epf->id);
}
static void pci_epf_device_remove(struct device *dev)
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 4b143cf7b4ce..474b5d6ab4b3 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -104,6 +104,7 @@ struct qcom_llcc_config {
int size;
bool need_llcc_cfg;
const u32 *reg_offset;
+ const struct llcc_edac_reg *edac_reg;
};
enum llcc_reg_offset {
@@ -296,6 +297,60 @@ static const struct llcc_slice_config sm8450_data[] = {
{LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
};
+static const struct llcc_edac_reg common_edac_reg = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2304c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3000c,
+ .cmn_interrupt_0_enable = 0x3001c,
+ .cmn_interrupt_2_enable = 0x3003c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x40000,
+ .drp_ecc_error_cntr_clear = 0x40004,
+ .drp_interrupt_status = 0x41000,
+ .drp_interrupt_clear = 0x41008,
+ .drp_interrupt_enable = 0x4100c,
+ .drp_ecc_error_status0 = 0x42044,
+ .drp_ecc_error_status1 = 0x42048,
+ .drp_ecc_sb_err_syn0 = 0x4204c,
+ .drp_ecc_db_err_syn0 = 0x42070,
+};
+
+static const struct llcc_edac_reg sm8450_edac_reg = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2034c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3400c,
+ .cmn_interrupt_0_enable = 0x3401c,
+ .cmn_interrupt_2_enable = 0x3403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x50000,
+ .drp_ecc_error_cntr_clear = 0x50004,
+ .drp_interrupt_status = 0x50020,
+ .drp_interrupt_clear = 0x50028,
+ .drp_interrupt_enable = 0x5002c,
+ .drp_ecc_error_status0 = 0x520f4,
+ .drp_ecc_error_status1 = 0x520f8,
+ .drp_ecc_sb_err_syn0 = 0x520fc,
+ .drp_ecc_db_err_syn0 = 0x52120,
+};
+
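Routing the offsets through the per-SoC config keeps the EDAC consumer layout-agnostic: qcom_edac.c can dereference whatever table the probed SoC registered instead of hard-coding addresses (note how the sm8450 common and DRP blocks moved relative to the common layout above). A minimal sketch of such a read, with the helper name invented; the real accessors in qcom_edac.c may be shaped differently:

#include <linux/regmap.h>
#include <linux/soc/qcom/llcc-qcom.h>

/* Illustrative: fetch the TRP single-bit ECC syndrome via per-SoC offsets */
static int foo_llcc_trp_sb_syndrome(struct llcc_drv_data *drv,
				    unsigned int *synd)
{
	const struct llcc_edac_reg *reg = drv->edac_reg;

	return regmap_read(drv->regmap, reg->trp_ecc_sb_err_syn0, synd);
}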
static const u32 llcc_v1_2_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00030000,
[LLCC_COMMON_STATUS0] = 0x0003000c,
@@ -311,6 +366,7 @@ static const struct qcom_llcc_config sc7180_cfg = {
.size = ARRAY_SIZE(sc7180_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sc7280_cfg = {
@@ -318,6 +374,7 @@ static const struct qcom_llcc_config sc7280_cfg = {
.size = ARRAY_SIZE(sc7280_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sc8180x_cfg = {
@@ -339,6 +396,7 @@ static const struct qcom_llcc_config sdm845_cfg = {
.size = ARRAY_SIZE(sdm845_data),
.need_llcc_cfg = false,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sm6350_cfg = {
@@ -346,6 +404,7 @@ static const struct qcom_llcc_config sm6350_cfg = {
.size = ARRAY_SIZE(sm6350_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sm8150_cfg = {
@@ -353,6 +412,7 @@ static const struct qcom_llcc_config sm8150_cfg = {
.size = ARRAY_SIZE(sm8150_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sm8250_cfg = {
@@ -360,6 +420,7 @@ static const struct qcom_llcc_config sm8250_cfg = {
.size = ARRAY_SIZE(sm8250_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sm8350_cfg = {
@@ -367,6 +428,7 @@ static const struct qcom_llcc_config sm8350_cfg = {
.size = ARRAY_SIZE(sm8350_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v1_2_reg_offset,
+ .edac_reg = &common_edac_reg,
};
static const struct qcom_llcc_config sm8450_cfg = {
@@ -374,6 +436,7 @@ static const struct qcom_llcc_config sm8450_cfg = {
.size = ARRAY_SIZE(sm8450_data),
.need_llcc_cfg = true,
.reg_offset = llcc_v21_reg_offset,
+ .edac_reg = &sm8450_edac_reg,
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -774,6 +837,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
+ drv_data->edac_reg = cfg->edac_reg;
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index a48778e1a4ee..7a5c7705f86f 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -149,7 +149,7 @@ struct pci_epc {
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
- struct atomic_notifier_head notifier;
+ struct blocking_notifier_head notifier;
};
/**
@@ -195,7 +195,13 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
static inline int
pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
{
- return atomic_notifier_chain_register(&epc->notifier, nb);
+ return blocking_notifier_chain_register(&epc->notifier, nb);
+}
+
+static inline int
+pci_epc_unregister_notifier(struct pci_epc *epc, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&epc->notifier, nb);
}
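The new unregister helper closes a gap: an EPF could previously register for EPC events but had no way to detach on unbind. The expected pairing is register in bind(), unregister in unbind(); a sketch assuming a callback foo_epf_notifier defined elsewhere:

#include <linux/notifier.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int foo_epf_notifier(struct notifier_block *nb, unsigned long val,
			    void *data);	/* assumed, not shown here */

static int foo_epf_bind(struct pci_epf *epf)
{
	epf->nb.notifier_call = foo_epf_notifier;
	return pci_epc_register_notifier(epf->epc, &epf->nb);
}

static void foo_epf_unbind(struct pci_epf *epf)
{
	/* Balance the registration made in bind() */
	pci_epc_unregister_notifier(epf->epc, &epf->nb);
}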
struct pci_epc *
@@ -209,7 +215,9 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_bme_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 009a07147c61..e03c57129ed5 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -20,6 +20,8 @@ enum pci_epc_interface_type;
enum pci_notify_event {
CORE_INIT,
LINK_UP,
+ LINK_DOWN,
+ BME,
};
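Because the chain is now blocking rather than atomic, notifier callbacks run in process context and may sleep (take mutexes, allocate with GFP_KERNEL), which drivers like the MHI endpoint function in this series need. A hedged sketch of a callback dispatching on the extended event set, with log messages standing in for real driver work:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pci-epf.h>

static int foo_epf_notifier(struct notifier_block *nb, unsigned long val,
			    void *data)
{
	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

	switch (val) {
	case CORE_INIT:
		/* process context: sleeping initialization is fine here */
		dev_info(&epf->dev, "EPC core (re)initialized\n");
		break;
	case LINK_UP:
		dev_info(&epf->dev, "link up, start queuing transfers\n");
		break;
	case LINK_DOWN:
		dev_info(&epf->dev, "link down, quiesce transfers\n");
		break;
	case BME:
		dev_info(&epf->dev, "host enabled bus mastering\n");
		break;
	}

	return NOTIFY_OK;
}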
enum pci_barno {
@@ -84,7 +86,7 @@ struct pci_epf_ops {
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
- int (*probe)(struct pci_epf *epf);
+ int (*probe)(struct pci_epf *epf, const struct pci_epf_device_id *id);
void (*remove)(struct pci_epf *epf);
struct device_driver driver;
@@ -126,6 +128,7 @@ struct pci_epf_bar {
* @epc: the EPC device to which this EPF device is bound
* @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
+ * @id: Pointer to the EPF device ID
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
@@ -153,6 +156,7 @@ struct pci_epf {
struct pci_epc *epc;
struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
+ const struct pci_epf_device_id *id;
struct list_head list;
struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 9ed5384c5ca1..d12ec3200d6e 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -57,9 +57,6 @@ struct llcc_slice_desc {
/**
* struct llcc_edac_reg_data - llcc edac registers data for each error type
* @name: Name of the error
- * @synd_reg: Syndrome register address
- * @count_status_reg: Status register address to read the error count
- * @ways_status_reg: Status register address to read the error ways
* @reg_cnt: Number of registers
* @count_mask: Mask value to get the error count
* @ways_mask: Mask value to get the error ways
@@ -68,9 +65,6 @@ struct llcc_slice_desc {
*/
struct llcc_edac_reg_data {
char *name;
- u64 synd_reg;
- u64 count_status_reg;
- u64 ways_status_reg;
u32 reg_cnt;
u32 count_mask;
u32 ways_mask;
@@ -78,6 +72,34 @@ struct llcc_edac_reg_data {
u8 ways_shift;
};
+struct llcc_edac_reg {
+ /* LLCC TRP registers */
+ u32 trp_ecc_error_status0;
+ u32 trp_ecc_error_status1;
+ u32 trp_ecc_sb_err_syn0;
+ u32 trp_ecc_db_err_syn0;
+ u32 trp_ecc_error_cntr_clear;
+ u32 trp_interrupt_0_status;
+ u32 trp_interrupt_0_clear;
+ u32 trp_interrupt_0_enable;
+
+ /* LLCC Common registers */
+ u32 cmn_status0;
+ u32 cmn_interrupt_0_enable;
+ u32 cmn_interrupt_2_enable;
+
+ /* LLCC DRP registers */
+ u32 drp_ecc_error_cfg;
+ u32 drp_ecc_error_cntr_clear;
+ u32 drp_interrupt_status;
+ u32 drp_interrupt_clear;
+ u32 drp_interrupt_enable;
+ u32 drp_ecc_error_status0;
+ u32 drp_ecc_error_status1;
+ u32 drp_ecc_sb_err_syn0;
+ u32 drp_ecc_db_err_syn0;
+};
+
/**
* struct llcc_drv_data - Data associated with the llcc driver
* @regmap: regmap associated with the llcc device
@@ -96,6 +118,7 @@ struct llcc_drv_data {
struct regmap *regmap;
struct regmap *bcast_regmap;
const struct llcc_slice_config *cfg;
+ const struct llcc_edac_reg *edac_reg;
struct mutex lock;
u32 cfg_size;
u32 max_slices;