author     Kalle Valo <kvalo@codeaurora.org>    2019-12-20 16:15:46 +0200
committer  Kalle Valo <kvalo@codeaurora.org>    2019-12-20 16:15:46 +0200
commit     dc3ad30e8022e5f833d4ae26935f03aff6d1f733 (patch)
tree       95b23492c03de22b7c147a676fd6e3f2a60c625c
parent     8753c0269822a4e8a89ef28ba733a97bb2387f80 (diff)
parent     782f5184d4809ad6896b430e4d2b0ca1b2bccc6b (diff)

Merge branch 'mhi-upstream-wip' of https://git.linaro.org/people/manivannan.sadhasivam/linux into ath11k-qca6390-mhi (ath11k-qca6390-mhi-201912201415)
-rw-r--r--  Documentation/index.rst                     1
-rw-r--r--  Documentation/mhi/index.rst                18
-rw-r--r--  Documentation/mhi/mhi.rst                 218
-rw-r--r--  Documentation/mhi/topology.rst             60
-rw-r--r--  MAINTAINERS                                 7
-rw-r--r--  drivers/bus/Kconfig                         1
-rw-r--r--  drivers/bus/Makefile                        3
-rw-r--r--  drivers/bus/mhi/Kconfig                    15
-rw-r--r--  drivers/bus/mhi/Makefile                    7
-rw-r--r--  drivers/bus/mhi/controllers/Kconfig        13
-rw-r--r--  drivers/bus/mhi/controllers/Makefile        1
-rw-r--r--  drivers/bus/mhi/controllers/mhi_qcom.c    471
-rw-r--r--  drivers/bus/mhi/controllers/mhi_qcom.h     25
-rw-r--r--  drivers/bus/mhi/core/Makefile               3
-rw-r--r--  drivers/bus/mhi/core/mhi_boot.c           531
-rw-r--r--  drivers/bus/mhi/core/mhi_init.c          1255
-rw-r--r--  drivers/bus/mhi/core/mhi_internal.h       682
-rw-r--r--  drivers/bus/mhi/core/mhi_main.c          1372
-rw-r--r--  drivers/bus/mhi/core/mhi_pm.c            1062
-rw-r--r--  include/linux/mhi.h                       650
-rw-r--r--  include/linux/mod_devicetable.h            12
21 files changed, 6407 insertions, 0 deletions
diff --git a/Documentation/index.rst b/Documentation/index.rst index e99d0bd2589d..edc9b211bbff 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -133,6 +133,7 @@ needed). misc-devices/index mic/index scheduler/index + mhi/index Architecture-agnostic documentation ----------------------------------- diff --git a/Documentation/mhi/index.rst b/Documentation/mhi/index.rst new file mode 100644 index 000000000000..1d8dec302780 --- /dev/null +++ b/Documentation/mhi/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=== +MHI +=== + +.. toctree:: + :maxdepth: 1 + + mhi + topology + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/mhi/mhi.rst b/Documentation/mhi/mhi.rst new file mode 100644 index 000000000000..718dbbdc7a04 --- /dev/null +++ b/Documentation/mhi/mhi.rst @@ -0,0 +1,218 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================== +MHI (Modem Host Interface) +========================== + +This document provides information about the MHI protocol. + +Overview +======== + +MHI is a protocol developed by Qualcomm Innovation Center, Inc., It is used +by the host processors to control and communicate with modem devices over high +speed peripheral buses or shared memory. Even though MHI can be easily adapted +to any peripheral buses, it is primarily used with PCIe based devices. MHI +provides logical channels over the physical buses and allows transporting the +modem protocols, such as IP data packets, modem control messages, and +diagnostics over at least one of those logical channels. Also, the MHI +protocol provides data acknowledgment feature and manages the power state of the +modems via one or more logical channels. + +MHI Internals +============= + +MMIO +---- + +MMIO (Memory mapped IO) consists of a set of registers in the device hardware, +which are mapped to the host memory space by the peripheral buses like PCIe. +Following are the major components of MMIO register space: + +MHI control registers: Access to MHI configurations registers + +MHI BHI registers: BHI (Boot Host Interface) registers are used by the host +for downloading the firmware to the device before MHI initialization. + +Channel Doorbell array: Channel Doorbell (DB) registers used by the host to +notify the device when there is new work to do. + +Event Doorbell array: Associated with event context array, the Event Doorbell +(DB) registers are used by the host to notify the device when new events are +available. + +Debug registers: A set of registers and counters used by the device to expose +debugging information like performance, functional, and stability to the host. + +Data structures +--------------- + +All data structures used by MHI are in the host system memory. Using the +physical interface, the device accesses those data structures. MHI data +structures and data buffers in the host system memory regions are mapped for +the device. + +Channel context array: All channel configurations are organized in channel +context data array. + +Transfer rings: Used by the host to schedule work items for a channel. The +transfer rings are organized as a circular queue of Transfer Descriptors (TD). + +Event context array: All event configurations are organized in the event context +data array. + +Event rings: Used by the device to send completion and state transition messages +to the host + +Command context array: All command configurations are organized in command +context data array. 
+ +Command rings: Used by the host to send MHI commands to the device. The command +rings are organized as a circular queue of Command Descriptors (CD). + +Channels +-------- + +MHI channels are logical, unidirectional data pipes between a host and a device. +The concept of channels in MHI is similar to endpoints in USB. MHI supports up +to 256 channels. However, specific device implementations may support less than +the maximum number of channels allowed. + +Two unidirectional channels with their associated transfer rings form a +bidirectional data pipe, which can be used by the upper-layer protocols to +transport application data packets (such as IP packets, modem control messages, +diagnostics messages, and so on). Each channel is associated with a single +transfer ring. + +Transfer rings +-------------- + +Transfers between the host and device are organized by channels and defined by +Transfer Descriptors (TD). TDs are managed through transfer rings, which are +defined for each channel between the device and host and reside in the host +memory. TDs consist of one or more ring elements (or transfer blocks):: + + [Read Pointer (RP)] ----------->[Ring Element] } TD + [Write Pointer (WP)]- [Ring Element] + - [Ring Element] + --------->[Ring Element] + [Ring Element] + +Below is the basic usage of transfer rings: + +* Host allocates memory for transfer ring. +* Host sets the base pointer, read pointer, and write pointer in corresponding + channel context. +* Ring is considered empty when RP == WP. +* Ring is considered full when WP + 1 == RP. +* RP indicates the next element to be serviced by the device. +* When the host has a new buffer to send, it updates the ring element with + buffer information, increments the WP to the next element and rings the + associated channel DB. + +Event rings +----------- + +Events from the device to host are organized in event rings and defined by Event +Descriptors (ED). Event rings are used by the device to report events such as +data transfer completion status, command completion status, and state changes +to the host. Event rings are the array of EDs that resides in the host +memory. EDs consist of one or more ring elements (or transfer blocks):: + + [Read Pointer (RP)] ----------->[Ring Element] } ED + [Write Pointer (WP)]- [Ring Element] + - [Ring Element] + --------->[Ring Element] + [Ring Element] + +Below is the basic usage of event rings: + +* Host allocates memory for event ring. +* Host sets the base pointer, read pointer, and write pointer in corresponding + channel context. +* Both host and device has a local copy of RP, WP. +* Ring is considered empty (no events to service) when WP + 1 == RP. +* Ring is considered full of events when RP == WP. +* When there is a new event the device needs to send, the device updates ED + pointed by RP, increments the RP to the next element and triggers the + interrupt. + +Ring Element +------------ + +A Ring Element is a data structure used to transfer a single block +of data between the host and the device. Transfer ring element types contain a +single buffer pointer, the size of the buffer, and additional control +information. Other ring element types may only contain control and status +information. For single buffer operations, a ring descriptor is composed of a +single element. For large multi-buffer operations (such as scatter and gather), +elements can be chained to form a longer descriptor. 
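The read/write pointer rules spelled out above for transfer rings can be captured in a few lines of C. The sketch below is a minimal illustration of that bookkeeping only; the struct and helper names are hypothetical and do not mirror the driver's own struct mhi_ring or struct mhi_tre. Note that, per the event ring description above, event rings invert the convention: WP + 1 == RP means empty and RP == WP means full.

```c
#include <linux/types.h>

/*
 * Illustrative transfer ring bookkeeping, following the rules in the
 * documentation above: the ring is empty when RP == WP, and full when
 * advancing WP by one element (with wraparound) would make it equal
 * to RP. Names are hypothetical, not the driver's actual structures.
 */
struct example_ring {
	unsigned int rp;		/* next element the device will service */
	unsigned int wp;		/* next element the host will fill */
	unsigned int num_elements;	/* ring size, in elements */
};

static bool example_ring_empty(const struct example_ring *r)
{
	return r->rp == r->wp;
}

static bool example_ring_full(const struct example_ring *r)
{
	return ((r->wp + 1) % r->num_elements) == r->rp;
}
```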
+ +MHI Operations +============== + +MHI States +---------- + +MHI_STATE_RESET +~~~~~~~~~~~~~~~ +MHI is in reset state after power-up or hardware reset. The host is not allowed +to access device MMIO register space. + +MHI_STATE_READY +~~~~~~~~~~~~~~~ +MHI is ready for initialization. The host can start MHI initialization by +programming MMIO registers. + +MHI_STATE_M0 +~~~~~~~~~~~~ +MHI is running and operational in the device. The host can start channels by +issuing channel start command. + +MHI_STATE_M1 +~~~~~~~~~~~~ +MHI operation is suspended by the device. This state is entered when the +device detects inactivity at the physical interface within a preset time. + +MHI_STATE_M2 +~~~~~~~~~~~~ +MHI is in low power state. MHI operation is suspended and the device may +enter lower power mode. + +MHI_STATE_M3 +~~~~~~~~~~~~ +MHI operation stopped by the host. This state is entered when the host suspends +MHI operation. + +MHI Initialization +------------------ + +After system boots, the device is enumerated over the physical interface. +In the case of PCIe, the device is enumerated and assigned BAR-0 for +the device's MMIO register space. To initialize the MHI in a device, +the host performs the following operations: + +* Allocates the MHI context for event, channel and command arrays. +* Initializes the context array, and prepares interrupts. +* Waits until the device enters READY state. +* Programs MHI MMIO registers and sets device into MHI_M0 state. +* Waits for the device to enter M0 state. + +MHI Data Transfer +----------------- + +MHI data transfer is initiated by the host to transfer data to the device. +Following are the sequence of operations performed by the host to transfer +data to device: + +* Host prepares TD with buffer information. +* Host increments the WP of the corresponding channel transfer ring. +* Host rings the channel DB register. +* Device wakes up to process the TD. +* Device generates a completion event for the processed TD by updating ED. +* Device increments the RP of the corresponding event ring. +* Device triggers IRQ to wake up the host. +* Host wakes up and checks the event ring for completion event. +* Host updates the WP of the corresponding event ring to indicate that the + data transfer has been completed successfully. + diff --git a/Documentation/mhi/topology.rst b/Documentation/mhi/topology.rst new file mode 100644 index 000000000000..90d80a7f116d --- /dev/null +++ b/Documentation/mhi/topology.rst @@ -0,0 +1,60 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============ +MHI Topology +============ + +This document provides information about the MHI topology modeling and +representation in the kernel. + +MHI Controller +-------------- + +MHI controller driver manages the interaction with the MHI client devices +such as the external modems and WiFi chipsets. It is also the MHI bus master +which is in charge of managing the physical link between the host and device. +It is however not involved in the actual data transfer as the data transfer +is taken care by the physical bus such as PCIe. Each controller driver exposes +channels and events based on the client device type. + +Below are the roles of the MHI controller driver: + +* Turns on the physical bus and establishes the link to the device +* Configures IRQs, SMMU, and IOMEM +* Allocates struct mhi_controller and registers with the MHI bus framework + with channel and event configurations using mhi_register_controller. 
+* Initiates power on and shutdown sequence +* Initiates suspend and resume power management operations of the device. + +MHI Device +---------- + +MHI device is the logical device which binds to a maximum of two MHI channels +for bi-directional communication. Once MHI is in powered on state, the MHI +core will create MHI devices based on the channel configuration exposed +by the controller. There can be a single MHI device for each channel or for a +couple of channels. + +Each supported device is enumerated in:: + + /sys/bus/mhi/devices/ + +MHI Driver +---------- + +MHI driver is the client driver which binds to one or more MHI devices. The MHI +driver sends and receives the upper-layer protocol packets like IP packets, +modem control messages, and diagnostics messages over MHI. The MHI core will +bind the MHI devices to the MHI driver. + +Each supported driver is enumerated in:: + + /sys/bus/mhi/drivers/ + +Below are the roles of the MHI driver: + +* Registers the driver with the MHI bus framework using mhi_driver_register. +* Prepares the device for transfer by calling mhi_prepare_for_transfer. +* Initiates data transfer by calling mhi_queue_transfer. +* Once the data transfer is finished, calls mhi_unprepare_from_transfer to + end data transfer. diff --git a/MAINTAINERS b/MAINTAINERS index 09a599fa3e1a..7113cd900c51 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10772,6 +10772,13 @@ M: Vladimir Vid <vladimir.vid@sartura.hr> S: Maintained F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts +MHI BUS +M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> +S: Maintained +F: drivers/bus/mhi/ +F: include/linux/mhi.h +F: Documentation/mhi/ + MICROBLAZE ARCHITECTURE M: Michal Simek <monstr@monstr.eu> W: http://www.monstr.eu/fdt/ diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 50200d1c06ea..383934e54786 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -202,5 +202,6 @@ config DA8XX_MSTPRI peripherals. source "drivers/bus/fsl-mc/Kconfig" +source "drivers/bus/mhi/Kconfig" endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 1320bcf9fa9d..05f32cd694a4 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -34,3 +34,6 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o + +# MHI +obj-$(CONFIG_MHI_BUS) += mhi/ diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig new file mode 100644 index 000000000000..debcb2260c83 --- /dev/null +++ b/drivers/bus/mhi/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Driver for MHI protocol. Modem Host Interface (MHI) is a communication + protocol used by the host processors to control and communicate with + modem devices over a high speed peripheral bus or shared memory. + +source "drivers/bus/mhi/controllers/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile new file mode 100644 index 000000000000..3458ba3658e6 --- /dev/null +++ b/drivers/bus/mhi/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the MHI stack +# + +# core layer +obj-y += core/ +obj-y += controllers/
\ No newline at end of file diff --git a/drivers/bus/mhi/controllers/Kconfig b/drivers/bus/mhi/controllers/Kconfig new file mode 100644 index 000000000000..c83933c29047 --- /dev/null +++ b/drivers/bus/mhi/controllers/Kconfig @@ -0,0 +1,13 @@ +menu "MHI controllers" + +config MHI_QCOM + tristate "MHI QCOM Controller support" + depends on MHI_BUS + help + If you say yes to this option, MHI controller support for QCOM modem + chipsets will be enabled. QCOM PCIe based modems uses MHI as the + communication protocol. MHI controller driver is the bus master for + such modems. As the bus master driver, it oversees power management + operations such as suspend, resume, powering on and off the device. + +endmenu diff --git a/drivers/bus/mhi/controllers/Makefile b/drivers/bus/mhi/controllers/Makefile new file mode 100644 index 000000000000..292f696c7789 --- /dev/null +++ b/drivers/bus/mhi/controllers/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MHI_QCOM) += mhi_qcom.o diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c new file mode 100644 index 000000000000..d8cddedeb9bf --- /dev/null +++ b/drivers/bus/mhi/controllers/mhi_qcom.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include "mhi_qcom.h" + +struct firmware_info { + u32 dev_id; + const char *fw_image; + const char *edl_image; +}; + +static const struct firmware_info firmware_table[] = { + {.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"}, + {.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"}, + /* default, set to debug.mbn */ + {.fw_image = "debug.mbn"}, +}; + +static struct mhi_channel_config qcom_mhi_channels[] = { + { + .num = 0, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 0, + .dir = DMA_TO_DEVICE, + .ee = MHI_EE_AMSS, + .pollcfg = 0, + .data_type = MHI_BUF_RAW, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + .auto_start = false, + }, + { + .num = 1, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 0, + .dir = DMA_FROM_DEVICE, + .ee = MHI_EE_AMSS, + .pollcfg = 0, + .data_type = MHI_BUF_RAW, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + .auto_start = false, + }, +}; + +static struct mhi_event_config qcom_mhi_events[] = { + { + .num_elements = 32, + .irq_moderation_ms = 0, + .irq = 0, + .channel = 0, + .mode = MHI_DB_BRST_DISABLE, + .data_type = MHI_ER_CTRL, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, +}; + +static struct mhi_controller_config mhi_qcom_config = { + .max_channels = 128, + .timeout_ms = 1000, + .use_bounce_buf = false, + .buf_len = 0, + .num_channels = ARRAY_SIZE(qcom_mhi_channels), + .ch_cfg = qcom_mhi_channels, + .num_events = ARRAY_SIZE(qcom_mhi_events), + .event_cfg = qcom_mhi_events, +}; + +static void qcom_mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl, + struct mhi_qcom_dev *qdev) +{ + struct pci_dev *pci_dev = qdev->pci_dev; + + pci_free_irq_vectors(pci_dev); + kfree(mhi_cntrl->irq); + mhi_cntrl->irq = NULL; + iounmap(mhi_cntrl->regs); + 
mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, qdev->bars); + pci_disable_device(pci_dev); +} + +static int qcom_mhi_init_pci_dev(struct mhi_controller *mhi_cntrl, + struct mhi_qcom_dev *qdev) +{ + struct pci_dev *pci_dev = qdev->pci_dev; + int ret; + resource_size_t start, len; + int i; + + qdev->bars = QCOM_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, qdev->bars); + if (ret) + return ret; + + ret = pci_enable_device(pci_dev); + if (ret) + return ret; + + ret = pci_request_region(pci_dev, qdev->bars, "mhi"); + if (ret) + goto error_request_region; + + pci_set_master(pci_dev); + + ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)); + if (ret) + goto error_dma_mask; + + ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); + if (ret) + goto error_dma_mask; + + start = pci_resource_start(pci_dev, qdev->bars); + len = pci_resource_len(pci_dev, qdev->bars); + mhi_cntrl->regs = ioremap_nocache(start, len); + if (!mhi_cntrl->regs) + goto error_ioremap; + + ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->nr_irqs_req, + mhi_cntrl->nr_irqs_req, PCI_IRQ_MSI); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->nr_irqs_req) + goto error_req_irq; + + mhi_cntrl->nr_irqs = ret; + mhi_cntrl->irq = kmalloc_array(mhi_cntrl->nr_irqs, + sizeof(*mhi_cntrl->irq), GFP_KERNEL); + if (!mhi_cntrl->irq) { + ret = -ENOMEM; + goto error_alloc_irq_vec; + } + + for (i = 0; i < mhi_cntrl->nr_irqs; i++) { + mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i); + if (mhi_cntrl->irq[i] < 0) { + ret = mhi_cntrl->irq[i]; + goto error_get_irq_vec; + } + } + + /* Configure runtime PM */ + pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_QCOM_AUTOSUSPEND_MS); + pm_runtime_use_autosuspend(&pci_dev->dev); + pm_suspend_ignore_children(&pci_dev->dev, true); + + return 0; + +error_get_irq_vec: + kfree(mhi_cntrl->irq); + mhi_cntrl->irq = NULL; + +error_alloc_irq_vec: + pci_free_irq_vectors(pci_dev); + +error_req_irq: + iounmap(mhi_cntrl->regs); + +error_ioremap: +error_dma_mask: + pci_clear_master(pci_dev); + pci_release_region(pci_dev, qdev->bars); + +error_request_region: + pci_disable_device(pci_dev); + + return ret; +} + +static int qcom_mhi_runtime_suspend(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct mhi_qcom_dev *qdev = pci_get_drvdata(pci_dev); + struct mhi_controller *mhi_cntrl = qdev->mhi_cntrl; + int ret = 0; + + dev_info(mhi_cntrl->dev, "Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_pm_suspend(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} + +static int qcom_mhi_runtime_idle(struct device *dev) +{ + /* + * In MHI power management, the device will go to runtime suspend only + * after entering MHI State M2, which is handled by the MHI core. So + * return -EBUSY here to indicate the PM framework that the device is + * not ready to be suspended even if the usage count is 0. 
+ */ + return -EBUSY; +} + +static int qcom_mhi_runtime_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct mhi_qcom_dev *qdev = pci_get_drvdata(pci_dev); + struct mhi_controller *mhi_cntrl = qdev->mhi_cntrl; + int ret = 0; + + dev_info(mhi_cntrl->dev, "Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!qdev->powered_on) { + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} + +static int qcom_mhi_system_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct mhi_qcom_dev *qdev = pci_get_drvdata(pci_dev); + struct mhi_controller *mhi_cntrl = qdev->mhi_cntrl; + int ret = 0; + + ret = qcom_mhi_runtime_resume(dev); + if (ret) { + dev_err(mhi_cntrl->dev, "Failed to resume link\n"); + } else { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return ret; +} + +static int qcom_mhi_system_suspend(struct device *dev) +{ + /* If rpm status is still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) + return qcom_mhi_runtime_suspend(dev); + + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + + return 0; +} + +/* Checks if link is down */ +static int qcom_mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_qcom_dev *qdev = priv; + u16 dev_id; + int ret; + + /* Try reading device id, if dev id don't match then link is down */ + ret = pci_read_config_word(qdev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0; +} + +static int qcom_mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_qcom_dev *qdev = priv; + struct device *dev = &qdev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void qcom_mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_qcom_dev *qdev = priv; + struct device *dev = &qdev->pci_dev->dev; + + pm_runtime_put_noidle(dev); +} + +static void qcom_mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, enum mhi_callback reason) +{ + struct mhi_qcom_dev *qdev = priv; + struct device *dev = &qdev->pci_dev->dev; + + if (reason == MHI_CB_IDLE) { + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + } +} + +static struct mhi_controller *qcom_mhi_register_controller(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + const struct firmware_info *firmware_info; + int ret, i; + + mhi_cntrl = mhi_alloc_controller(); + if (!mhi_cntrl) + return ERR_PTR(-ENOMEM); + + mhi_cntrl->dev = &pci_dev->dev; + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->bus_id = pci_dev->bus->number; + + /* + * Covers the entire possible physical ram region. Remote side is + * going to calculate a size of this range, so subtract 1 to prevent + * rollover. 
+ */ + mhi_cntrl->iova_start = 0; + mhi_cntrl->iova_stop = U64_MAX - 1; + + /* Setup MHI power management callbacks */ + mhi_cntrl->status_cb = qcom_mhi_status_cb; + mhi_cntrl->runtime_get = qcom_mhi_runtime_get; + mhi_cntrl->runtime_put = qcom_mhi_runtime_put; + mhi_cntrl->link_status = qcom_mhi_link_status; + + mhi_cntrl->name = pci_name(pci_dev); + + ret = mhi_register_controller(mhi_cntrl, &mhi_qcom_config); + if (ret) + goto error_register; + + for (i = 0; i < ARRAY_SIZE(firmware_table); i++) { + firmware_info = firmware_table + i; + if (mhi_cntrl->dev_id == firmware_info->dev_id) + break; + } + + mhi_cntrl->fw_image = firmware_info->fw_image; + mhi_cntrl->edl_image = firmware_info->edl_image; + + return mhi_cntrl; + +error_register: + mhi_free_controller(mhi_cntrl); + + return ERR_PTR(-EINVAL); +} + +int mhi_qcom_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_qcom_dev *qdev; + int ret; + + qdev = kzalloc(sizeof(*qdev), GFP_KERNEL); + if (!qdev) + return -ENOMEM; + + qdev->pci_dev = pci_dev; + pci_set_drvdata(pci_dev, qdev); + + mhi_cntrl = qcom_mhi_register_controller(pci_dev); + if (IS_ERR(mhi_cntrl)) { + mhi_free_controller(mhi_cntrl); + pci_set_drvdata(pci_dev, NULL); + return PTR_ERR(mhi_cntrl); + } + + ret = qcom_mhi_init_pci_dev(mhi_cntrl, qdev); + if (ret) + goto error_init_pci; + + /* Start power up sequence */ + ret = mhi_async_power_up(mhi_cntrl); + if (ret) + goto error_power_up; + + mhi_controller_set_devdata(mhi_cntrl, qdev); + qdev->powered_on = true; + qdev->mhi_cntrl = mhi_cntrl; + + /* Decrement usage count as recommended by PCI framework */ + pm_runtime_put_noidle(&pci_dev->dev); + + /* Mask last_busy so that runtime PM won't issue suspend immediately */ + pm_runtime_mark_last_busy(&pci_dev->dev); + + /* Allow runtime PM */ + pm_runtime_allow(&pci_dev->dev); + + return 0; + +error_power_up: + qcom_mhi_deinit_pci_dev(mhi_cntrl, qdev); + +error_init_pci: + mhi_unregister_controller(mhi_cntrl); + mhi_free_controller(mhi_cntrl); + pci_set_drvdata(pci_dev, NULL); + + return ret; +} + +static void mhi_qcom_pci_remove(struct pci_dev *pci_dev) +{ + struct mhi_qcom_dev *qdev = pci_get_drvdata(pci_dev); + struct mhi_controller *mhi_cntrl; + + if (!qdev) + return; + + mhi_cntrl = qdev->mhi_cntrl; + mhi_power_down(mhi_cntrl, true); + mhi_unregister_controller(mhi_cntrl); + mhi_free_controller(mhi_cntrl); + qcom_mhi_deinit_pci_dev(mhi_cntrl, qdev); +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(qcom_mhi_runtime_suspend, + qcom_mhi_runtime_resume, + qcom_mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(qcom_mhi_system_suspend, qcom_mhi_system_resume) +}; + +static struct pci_device_id mhi_qcom_pci_device_id[] = { + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x0300) }, + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x0301) }, + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x0302) }, + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x0303) }, + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x0304) }, + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x0305) }, + { PCI_DEVICE(QCOM_PCI_VENDOR_ID, 0x1101) }, + { /* sentinel */ }, +}; + +static struct pci_driver mhi_qcom_pci_driver = { + .name = "mhi_qcom", + .id_table = mhi_qcom_pci_device_id, + .probe = mhi_qcom_pci_probe, + .remove = mhi_qcom_pci_remove, + .driver = { + .pm = &pm_ops + } +}; + +module_pci_driver(mhi_qcom_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("MHI_CORE"); +MODULE_DESCRIPTION("MHI Qcom Controller Driver"); diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h 
b/drivers/bus/mhi/controllers/mhi_qcom.h new file mode 100644 index 000000000000..e060dd923b64 --- /dev/null +++ b/drivers/bus/mhi/controllers/mhi_qcom.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +#define QCOM_PCI_VENDOR_ID 0x17cb +#define QCOM_PCI_BAR_NUM 0 + +#define MHI_QCOM_AUTOSUSPEND_MS 1000 + +struct mhi_qcom_dev { + struct pci_dev *pci_dev; + struct mhi_controller *mhi_cntrl; + int bars; + bool powered_on; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#endif /* _MHI_QCOM_ */ diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile new file mode 100644 index 000000000000..3988de86885f --- /dev/null +++ b/drivers/bus/mhi/core/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MHI_BUS) := mhi.o + +mhi-y := mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c new file mode 100644 index 000000000000..02af66a65d44 --- /dev/null +++ b/drivers/bus/mhi/core/mhi_boot.c @@ -0,0 +1,531 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include "mhi_internal.h" + +/* Setup RDDM vector table for RDDM buffer transfer */ +static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + unsigned int i; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } +} + +/* Collect RDDM buffer during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + struct mhi_buf *mhi_buf; + u32 sequence_id; + u32 rx_status; + enum mhi_ee_type ee; + struct image_info *rddm_image = mhi_cntrl->rddm_image; + const u32 delayus = 100; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + void __iomem *base = mhi_cntrl->bhie; + + dev_info(mhi_cntrl->dev, + "Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only be executing during a kernel panic, we expect all + * other cores to shutdown while we're collecting RDDM buffer. After + * returning from this function, we expect the device to reset. + * + * Normaly, we read/write pm_state only after grabbing the + * pm_lock, but since we're in a panic, let's skip it. 
+ */ + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return -EIO; + + /* + * There is no gurantee this state change would take effect since + * we're setting it w/o grabbing pm_lock + */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + /* update should take the effect immediately */ + smp_wmb(); + + /* Setup the RX vector table */ + mhi_rddm_prepare(mhi_cntrl, rddm_image); + mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1]; + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; + + if (unlikely(!sequence_id)) + sequence_id = 1; + + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status); + if (ret) + return -EIO; + + if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) + return 0; + + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); + + dev_err(mhi_cntrl->dev, "Did not complete RDDM transfer\n"); + dev_err(mhi_cntrl->dev, "Current EE:%s\n", TO_MHI_EXEC_STR(ee)); + dev_err(mhi_cntrl->dev, "RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret); + + return -EIO; +} + +/* Download RDDM image from device */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) +{ + void __iomem *base = mhi_cntrl->bhie; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct image_info *rddm_image = mhi_cntrl->rddm_image; + struct mhi_buf *mhi_buf; + int ret; + u32 rx_status; + u32 sequence_id; + + if (!rddm_image) + return -ENOMEM; + + if (in_panic) + return __mhi_download_rddm_in_panic(mhi_cntrl); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, + "MHI is not in valid state, pm_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + return -EIO; + } + + mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + + /* Vector table is the last entry */ + mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1]; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, 
+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL_GPL(mhi_download_rddm_img); + +/* Download AMSS image to device */ +static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status, sequence_id; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); + + sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} + +/* Download SBL image to device */ +static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, + void *buf, + size_t size) +{ + u32 tx_status, val, session_id; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + dma_addr_t dma_addr = dma_map_single(mhi_cntrl->dev, buf, size, + DMA_TO_DEVICE); + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + if (dma_mapping_error(mhi_cntrl->dev, dma_addr)) + return -ENOMEM; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + /* Start SBL download via BHI protocol */ + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK; + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); + read_unlock_bh(pm_lock); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, BHI_STATUS_SHIFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto invalid_pm_state; + + if (tx_status == BHI_STATUS_ERROR) { + dev_err(mhi_cntrl->dev, "Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + dev_err(mhi_cntrl->dev, "reg:%s value:0x%x\n", + error_reg[i].name, val); + } + } + 
read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + dma_unmap_single(mhi_cntrl->dev, dma_addr, size, DMA_TO_DEVICE); + + return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT; + +invalid_pm_state: + dma_unmap_single(mhi_cntrl->dev, dma_addr, size, DMA_TO_DEVICE); + + return -EIO; +} + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) +{ + int i; + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + for (i = 0; i < image_info->entries; i++, mhi_buf++) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + + kfree(image_info->mhi_buf); + kfree(image_info); +} + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* Allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* Allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* Vector table is the last entry */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size, + &mhi_buf->dma_addr, + GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + +static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + const struct firmware *firmware, + struct image_info *img_info) +{ + size_t remainder = firmware->size; + size_t to_cpy; + const u8 *buf = firmware->data; + int i = 0; + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + + while (remainder) { + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = to_cpy; + + buf += to_cpy; + remainder -= to_cpy; + i++; + bhi_vec++; + mhi_buf++; + } +} + +void mhi_fw_load_worker(struct work_struct *work) +{ + int ret; + struct mhi_controller *mhi_cntrl; + const char *fw_name; + const struct firmware *firmware; + struct image_info *image_info; + void *buf; + size_t size; + + mhi_cntrl = container_of(work, struct mhi_controller, fw_worker); + + dev_info(mhi_cntrl->dev, "Waiting for device to enter PBL from: %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_PBL(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, "Device MHI is not in valid state\n"); + return; + } + + /* If device is in pass through, we do not have to load firmware */ + if (mhi_cntrl->ee == MHI_EE_PTHRU) + return; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + dev_err(mhi_cntrl->dev, + "No firmware image defined or !sbl_size || !seg_len\n"); + return; + } + + ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev); + if (ret) { + dev_err(mhi_cntrl->dev, "Error loading firmware: %d\n", + ret); + return; + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* SBL size provided is maximum size, not necessarily the image size */ + if (size > firmware->size) + size = firmware->size; + + buf = kmemdup(firmware->data, size, GFP_KERNEL); + if (!buf) { + release_firmware(firmware); + return; + } + + /* Download SBL image */ + ret = mhi_fw_load_sbl(mhi_cntrl, buf, size); + kfree(buf); + + if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL) + release_firmware(firmware); + + /* Error or in EDL mode, we're done */ + if (ret || mhi_cntrl->ee == MHI_EE_EDL) + return; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * If we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) + goto error_alloc_fw_table; + + /* Load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + /* Transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + + if (!mhi_cntrl->fbc_download) + return; + + if (ret) + goto error_read; + + /* Wait for the BHIE event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_BHIE || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, "MHI did not enter BHIE\n"); + goto error_read; + } + + /* Start full firmware image download */ + image_info = mhi_cntrl->fbc_image; + ret = mhi_fw_load_amss(mhi_cntrl, + /* Vector table is the last entry */ + &image_info->mhi_buf[image_info->entries - 1]); + + release_firmware(firmware); + + return; + +error_read: + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + +error_alloc_fw_table: + release_firmware(firmware); +} diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c new file mode 100644 index 000000000000..18be62661049 --- /dev/null +++ b/drivers/bus/mhi/core/mhi_init.c @@ -0,0 +1,1255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
+ * + */ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include "mhi_internal.h" + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PBL", + [MHI_EE_SBL] = "SBL", + [MHI_EE_AMSS] = "AMSS", + [MHI_EE_BHIE] = "BHIE", + [MHI_EE_RDDM] = "RDDM", + [MHI_EE_PTHRU] = "PASS THRU", + [MHI_EE_EDL] = "EDL", + [MHI_EE_DISABLE_TRANSITION] = "DISABLE", +}; + +const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { + [DEV_ST_TRANSITION_PBL] = "PBL", + [DEV_ST_TRANSITION_READY] = "READY", + [DEV_ST_TRANSITION_SBL] = "SBL", + [DEV_ST_TRANSITION_AMSS] = "AMSS", + [DEV_ST_TRANSITION_BHIE] = "BHIE", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS_ERR", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_STATE_DISABLE] = "DISABLE", + [MHI_PM_STATE_POR] = "POR", + [MHI_PM_STATE_M0] = "M0", + [MHI_PM_STATE_M2] = "M2", + [MHI_PM_STATE_M3_ENTER] = "M?->M3", + [MHI_PM_STATE_M3] = "M3", + [MHI_PM_STATE_M3_EXIT] = "M3->M0", + [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error", + [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect", + [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process", + [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect", +}; + +const char *to_mhi_pm_state_str(enum mhi_pm_state state) +{ + int index = find_last_bit((unsigned long *)&state, 32); + + if (index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +/* MHI protocol requires the transfer ring to be aligned with ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size, + &ring->dma_handle, GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + + return 0; +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + int i; + int ret; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + /* Setup BHI_INTVEC IRQ */ + ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, + mhi_intvec_threaded_handler, IRQF_SHARED, + "bhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ret = request_irq(mhi_cntrl->irq[mhi_event->irq], + mhi_irq_handler, IRQF_SHARED, "mhi", + mhi_event); + if (ret) { + dev_err(mhi_cntrl->dev, + "Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->irq], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i 
>= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->alloc_size, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + /* Setup channel ctxt */ + mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* Skip if it is an offload channel */ + if (mhi_chan->offload_ch) + continue; + + chan_ctxt->chstate = MHI_CH_STATE_DISABLED; + chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode; + chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg; + chan_ctxt->chtype = mhi_chan->dir; + chan_ctxt->erindex = mhi_chan->er_index; + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = &chan_ctxt->wp; + } + + /* Setup event context */ + mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if it is an offload event */ + if (mhi_event->offload_ev) + continue; + + er_ctxt->intmodc = 0; + er_ctxt->intmodt = mhi_event->intmod; + er_ctxt->ertype = MHI_ER_TYPE_VALID; + er_ctxt->msivec = mhi_event->irq; + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; 
+ + /* + * If the read pointer equals to the write pointer, then the + * ring is empty + */ + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = ring->iommu_base; + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = ring->len; + ring->ctxt_wp = &er_ctxt->wp; + } + + /* Setup cmd context */ + mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * + NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = ring->len; + ring->ctxt_wp = &cmd_ctxt->wp; + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + 
MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0 } + }; + + dev_info(mhi_cntrl->dev, "Initializing MHI registers\n"); + + /* Read channel db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) { + dev_err(mhi_cntrl->dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } + + /* Setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; + + /* Setup channel db address for each channel in tre_ring */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* Read event ring db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) { + dev_err(mhi_cntrl->dev, "Unable to read ERDBOFF register\n"); + return -EIO; + } + + /* Setup event db address for each ev_ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* Setup DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + /* Write to MMIO registers */ + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + kfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + chan_ctxt->rbase = 0; +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = kzalloc(buf_ring->len, GFP_KERNEL); + + if (!buf_ring->base) { + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + return -ENOMEM; + } + + chan_ctxt->chstate = MHI_CH_STATE_ENABLED; + chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = tre_ring->len; + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = 1; + + /* Update to all cores */ + 
smp_wmb(); + + return 0; +} + +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *ch_ctxt; + int er_index, chan; + + switch (dir) { + case DMA_TO_DEVICE: + mhi_chan = mhi_dev->ul_chan; + break; + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + case DMA_NONE: + mhi_chan = mhi_dev->dl_chan; + break; + default: + return -EINVAL; + } + + er_index = mhi_chan->er_index; + chan = mhi_chan->chan; + + for (; elements > 0; elements--, cfg_tbl++) { + /* update event context array */ + if (!strcmp(cfg_tbl->name, "ECA")) { + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index]; + if (sizeof(*er_ctxt) != cfg_tbl->len) { + dev_err(mhi_cntrl->dev, + "Invalid ECA size, expected:%zu actual:%zu\n", + sizeof(*er_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt)); + continue; + } + + /* update channel context array */ + if (!strcmp(cfg_tbl->name, "CCA")) { + ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan]; + if (cfg_tbl->len != sizeof(*ch_ctxt)) { + dev_err(mhi_cntrl->dev, + "Invalid CCA size, expected:%zu actual:%zu\n", + sizeof(*ch_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt)); + continue; + } + + return -EINVAL; + } + + return 0; +} + +static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + int i, num = 0; + struct mhi_event *mhi_event; + struct mhi_event_config *event_cfg; + + num = config->num_events; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Populate event ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < num; ++i) { + event_cfg = &config->event_cfg[i]; + + mhi_event->er_index = i; + mhi_event->ring.elements = event_cfg->num_elements; + mhi_event->intmod = event_cfg->irq_moderation_ms; + mhi_event->irq = event_cfg->irq; + + if (event_cfg->channel != U32_MAX) { + /* This event ring has a dedicated channel */ + mhi_event->chan = event_cfg->channel; + if (mhi_event->chan >= mhi_cntrl->max_chan) { + dev_err(mhi_cntrl->dev, + "Event Ring channel not available\n"); + goto error_ev_cfg; + } + + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + /* Priority is fixed to 1 for now */ + mhi_event->priority = 1; + + mhi_event->db_cfg.brstmode = event_cfg->mode; + if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + mhi_event->db_cfg.process_db = + (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) ?
+ mhi_db_brstmode : mhi_db_brstmode_disable; + + mhi_event->data_type = event_cfg->data_type; + + switch (mhi_event->data_type) { + case MHI_ER_DATA: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + default: + dev_err(mhi_cntrl->dev, + "Event Ring type not supported\n"); + goto error_ev_cfg; + } + + mhi_event->hw_ring = event_cfg->hardware_event; + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = event_cfg->client_managed; + mhi_event->offload_ev = event_cfg->offload_channel; + mhi_event++; + } + + /* We need IRQ for each event ring + additional one for BHI */ + mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1; + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} + +static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + int i; + u32 chan; + struct mhi_channel_config *ch_cfg; + + mhi_cntrl->max_chan = config->max_channels; + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* Populate channel configurations */ + for (i = 0; i < config->num_channels; ++i) { + struct mhi_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(mhi_cntrl->dev, + "Channel %d not available\n", chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + + mhi_chan->buf_ring.elements = ch_cfg->num_elements; + if (!mhi_chan->buf_ring.elements) + goto error_chan_cfg; + + mhi_chan->tre_ring.elements = mhi_chan->buf_ring.elements; + mhi_chan->er_index = ch_cfg->event_ring; + mhi_chan->dir = ch_cfg->dir; + + mhi_chan->ee = ch_cfg->ee; + if (mhi_chan->ee >= MHI_EE_MAX_SUPPORTED) + goto error_chan_cfg; + + mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; + mhi_chan->xfer_type = ch_cfg->data_type; + + switch (mhi_chan->xfer_type) { + case MHI_BUF_RAW: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_buf; + break; + case MHI_BUF_SKB: + mhi_chan->queue_xfer = mhi_queue_skb; + break; + case MHI_BUF_SCLIST: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_sclist; + break; + case MHI_BUF_NOP: + mhi_chan->queue_xfer = mhi_queue_nop; + break; + default: + dev_err(mhi_cntrl->dev, + "Channel datatype not supported\n"); + goto error_chan_cfg; + } + + mhi_chan->lpm_notify = ch_cfg->lpm_notify; + mhi_chan->offload_ch = ch_cfg->offload_channel; + mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; + mhi_chan->pre_alloc = ch_cfg->auto_queue; + mhi_chan->auto_start = ch_cfg->auto_start; + + /* + * If MHI host allocates buffers, then the channel direction + * should be DMA_FROM_DEVICE and the buffer type should be + * MHI_BUF_RAW + */ + if (mhi_chan->pre_alloc && (mhi_chan->dir != DMA_FROM_DEVICE || + mhi_chan->xfer_type != MHI_BUF_RAW)) { + dev_err(mhi_cntrl->dev, + "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + /* + * Bi-directional and direction less channel must be an + * offload channel + */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { + dev_err(mhi_cntrl->dev, + "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + /* If MHI host allocates buffers then client cannot queue 
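+ * transfers; queue_xfer is redirected to the mhi_queue_nop stub instead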
*/ + if (mhi_chan->pre_alloc) + mhi_chan->queue_xfer = mhi_queue_nop; + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { + dev_err(mhi_cntrl->dev, + "Invalid Door bell mode\n"); + goto error_chan_cfg; + } + + mhi_chan->db_cfg.process_db = + (mhi_chan->db_cfg.brstmode == + MHI_DB_BRST_ENABLE) ? + mhi_db_brstmode : mhi_db_brstmode_disable; + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} + +static int parse_config(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + int ret; + + /* Parse MHI channel configuration */ + ret = parse_ch_cfg(mhi_cntrl, config); + if (ret) + return ret; + + /* Parse MHI event configuration */ + ret = parse_ev_cfg(mhi_cntrl, config); + if (ret) + goto error_ev_cfg; + + mhi_cntrl->timeout_ms = config->timeout_ms; + if (!mhi_cntrl->timeout_ms) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = config->use_bounce_buf; + mhi_cntrl->buffer_len = config->buf_len; + if (!mhi_cntrl->buffer_len) + mhi_cntrl->buffer_len = MHI_MAX_MTU; + + return 0; + +error_ev_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + int ret; + int i; + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + + if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put) + return -EINVAL; + + if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status) + return -EINVAL; + + ret = parse_config(mhi_cntrl, config); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto error_alloc_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker); + INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + /* Skip for offload events */ + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL) + tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* Register controller with MHI bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) { + ret = -ENOMEM; + goto 
error_alloc_dev; + } + + mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "%s", mhi_cntrl->name); + ret = device_add(&mhi_dev->dev); + if (ret) + goto error_add_dev; + + mhi_cntrl->mhi_dev = mhi_dev; + + return 0; + +error_add_dev: + mhi_dealloc_device(mhi_cntrl, mhi_dev); + +error_alloc_dev: + kfree(mhi_cntrl->mhi_cmd); + +error_alloc_cmd: + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_register_controller); + +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); +} +EXPORT_SYMBOL_GPL(mhi_unregister_controller); + +struct mhi_controller *mhi_alloc_controller(void) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); + + return mhi_cntrl; +} +EXPORT_SYMBOL_GPL(mhi_alloc_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto error_setup_irq; + + /* + * Allocate RDDM table if specified, this table is for debug purposes + * so we'll ignore errors + */ + if (mhi_cntrl->rddm_size) + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + + mhi_cntrl->pre_init = true; + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +error_setup_irq: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + mhi_cntrl->pre_init = false; +} +EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); + +/* Match MHI device to driver */ +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->chan_name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +} + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + kfree(mhi_dev); +} + +struct bus_type mhi_bus_type = { + .name = "mhi", + .dev_name = "mhi", + .match = mhi_match, +}; + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + bool auto_start = false; + int ret; + + if (ul_chan) { + /* + * If channel
supports LPM notifications then status_cb should + * be provided + */ + if (ul_chan->lpm_notify && !mhi_drv->status_cb) + return -EINVAL; + + /* For non-offload channels then xfer_cb should be provided */ + if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) + return -EINVAL; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + auto_start = ul_chan->auto_start; + } + + if (dl_chan) { + /* + * If channel supports LPM notifications then status_cb should + * be provided + */ + if (dl_chan->lpm_notify && !mhi_drv->status_cb) + return -EINVAL; + + /* For non-offload channels then xfer_cb should be provided */ + if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) + return -EINVAL; + + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * If the channel event ring is managed by client, then + * status_cb must be provided so that the framework can + * notify pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + return -EINVAL; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + auto_start = (auto_start || dl_chan->auto_start); + } + + /* Call the user provided probe function */ + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + + /* If this is an autostart channel, start the transfer */ + if (!ret && auto_start) + mhi_prepare_for_transfer(mhi_dev); + + return ret; +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum mhi_ch_state ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* Wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* Set the channel state to disabled */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + /* Reset the non-offload channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + } + + mhi_drv->remove(mhi_dev); + + /* De-init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + if (ch_state[dir] == MHI_CH_STATE_ENABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + /* Remove associated device */ + mhi_chan->mhi_dev = NULL; + + mutex_unlock(&mhi_chan->mutex); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + while (atomic_read(&mhi_dev->dev_wake)) + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_driver_register(struct mhi_driver *mhi_drv) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_unregister); + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + struct device *dev; + + if (!mhi_dev) + return NULL; + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + dev->parent = mhi_cntrl->dev; + mhi_dev->mhi_cntrl = mhi_cntrl; + atomic_set(&mhi_dev->dev_wake, 0); + + return mhi_dev; +} + +static int __init mhi_init(void) +{ + return bus_register(&mhi_bus_type); +} + +static void __exit mhi_exit(void) +{ + bus_unregister(&mhi_bus_type); +} + +postcore_initcall(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("MHI_CORE"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h new file mode 100644 index 000000000000..70c6c9466e2d --- /dev/null +++ b/drivers/bus/mhi/core/mhi_internal.h @@ -0,0 +1,682 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +extern struct bus_type mhi_bus_type; + +/* MHI mmio register mapping */ +#define PCI_INVALID_READ(val) (val == U32_MAX) + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define BHIEOFF (0x2C) +#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) +#define BHIEOFF_BHIEOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define MHISTATUS_READY_SHIFT (0) + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) + +#define CCABAP_HIGHER (0x5C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) + +#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) + +#define CRCBAP_HIGHER (0x6C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) +#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) + +#define MHICTRLLIMIT_HIGHER (0x8C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) + +#define MHIDATABASE_HIGHER (0x9C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) 
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) + +#define MHIDATALIMIT_LOWER (0xA0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) + +#define MHIDATALIMIT_HIGHER (0xA4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) + +/* MHI BHI offfsets */ +#define BHI_BHIVERSION_MINOR (0x00) +#define BHI_BHIVERSION_MAJOR (0x04) +#define BHI_IMGADDR_LOW (0x08) +#define BHI_IMGADDR_HIGH (0x0C) +#define BHI_IMGSIZE (0x10) +#define BHI_RSVD1 (0x14) +#define BHI_IMGTXDB (0x18) +#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHI_TXDB_SEQNUM_SHFT (0) +#define BHI_RSVD2 (0x1C) +#define BHI_INTVEC (0x20) +#define BHI_RSVD3 (0x24) +#define BHI_EXECENV (0x28) +#define BHI_STATUS (0x2C) +#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNU (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + (0x4 * n)) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n)) +#define BHI_RSVD5 (0xC4) +#define BHI_STATUS_MASK (0xC0000000) +#define BHI_STATUS_SHIFT (30) +#define BHI_STATUS_ERROR (3) +#define BHI_STATUS_SUCCESS (2) +#define BHI_STATUS_RESET (0) + +/* MHI BHIE offsets */ +#define BHIE_MSMSOCID_OFFS (0x0000) +#define BHIE_TXVECADDR_LOW_OFFS (0x002C) +#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) +#define BHIE_TXVECSIZE_OFFS (0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +struct mhi_event_ctxt { + u32 reserved : 8; + u32 intmodc : 8; + u32 intmodt : 16; + u32 ertype; + u32 msivec; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_chan_ctxt { + u32 chstate : 8; + u32 brstmode : 2; + u32 pollcfg : 6; + u32 reserved : 16; + u32 chtype; + u32 erindex; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + u32 reserved0; + u32 reserved1; + u32 reserved2; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + 
MHI_CMD_START_CHAN = 18, +}; + +/* no operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) + +/* channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16)) + +/* channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16)) + +/* channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_START_CHAN << 16)) + +#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) + +/* event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (ptr) +#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) + +/* transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (ptr) +#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, + MHI_EV_CC_OOB = 0x5, + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum mhi_ch_state { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ + mode != MHI_DB_BRST_ENABLE) + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) + +enum dev_st_transition { + DEV_ST_TRANSITION_PBL, + DEV_ST_TRANSITION_READY, + DEV_ST_TRANSITION_SBL, + DEV_ST_TRANSITION_AMSS, + DEV_ST_TRANSITION_BHIE, + DEV_ST_TRANSITION_MAX, +}; + +extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; +#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? 
\ + "INVALID_STATE" : dev_state_tran_str[state]) + +enum mhi_state { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? \ + "INVALID_STATE" : mhi_state_str[state]) + +/* internal power states */ +enum mhi_pm_state { + MHI_PM_STATE_DISABLE, + MHI_PM_STATE_POR, + MHI_PM_STATE_M0, + MHI_PM_STATE_M2, + MHI_PM_STATE_M3_ENTER, + MHI_PM_STATE_M3, + MHI_PM_STATE_M3_EXIT, + MHI_PM_STATE_FW_DL_ERR, + MHI_PM_STATE_SYS_ERR_DETECT, + MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SHUTDOWN_PROCESS, + MHI_PM_STATE_LD_ERR_FATAL_DETECT, + MHI_PM_STATE_MAX +}; + +#define MHI_PM_DISABLE BIT(0) +#define MHI_PM_POR BIT(1) +#define MHI_PM_M0 BIT(2) +#define MHI_PM_M2 BIT(3) +#define MHI_PM_M3_ENTER BIT(4) +#define MHI_PM_M3 BIT(5) +#define MHI_PM_M3_EXIT BIT(6) +/* firmware download failure state */ +#define MHI_PM_FW_DL_ERR BIT(7) +#define MHI_PM_SYS_ERR_DETECT BIT(8) +#define MHI_PM_SYS_ERR_PROCESS BIT(9) +#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +/* link not accessible */ +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & MHI_PM_M0) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +#define NR_OF_CMD_RINGS 1 +#define CMD_EL_PER_RING 128 +#define PRIMARY_CMD_RING 0 +#define MHI_DEV_WAKE_DB 127 +#define MHI_MAX_MTU 0xffff + +enum mhi_er_type { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum mhi_db_brst_mode brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum mhi_pm_state from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum dev_st_transition state; +}; + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + u64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + dma_addr_t p_addr; + void *v_addr; + void *bb_addr; + void *wp; + size_t len; + void *cb_buf; + enum dma_data_direction dir; +}; 
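+/*
+ * Note on ring bookkeeping: transfer, event and command rings all use the
+ * struct mhi_ring fields above. A ring is a contiguous buffer of 'elements'
+ * entries of 'el_size' bytes (len = el_size * elements), and both rp and wp
+ * wrap around at base + len, e.g.:
+ *
+ *	wp += ring->el_size;
+ *	if (wp >= ring->base + ring->len)
+ *		wp = ring->base;
+ *
+ * iommu_base is the DMA address of 'base', so the value written to a
+ * doorbell for a given host pointer is iommu_base + (wp - base).
+ */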
+ +struct mhi_event { + u32 er_index; + u32 intmod; + u32 irq; + int chan; /* this event ring is dedicated to a channel */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ + spinlock_t lock; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + struct tasklet_struct task; + int (*process_event)(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + struct mhi_controller *mhi_cntrl; +}; + +struct mhi_chan { + u32 chan; + const char *name; + /* + * important, when consuming increment tre_ring first, when releasing + * decrement buf_ring first. If tre_ring has space, buf_ring + * guranteed to have space so we do not need to check both rings. + */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 er_index; + u32 intmod; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum mhi_ee_type ee; + enum mhi_buf_type xfer_type; + enum mhi_ch_state ch_state; + enum mhi_ev_ccs ccs; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool auto_start; + /* functions that generate the transfer ring elements */ + int (*gen_tre)(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, void *buf, void *cb, + size_t len, enum mhi_flags flags); + int (*queue_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags); + /* xfer call back */ + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; +}; + +/* default MHI timeout */ +#define MHI_TIMEOUT_MS (1000) + +/* power management apis */ +enum mhi_pm_state __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state); +const char *to_mhi_pm_state_str(enum mhi_pm_state state); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); +enum mhi_state mhi_get_m_state(struct mhi_controller *mhi_cntrl); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_fw_load_worker(struct work_struct *work); +void mhi_pm_sys_err_worker(struct work_struct *work); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); + +/* queue transfer buffer */ +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum mhi_flags flags); +int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags); +int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags); +int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags); +int 
mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags); + +/* register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t db_val); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t db_val); +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state); +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability, + u32 *offset); + +/* memory allocation methods */ +static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + dma_addr_t *dma_handle, + gfp_t gfp) +{ + void *buf = dma_alloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp); + + if (buf) + atomic_add(size, &mhi_cntrl->alloc_size); + + return buf; +} + +static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + atomic_sub(size, &mhi_cntrl->alloc_size); + dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle); +} + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); +static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + kfree(mhi_dev); +} + +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); + +/* initialization methods */ +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller 
*mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_dtr_init(void); + +/* isr handlers */ +irqreturn_t mhi_irq_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_handler(int irq_number, void *dev); +void mhi_ev_task(unsigned long data); + +#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c new file mode 100644 index 000000000000..06647cdc5a2b --- /dev/null +++ b/drivers/bus/mhi/core/mhi_main.c @@ -0,0 +1,1372 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include "mhi_internal.h" + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out) +{ + u32 tmp = le32_to_cpu(readl_relaxed(base + offset)); + + /* If there is any unexpected value, query the link status */ + if (PCI_INVALID_READ(tmp) && + mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data)) + return -EIO; + + *out = tmp; + + return 0; +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 shift, u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val) +{ + writel_relaxed(val, base + offset); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); + db_cfg->db_mode = 0; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, *ring->ctxt_wp); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + 
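+ /* The context WP above is updated before the doorbell write so the device reads a consistent write pointer */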
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, + ring->db_addr, db); +} + +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? MHI_EE_MAX : exec; +} + +enum mhi_state mhi_get_m_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr, + buf_info->len, buf_info->dir); + if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr, + buf_info->p_addr); +} + +int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags) +{ + return -EINVAL; +} + +int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags) +{ + return -EINVAL; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->wp += ring->el_size; + if (ring->wp >= (ring->base + ring->len)) + ring->wp = ring->base; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + /* smp update */ + smp_wmb(); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) { + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + } else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + + return nr_el; +} + +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + dma_addr_t ctxt_wp; + + /* Update the WP */ + ring->wp += ring->el_size; + ctxt_wp = *ring->ctxt_wp + ring->el_size; + + if (ring->wp >= (ring->base + ring->len)) { + ring->wp = ring->base; + ctxt_wp = ring->iommu_base; + } + + *ring->ctxt_wp = ctxt_wp; + + /* Update the RP */ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + + /* Update to all cores */ + smp_wmb(); +} + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + 
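+ /* The ring is full when advancing the WP by one element would land on the RP */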
void *tmp = ring->wp + ring->el_size; + + if (tmp >= (ring->base + ring->len)) + tmp = ring->base; + + return (tmp == ring->rp); +} + +int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags) +{ + struct sk_buff *skb = buf; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + bool assert_wake = false; + int ret; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + /* generate the tre */ + buf_info = buf_ring->wp; + buf_info->v_addr = skb->data; + buf_info->cb_buf = skb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + goto map_error; + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + read_lock_bh(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_bh(&mhi_chan->lock); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +map_error: + if (assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum mhi_flags flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + buf_info->v_addr = buf; + buf_info->cb_buf = cb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = buf_len; + + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_transfer(struct mhi_device *mhi_dev, + enum dma_data_direction dir, void *buf, size_t len, + enum mhi_flags mflags) +{ + if (dir == 
DMA_TO_DEVICE) + return mhi_dev->ul_chan->queue_xfer(mhi_dev, mhi_dev->ul_chan, + buf, len, mflags); + else + return mhi_dev->dl_chan->queue_xfer(mhi_dev, mhi_dev->dl_chan, + buf, len, mflags); +} +EXPORT_SYMBOL_GPL(mhi_queue_transfer); + +int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring; + unsigned long flags; + bool assert_wake = false; + int ret; + + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) + return -EIO; + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; +} + +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy virtual devices thats attached to bus */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + dev_info(mhi_cntrl->dev, "destroy device for chan:%s\n", + mhi_dev->chan_name); + + /* Notify the client and remove the device from mhi bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} + +/* Bind mhi channels to mhi devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + int ret; + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->ee != mhi_cntrl->ee) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_DEVICE_XFER; + switch (mhi_chan->dir) { + case DMA_TO_DEVICE: + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + break; + case DMA_FROM_DEVICE: + /* We use dl_chan as offload channels */ + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + break; + default: + 
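+ /* DMA_BIDIRECTIONAL and DMA_NONE channels cannot be bound to a client device */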
dev_err(mhi_cntrl->dev, "Direction not supported\n"); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + return; + } + + mhi_chan->mhi_dev = mhi_dev; + + /* Check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + } + mhi_chan->mhi_dev = mhi_dev; + } + } + + mhi_dev->chan_name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan, + mhi_dev->chan_name); + + ret = device_add(&mhi_dev->dev); + if (ret) + mhi_dealloc_device(mhi_cntrl, mhi_dev); + } +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + u32 ev_code; + struct mhi_result result; + unsigned long flags = 0; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * if it's a DB Event then we need to grab the lock + * with preemption disable and as a write because we + * have to update db register and another thread could + * be doing same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + /* device rp after servicing the TREs */ + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + result.dir = mhi_chan->dir; + + /* local rp */ + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* if it's last tre get len from the event */ + if (local_rp == ev_tre) + xfer_len = MHI_TRE_GET_EV_LEN(event); + else + xfer_len = buf_info->len; + + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + result.bytes_xferd = xfer_len; + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + /* + * recycle the buffer if buffer is pre-allocated, + * if there is error, not much we can do apart from + * dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + dev_err(mhi_cntrl->dev, + "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + mhi_chan->db_cfg.db_mode = 1; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + 
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + default: + dev_err(mhi_cntrl->dev, "Unknown event 0x%x\n", ev_code); + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + + /* + * This is a quick check to avoid unnecessary event processing + * in case MHI is already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + switch (type) { + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + dev_info(mhi_cntrl->dev, + "MHI state change event to state: %s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum mhi_pm_state new_state; + + dev_info(mhi_cntrl->dev, + "MHI system error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + break; + } + default: + dev_err(mhi_cntrl->dev, "Invalid state: %s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum dev_st_transition st = DEV_ST_TRANSITION_MAX; + enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); + + dev_info(mhi_cntrl->dev, "MHI EE received event:%s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = DEV_ST_TRANSITION_SBL; + break; + case MHI_EE_AMSS: + st = DEV_ST_TRANSITION_AMSS; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, + mhi_cntrl->priv_data, + MHI_CB_EE_RDDM); + /* fallthrough */ + case MHI_EE_BHIE: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + break; + 
default: + dev_err(mhi_cntrl->dev, "Unhandled EE event\n"); + } + if (st != DEV_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + + break; + } + case MHI_PKT_TYPE_TX_EVENT: + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + break; + default: + dev_err(mhi_cntrl->dev, "Unhandled event type %d\n", + type); + break; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +void mhi_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + + /* process all pending events */ + spin_lock_bh(&mhi_event->lock); + mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + spin_unlock_bh(&mhi_event->lock); +} + +void mhi_ctrl_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + enum mhi_state state = MHI_STATE_MAX; + enum mhi_pm_state pm_state = 0; + int ret; + + /* Process ctrl events events */ + ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + + /* + * We received an IRQ but no events to process, maybe device went to + * SYS_ERR state? Check the state to confirm. 
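+ * If the device has indeed moved to SYS_ERR, mark the PM state as SYS_ERR_DETECT and let the syserr worker handle recovery; otherwise the spurious interrupt is simply ignored.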
+ */ + if (!ret) { + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + state = mhi_get_m_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + dev_info(mhi_cntrl->dev, "MHI system error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + } +} + +irqreturn_t mhi_irq_handler(int irq_number, void *dev) +{ + struct mhi_event *mhi_event = dev; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + /* Only proceed if event ring has pending events */ + if (ev_ring->rp == dev_rp) + return IRQ_HANDLED; + + /* For client managed event ring, notify pending data */ + if (mhi_event->cl_manage) { + struct mhi_chan *mhi_chan = mhi_event->mhi_chan; + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + + if (mhi_dev) + mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); + } else { + tasklet_schedule(&mhi_event->task); + } + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + enum mhi_state state = MHI_STATE_MAX; + enum mhi_pm_state pm_state = 0; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + state = mhi_get_m_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + dev_info(mhi_cntrl->dev, "MHI system error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handler(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + + /* Wake up events waiting for state change */ + wake_up(&mhi_cntrl->state_event); + + return IRQ_WAKE_THREAD; +} + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + int chan = 0; + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + default: + dev_err(mhi_cntrl->dev, "Command not supported\n"); + break; + } + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + + dev_info(mhi_cntrl->dev, "Entered: unprepare channel:%d\n", + mhi_chan->chan); + + /* 
no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + write_unlock_irq(&mhi_chan->lock); + mutex_unlock(&mhi_chan->mutex); + return; + } + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + goto error_invalid_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); + if (ret) + goto error_completion; + + /* even if it fails we will still reset */ + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) + dev_err(mhi_cntrl->dev, + "Failed to receive cmd completion, still resetting\n"); + +error_completion: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_invalid_state: + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + dev_info(mhi_cntrl->dev, "chan:%d successfully resetted\n", + mhi_chan->chan); + mutex_unlock(&mhi_chan->mutex); +} + +static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + + dev_info(mhi_cntrl->dev, "Entered: preparing channel:%d\n", + mhi_chan->chan); + + if (mhi_cntrl->ee != mhi_chan->ee) + return -ENOTCONN; + + mutex_lock(&mhi_chan->mutex); + /* client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) + goto error_init_chan; + } + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + ret = -EIO; + goto error_pm_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); + if (ret) + goto error_send_cmd; + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + ret = -EIO; + goto error_send_cmd; + } + + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_ENABLED; + write_unlock_irq(&mhi_chan->lock); + + /* pre allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* prepare transfer descriptors */ + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, + len, MHI_EOT); + if (ret) { + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + 
read_unlock_bh(&mhi_cntrl->pm_lock); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + + dev_info(mhi_cntrl->dev, "Chan:%d successfully moved to start state\n", + mhi_chan->chan); + + return 0; + +error_send_cmd: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_event_ctxt *er_ctxt; + struct mhi_event *mhi_event; + struct mhi_ring *ev_ring, *buf_ring, *tre_ring; + unsigned long flags; + int chan = mhi_chan->chan; + struct mhi_result result; + + /* Nothing to reset, client hasn't queued buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + ev_ring = &mhi_event->ring; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + /* Mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (!mhi_event->mhi_chan) { + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == + MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + } else { + ev_ring->rp = dev_rp; + ev_ring->wp = ev_ring->rp - ev_ring->el_size; + if (ev_ring->wp < ev_ring->base) + ev_ring->wp = ev_ring->base + ev_ring->len - + ev_ring->el_size; + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + } + + spin_unlock_irqrestore(&mhi_event->lock, flags); + + /* Reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) + mhi_cntrl->wake_put(mhi_cntrl, false); + + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } + + read_unlock_bh(&mhi_cntrl->pm_lock); +} + +/* Move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) + goto error_open_chan; + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? 
mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } +} +EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? + mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL_GPL(mhi_get_no_free_descriptors); + +struct mhi_controller *mhi_get_controller(const char *dev_name) +{ + struct mhi_device *mhi_dev; + struct device *dev; + + dev = bus_find_device_by_name(&mhi_bus_type, NULL, dev_name); + if (!dev) + return NULL; + + mhi_dev = to_mhi_device(dev); + + return mhi_dev->mhi_cntrl; +} +EXPORT_SYMBOL_GPL(mhi_get_controller); + +int mhi_poll(struct mhi_device *mhi_dev, u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_poll); diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c new file mode 100644 index 000000000000..db11036cd6d5 --- /dev/null +++ b/drivers/bus/mhi/core/mhi_pm.c @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mhi.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include "mhi_internal.h" + +/* + * Not all MHI state transitions are synchronous. Transitions like Linkdown, + * SYS_ERR, and shutdown can happen anytime asynchronously. This function will + * transition to a new state only if we're allowed to. + * + * Priority increases as we go down. For instance, from any state in L0, the + * transition can be made to states in L1, L2 and L3. A notable exception to + * this rule is state DISABLE. From DISABLE state we can only transition to + * POR state. Also, while in L2 state, user cannot jump back to previous + * L1 or L0 states. 
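+ * For example, a normal power-up walks DISABLE -> POR -> M0, and a suspend/resume cycle walks M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0; both sequences appear in the transition table below.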
+ * + * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + */ +static struct mhi_pm_transitions const dev_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT | + MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + }, +}; + +enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) + return cur_state; + + if (unlikely(dev_state_transitions[index].from_state != cur_state)) + return cur_state; + + if (unlikely(!(dev_state_transitions[index].to_states & state))) + return cur_state; + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, + MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* Assert device wake db */ +static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + + /* + * If force flag is set, then increment the wake count value and + * ring wake db + */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* + * If resources are already requested, then just increment + * the wake count value and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + 
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* De-assert device wake db */ +static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, + bool override) +{ + unsigned long flags; + + /* + * Only continue if there is a single resource, else just decrement + * and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +/* Handle device ready state transition */ +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + u32 reset = 1, ready = 0; + struct mhi_event *mhi_event; + enum mhi_pm_state cur_state; + int ret, i; + + /* Wait for RESET to be cleared and READY bit to be set by the client */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &reset) || + mhi_read_reg_field(mhi_cntrl, base, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &ready) || + (!reset && ready), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + /* Check if device entered error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, "Device link is not accessible\n"); + return -EIO; + } + + /* Timeout if device did not transition to ready state */ + if (reset || !ready) { + dev_err(mhi_cntrl->dev, "Device Ready timeout\n"); + return -ETIMEDOUT; + } + + dev_info(mhi_cntrl->dev, "Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + dev_err(mhi_cntrl->dev, "Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, "Device registers not accessible\n"); + goto error_mmio; + } + + /* Configure MMIO registers */ + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->dev, "Error configuring MMIO registers\n"); + goto error_mmio; + } + + /* Add elements to all SW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if this is an offload or HW event */ + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* Update all cores */ + smp_wmb(); + + /* Ring the event ring db */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Set MHI to M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + 
read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_chan *mhi_chan; + int i; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + dev_err(mhi_cntrl->dev, "Unable to transition to M0 state\n"); + return -EIO; + } + + mhi_cntrl->M0++; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + + /* Ring all event rings and CMD ring only if we're in AMSS EE */ + if (mhi_cntrl->ee == MHI_EE_AMSS) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Only ring primary cmd ring if ring is not empty */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + } + + /* Ring channel db registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->db_cfg.reset_req) + mhi_chan->db_cfg.db_mode = true; + + /* Only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + write_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + + return 0; +} + +/* + * After receiving the MHI state change event from the device indicating the + * transition to M1 state, the host can transition the device to M2 state + * for keeping it in low power state. 
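+ * If device wake references are still pending at that point, the host immediately votes the device back out of M2; otherwise the controller driver is notified with MHI_CB_IDLE.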
+ */ +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + mhi_cntrl->M2++; + + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* If there are any pending resources, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->dev_wake))) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +/* MHI M3 completion handler */ +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + dev_err(mhi_cntrl->dev, "Unable to transition to M3 state\n"); + return -EIO; + } + + wake_up(&mhi_cntrl->state_event); + mhi_cntrl->M3++; + + return 0; +} + +/* Handle device AMSS transition */ +static int mhi_pm_amss_transition(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event; + + dev_info(mhi_cntrl->dev, "Processing AMSS transition\n"); + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_AMSS; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + /* Add elements to all HW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* Update to all cores */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* + * The devices are only created when the client device switches its + * Execution Environment (EE) to either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + + return 0; +} + +/* Handle SYS_ERR and Shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state transition_state) +{ + enum mhi_pm_state cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + int ret, i; + + dev_info(mhi_cntrl->dev, + "Transitioning from PM state: %s to: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + to_mhi_pm_state_str(transition_state)); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state == transition_state) { + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != transition_state) { + dev_info(mhi_cntrl->dev, + "Failed to transition to state: %s from: 
%s\n", + to_mhi_pm_state_str(transition_state), + to_mhi_pm_state_str(cur_state)); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* Trigger MHI RESET so that the device will not access host memory */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + dev_info(mhi_cntrl->dev, "Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &in_reset) || + !in_reset, timeout); + if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + dev_err(mhi_cntrl->dev, + "Device failed to exit MHI Reset state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + dev_info(mhi_cntrl->dev, + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + dev_info(mhi_cntrl->dev, + "Waiting for all pending threads to complete\n"); + wake_up(&mhi_cntrl->state_event); + flush_work(&mhi_cntrl->st_worker); + flush_work(&mhi_cntrl->fw_worker); + + dev_info(mhi_cntrl->dev, + "Reset all active channels and remove MHI devices\n"); + device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + /* Reset the ev rings and cmd rings */ + dev_info(mhi_cntrl->dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + if (cur_state == MHI_PM_SYS_ERR_PROCESS) { + mhi_ready_state_transition(mhi_cntrl); + } else { + /* Move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + dev_err(mhi_cntrl->dev, + "Error moving from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + } + + dev_info(mhi_cntrl->dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Queue a new work item and schedule work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if 
(!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + schedule_work(&mhi_cntrl->st_worker); + + return 0; +} + +/* SYS_ERR worker */ +void mhi_pm_sys_err_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + syserr_worker); + + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); +} + +/* Device State Transition worker */ +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + st_worker); + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_info(mhi_cntrl->dev, "Handling state transition: %s\n", + TO_DEV_STATE_TRANS_STR(itr->state)); + + switch (itr->state) { + case DEV_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (MHI_IN_PBL(mhi_cntrl->ee)) + wake_up(&mhi_cntrl->state_event); + break; + case DEV_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + /* + * The devices are only created when the client device + * switches its Execution Environment (EE) to either + * SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + break; + case DEV_ST_TRANSITION_AMSS: + mhi_pm_amss_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_READY: + mhi_ready_state_transition(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 val; + enum mhi_ee_type current_ee; + enum dev_st_transition next_state; + + dev_info(mhi_cntrl->dev, "Requested to power on\n"); + + if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings) + return -EINVAL; + + /* Supply default wake routines if not provided by controller driver */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + + if (!mhi_cntrl->pre_init) { + /* Setup device context */ + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto error_setup_irq; + } + + /* Setup BHI offset & INTVEC */ + write_lock_irq(&mhi_cntrl->pm_lock); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + goto error_bhi_offset; + } + + mhi_cntrl->bhi = mhi_cntrl->regs + val; + + /* Setup BHIE offset */ + if (mhi_cntrl->fbc_download) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_err(mhi_cntrl->dev, "Error reading BHIE offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhie = mhi_cntrl->regs + val; + } + + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + 
/* confirm device is in valid exec env */ + if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + dev_info(mhi_cntrl->dev, "Not a valid EE for power on\n"); + ret = -EIO; + goto error_bhi_offset; + } + + /* transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? + DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; + + if (next_state == DEV_ST_TRANSITION_PBL) + schedule_work(&mhi_cntrl->fw_worker); + + mhi_queue_state_transition(mhi_cntrl, next_state); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + dev_info(mhi_cntrl->dev, "Power on setup success\n"); + + return 0; + +error_bhi_offset: + if (!mhi_cntrl->pre_init) + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + if (!mhi_cntrl->pre_init) + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum mhi_pm_state cur_state; + + /* If it's not a graceful shutdown, force MHI to linkdown state */ + if (!graceful) { + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_LD_ERR_FATAL_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) + dev_info(mhi_cntrl->dev, + "Failed to move to state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + + if (!mhi_cntrl->pre_init) { + /* Free all allocated resources */ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + } +} +EXPORT_SYMBOL_GPL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_AMSS || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (mhi_cntrl->ee == MHI_EE_AMSS) ? 
0 : -EIO; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + int ret; + enum mhi_pm_state new_state; + struct mhi_chan *itr, *tmp; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* Return busy if there are any pending resources */ + if (atomic_read(&mhi_cntrl->dev_wake)) + return -EBUSY; + + /* Take MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, + "Could not enter M0/M1 state"); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + if (atomic_read(&mhi_cntrl->dev_wake)) { + write_unlock_irq(&mhi_cntrl->pm_lock); + return -EBUSY; + } + + dev_info(mhi_cntrl->dev, "Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_err(mhi_cntrl->dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_info(mhi_cntrl->dev, "Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, + "Did not enter M3 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Notify clients about entering LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + int ret; + struct mhi_chan *itr, *tmp; + + dev_info(mhi_cntrl->dev, "Entered with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* Notify clients about exiting LPM */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_info(mhi_cntrl->dev, + "Error setting to PM state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* Set MHI to M0 and wait for 
completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + dev_err(mhi_cntrl->dev, + "Did not enter M0 state, MHI state: %s, PM state: %s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_pm_resume); + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_inc(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + atomic_inc(&mhi_dev->dev_wake); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_dec(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_put); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + int ret; + + /* Before entering RDDM mode, device needs to enter M0 state */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto no_reg_access; + + dev_info(mhi_cntrl->dev, "Triggering SYS_ERR to force RDDM state\n"); + + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + mhi_cntrl->wake_put(mhi_cntrl, false); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + + /* Wait for RDDM event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = !ret ? 0 : -EIO; + + return ret; + +no_reg_access: + mhi_cntrl->wake_put(mhi_cntrl, false); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + + return -EIO; +} +EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); diff --git a/include/linux/mhi.h b/include/linux/mhi.h new file mode 100644 index 000000000000..2d59fca9caa5 --- /dev/null +++ b/include/linux/mhi.h @@ -0,0 +1,650 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved. + * + */ +#ifndef _MHI_H_ +#define _MHI_H_ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/mutex.h> +#include <linux/rwlock_types.h> +#include <linux/slab.h> +#include <linux/spinlock_types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +struct mhi_chan; +struct mhi_event; +struct mhi_ctxt; +struct mhi_cmd; +struct image_info; +struct bhi_vec_entry; +struct mhi_buf_info; + +/** + * enum mhi_callback - MHI callback + * @MHI_CB_IDLE: MHI entered idle state + * @MHI_CB_PENDING_DATA: New data available for client to process + * @MHI_CB_LPM_ENTER: MHI host entered low power mode + * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode + * @MHI_CB_EE_RDDM: MHI device entered RDDM execution enviornment + */ +enum mhi_callback { + MHI_CB_IDLE, + MHI_CB_PENDING_DATA, + MHI_CB_LPM_ENTER, + MHI_CB_LPM_EXIT, + MHI_CB_EE_RDDM, +}; + +/** + * enum mhi_flags - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum mhi_flags { + MHI_EOB, + MHI_EOT, + MHI_CHAIN, +}; + +/** + * enum mhi_device_type - Device types + * @MHI_DEVICE_XFER: Handles data transfer + * @MHI_DEVICE_TIMESYNC: Use for timesync feature + * @MHI_DEVICE_CONTROLLER: Control device + */ +enum mhi_device_type { + MHI_DEVICE_XFER, + MHI_DEVICE_TIMESYNC, + MHI_DEVICE_CONTROLLER, +}; + +/** + * struct image_info - Firmware and RDDM table table + * @mhi_buf - Buffer for firmware and RDDM table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + +/** + * enum mhi_ee_type - Execution environment types + * @MHI_EE_PBL: Primary Bootloader + * @MHI_EE_SBL: Secondary Bootloader + * @MHI_EE_AMSS: Modem, aka the primary runtime EE + * @MHI_EE_BHIE: BHI extension + * @MHI_EE_RDDM: Ram dump download mode + * @MHI_EE_PTHRU: Passthrough + * @MHI_EE_EDL: Embedded downloader + */ +enum mhi_ee_type { + MHI_EE_PBL = 0x0, + MHI_EE_SBL = 0x1, + MHI_EE_AMSS = 0x2, + MHI_EE_BHIE = 0x3, + MHI_EE_RDDM = 0x4, + MHI_EE_PTHRU = 0x5, + MHI_EE_EDL = 0x6, + MHI_EE_MAX_SUPPORTED = MHI_EE_EDL, + MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ + MHI_EE_MAX, +}; + +/** + * enum mhi_buf_type - Accepted buffer type for the channel + * @MHI_BUF_RAW: Raw buffer + * @MHI_BUF_SKB: SKB struct + * @MHI_BUF_SCLIST: Scatter-gather list + * @MHI_BUF_NOP: CPU offload channel, host does not accept transfer + */ +enum mhi_buf_type { + MHI_BUF_RAW, + MHI_BUF_SKB, + MHI_BUF_SCLIST, + MHI_BUF_NOP, +}; + +/** + * enum mhi_er_data_type - Event ring data types + * @MHI_ER_DATA: Only client data over this ring + * @MHI_ER_CTRL: MHI control data and client data + * @MHI_ER_TSYNC: Time sync events + */ +enum mhi_er_data_type { + MHI_ER_DATA, + MHI_ER_CTRL, + MHI_ER_TSYNC, +}; + +/** + * enum mhi_db_brst_mode - Doorbell mode + * @MHI_DB_BRST_DISABLE: Burst mode disable + * @MHI_DB_BRST_ENABLE: Burst mode enable + */ +enum mhi_db_brst_mode { + MHI_DB_BRST_DISABLE = 0x2, + MHI_DB_BRST_ENABLE = 0x3, +}; + +/** + * struct mhi_channel_config - Channel configuration structure for controller + * @num: The number assigned to this channel + * @name: The name of this channel + * @num_elements: The number of elements that can be queued to this channel + * @event_ring: The event rung index that services this channel + * @dir: Direction that data may flow on this channel + * @ee: Under which execution environment is this channel valid + * @pollcfg: Polling 
configuration for burst mode. 0 is default. milliseconds + for UL channels, multiple of 8 ring elements for DL channels + * @data_type: Data type accepted by this channel + * @doorbell: Doorbell mode + * @lpm_notify: The channel master requires low power mode notifications + * @offload_channel: The client manages the channel completely + * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition + * @auto_queue: Framework will automatically queue buffers for DL traffic + * @auto_start: Automatically start (open) this channel + */ +struct mhi_channel_config { + u32 num; + char *name; + u32 num_elements; + u32 event_ring; + enum dma_data_direction dir; + enum mhi_ee_type ee; + u32 pollcfg; + enum mhi_buf_type data_type; + enum mhi_db_brst_mode doorbell; + bool lpm_notify; + bool offload_channel; + bool doorbell_mode_switch; + bool auto_queue; + bool auto_start; +}; + +/** + * struct mhi_event_config - Event ring configuration structure for controller + * @num_elements: The number of elements that can be queued to this ring + * @irq_moderation_ms: Delay irq for additional events to be aggregated + * @irq: IRQ associated with this ring + * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring + * @mode: Doorbell mode + * @data_type: Type of data this ring will process + * @hardware_event: This ring is associated with hardware channels + * @client_managed: This ring is client managed + * @offload_channel: This ring is associated with an offloaded channel + * @priority: Priority of this ring. Use 1 for now + */ +struct mhi_event_config { + u32 num_elements; + u32 irq_moderation_ms; + u32 irq; + u32 channel; + enum mhi_db_brst_mode mode; + enum mhi_er_data_type data_type; + bool hardware_event; + bool client_managed; + bool offload_channel; + u32 priority; +}; + +/** + * struct mhi_controller_config - Root MHI controller configuration + * @max_channels: Maximum number of channels supported + * @timeout_ms: Timeout value for operations. 0 means use default + * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access + * @buf_len: Size of automatically allocated buffers. 
0 means use default + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + * @num_events: Number of event rings defined in @event_cfg + * @event_cfg: Array of defined event rings + */ +struct mhi_controller_config { + u32 max_channels; + u32 timeout_ms; + bool use_bounce_buf; + u32 buf_len; + u32 num_channels; + struct mhi_channel_config *ch_cfg; + u32 num_events; + struct mhi_event_config *event_cfg; +}; + +/** + * struct mhi_controller - Master MHI controller structure + * @name: Name of the controller + * @dev: Driver model device node for the controller + * @mhi_dev: MHI device instance for the controller + * @dev_id: Device ID of the controller + * @bus_id: Physical bus instance used by the controller + * @regs: Base address of MHI MMIO register space + * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space + * @wake_db: MHI WAKE doorbell register address + * @iova_start: IOMMU starting address for data + * @iova_stop: IOMMU stop address for data + * @fw_image: Firmware image name for normal booting + * @edl_image: Firmware image name for emergency download mode + * @fbc_download: MHI host needs to do complete image transfer + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size + * @seg_len: BHIe vector size + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @max_chan: Maximum number of channels the controller supports + * @mhi_chan: Points to the channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @nr_irqs_req: Number of IRQs required to operate + * @nr_irqs: Number of IRQ allocated by bus master + * @irq: base irq # to request + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @timeout_ms: Timeout in ms for state transitions + * @pm_mutex: Mutex for suspend/resume operation + * @pre_init: MHI host needs to do pre-initialization before power up + * @pm_lock: Lock for protecting MHI power management state + * @pm_state: MHI power management state + * @ee: MHI device execution environment + * @dev_state: MHI device state + * @wake_set: Device wakeup set flag + * @dev_wake: Device wakeup count + * @alloc_size: Total memory allocations size of the controller + * @transition_list: List of MHI state transitions + * @wlock: Lock for protecting device wakeup + * @M0: M0 state counter for debugging + * @M2: M2 state counter for debugging + * @M3: M3 state counter for debugging + * @st_worker: State transition worker + * @fw_worker: Firmware download worker + * @syserr_worker: System error worker + * @state_event: State change event + * @status_cb: CB function to notify various power states to bus master + * @link_status: CB function to query link status of the device + * @wake_get: CB function to assert device wake + * @wake_put: CB function to de-assert device wake + * @runtime_get: CB function to controller runtime resume + * @runtimet_put: CB function to decrement pm usage + * @lpm_disable: CB function to request disable link level low power modes + * @lpm_enable: CB function to request enable link level low power modes again + * @map_single: CB function to create TRE buffer + * 
@unmap_single: CB function to destroy TRE buffer + * @dtr_dev: MHI device for IOCTL operations using the DTR signal + * @bounce_buf: Use of bounce buffer + * @buffer_len: Bounce buffer length + * @priv_data: Points to bus master's private data + */ +struct mhi_controller { + const char *name; + struct device *dev; + struct mhi_device *mhi_dev; + u32 dev_id; + u32 bus_id; + void __iomem *regs; + void __iomem *bhi; + void __iomem *bhie; + void __iomem *wake_db; + dma_addr_t iova_start; + dma_addr_t iova_stop; + const char *fw_image; + const char *edl_image; + bool fbc_download; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + struct image_info *fbc_image; + struct image_info *rddm_image; + u32 max_chan; + struct mhi_chan *mhi_chan; + struct list_head lpm_chans; + u32 total_ev_rings; + u32 hw_ev_rings; + u32 sw_ev_rings; + u32 nr_irqs_req; + u32 nr_irqs; + int *irq; + + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + struct mhi_ctxt *mhi_ctxt; + + u32 timeout_ms; + struct mutex pm_mutex; + bool pre_init; + rwlock_t pm_lock; + u32 pm_state; + u32 ee; + u32 dev_state; + bool wake_set; + atomic_t dev_wake; + atomic_t alloc_size; + struct list_head transition_list; + spinlock_t transition_lock; + spinlock_t wlock; + u32 M0, M2, M3; + struct work_struct st_worker; + struct work_struct fw_worker; + struct work_struct syserr_worker; + wait_queue_head_t state_event; + + void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv, + enum mhi_callback cb); + int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv); + void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); + int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv); + void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv); + void (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv); + void (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + + struct mhi_device *dtr_dev; + bool bounce_buf; + size_t buffer_len; + void *priv_data; +}; + +/** + * struct mhi_device - Structure representing an MHI device which binds + * to channels + * @dev: Driver model device node for the MHI device + * @ul_chan_id: MHI channel id for UL transfer + * @dl_chan_id: MHI channel id for DL transfer + * @tiocm: Device current terminal settings + * @id: Pointer to MHI device ID struct + * @chan_name: Name of the channel to which the device binds + * @mhi_cntrl: Controller the device belongs to + * @ul_chan: UL channel for the device + * @dl_chan: DL channel for the device + * @dev_wake: Device wakeup counter + * @dev_type: MHI device type + */ +struct mhi_device { + struct device dev; + int ul_chan_id; + int dl_chan_id; + u32 tiocm; + const struct mhi_device_id *id; + const char *chan_name; + struct mhi_controller *mhi_cntrl; + struct mhi_chan *ul_chan; + struct mhi_chan *dl_chan; + atomic_t dev_wake; + enum mhi_device_type dev_type; +}; + +/** + * struct mhi_result - Completed buffer information + * @buf_addr: Address of data buffer + * @dir: Channel direction + * @bytes_xferd: # of bytes transferred + * @transaction_status: Status of last transaction + */ +struct mhi_result { + void *buf_addr; + enum dma_data_direction dir; + size_t bytes_xferd; + int transaction_status; +}; + +/** + * struct mhi_buf - MHI Buffer description + * @buf: Virtual address of
the buffer + * @dma_addr: IOMMU address of the buffer + * @len: # of bytes + * @name: Buffer label. For an offload channel, the configuration name must be: + * ECA - Event context array data + * CCA - Channel context array data + */ +struct mhi_buf { + void *buf; + dma_addr_t dma_addr; + size_t len; + const char *name; +}; + +/** + * struct mhi_driver - Structure representing an MHI client driver + * @id_table: Pointer to MHI client device ID table + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL data transfer + * @dl_xfer_cb: CB function for DL data transfer + * @status_cb: CB function for asynchronous status + * @driver: Device driver model driver + */ +struct mhi_driver { + const struct mhi_device_id *id_table; + int (*probe)(struct mhi_device *mhi_dev, + const struct mhi_device_id *id); + void (*remove)(struct mhi_device *mhi_dev); + void (*ul_xfer_cb)(struct mhi_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_device *mhi_dev, + struct mhi_result *result); + void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb); + struct device_driver driver; +}; + +#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) +#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) + +/** + * mhi_controller_set_devdata - Set MHI controller private data + * @mhi_cntrl: MHI controller to set data + */ +static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl, + void *priv) +{ + mhi_cntrl->priv_data = priv; +} + +/** + * mhi_controller_get_devdata - Get MHI controller private data + * @mhi_cntrl: MHI controller to get data + */ +static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl) +{ + return mhi_cntrl->priv_data; +} + +/** + * mhi_free_controller - Free MHI controller resources + * @mhi_cntrl: MHI controller to free + */ +static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} + +/** + * mhi_driver_register - Register driver with MHI framework + * @mhi_drv: Driver associated with the device + */ +int mhi_driver_register(struct mhi_driver *mhi_drv); + +/** + * mhi_driver_unregister - Unregister a driver for mhi_devices + * @mhi_drv: Driver associated with the device + */ +void mhi_driver_unregister(struct mhi_driver *mhi_drv); + +/** + * mhi_device_configure - Configure ECA or CCA context. For offload channels + * that the client manages, call this function to + * configure the channel context or event context array + * associated with the channel + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + * @mhi_buf: Configuration data + * @elements: # of configuration elements + */ +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *mhi_buf, + int elements); + +/** + * mhi_device_get - Disable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - Disable device low power mode.
Synchronously + * take the controller out of suspended state + * @mhi_dev: Device associated with the channel + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - Re-enable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - Setup channel for data transfer + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer - Unprepare the channels + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_get_no_free_descriptors - Get the # of TDs available to queue buffers + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + */ +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir); + +/** + * mhi_poll - Poll for any available data in DL direction + * @mhi_dev: Device associated with the channels + * @budget: # of events to process + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_ioctl - User space IOCTL support for MHI channels + * @mhi_dev: Device associated with the channels + * @cmd: IOCTL cmd + * @arg: Optional parameter, ioctl cmd specific + */ +long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg); + +/** + * mhi_alloc_controller - Allocate mhi_controller structure. + */ +struct mhi_controller *mhi_alloc_controller(void); + +/** + * mhi_queue_transfer - Queue a buffer to the device + * @mhi_dev: Device associated with the channels + * @dir: Data direction + * @buf: Data buffer (skb for hardware channels) + * @len: Size in bytes + * @mflags: Interrupt flags for the device + */ +int mhi_queue_transfer(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_register_controller - Register MHI controller + * @mhi_cntrl: MHI controller to register + * @config: Configuration to use for the controller + */ +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config); + +/** + * mhi_unregister_controller - Unregister MHI controller + * @mhi_cntrl: MHI controller to unregister + */ +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl); + +/** + * mhi_get_controller - Look up a registered controller + * @dev_name: Device name to lookup + */ +struct mhi_controller *mhi_get_controller(const char *dev_name); + +/** + * mhi_prepare_for_power_up - Do pre-initialization before power up. + * This is optional; call it before power up if + * the controller does not want the bus framework to + * automatically free any allocated memory during + * the shutdown process.
+ * @mhi_cntrl: MHI controller + */ +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_async_power_up - Start MHI power up sequence + * @mhi_cntrl: MHI controller + */ +int mhi_async_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_sync_power_up - Start MHI power up sequence and wait till the device + * enters a valid EE state + * @mhi_cntrl: MHI controller + */ +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_power_down - Start MHI power down sequence + * @mhi_cntrl: MHI controller + * @graceful: Link is still accessible, so do a graceful shutdown process + */ +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); + +/** + * mhi_unprepare_after_power_down - Free any allocated memory after power down + * @mhi_cntrl: MHI controller + */ +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_suspend - Move MHI into a suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_resume - Resume MHI from suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_resume(struct mhi_controller *mhi_cntrl); + +/** + * mhi_download_rddm_img - Download ramdump image from device for + * debugging purposes. + * @mhi_cntrl: MHI controller + * @in_panic: Download rddm image during kernel panic + */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); + +/** + * mhi_force_rddm_mode - Force device into rddm mode + * @mhi_cntrl: MHI controller + */ +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); + +#endif /* _MHI_H_ */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5714fd35a83c..c9e55ac5d15d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -821,4 +821,16 @@ struct wmi_device_id { const void *context; }; +#define MHI_NAME_SIZE 32 + +/** + * struct mhi_device_id - MHI device identification + * @chan: MHI channel name + * @driver_data: Driver data + */ +struct mhi_device_id { + const char chan[MHI_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */
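To make the client side of the interface added above concrete, here is a minimal sketch of an MHI client driver built only on the declarations from the new include/linux/mhi.h and struct mhi_device_id: it binds to a channel by name, prepares the channels for transfer in probe, and consumes DL completions through the mhi_result callback. The "LOOPBACK" channel name, the sample_* identifiers and the module boilerplate are illustrative assumptions, not part of this series::

    /*
     * Illustrative sketch only: the "LOOPBACK" channel name and all
     * sample_* identifiers are assumptions, not part of the patch above.
     */
    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/mhi.h>

    static const struct mhi_device_id sample_mhi_match_table[] = {
    	{ .chan = "LOOPBACK" },
    	{},
    };

    static int sample_mhi_probe(struct mhi_device *mhi_dev,
    			    const struct mhi_device_id *id)
    {
    	/* Move the UL/DL channels backing this device to a running state */
    	return mhi_prepare_for_transfer(mhi_dev);
    }

    static void sample_mhi_remove(struct mhi_device *mhi_dev)
    {
    	mhi_unprepare_from_transfer(mhi_dev);
    }

    static void sample_mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
    				  struct mhi_result *result)
    {
    	/*
    	 * A DL buffer has completed: result->buf_addr holds the data and
    	 * result->bytes_xferd its length. On a channel configured with
    	 * auto_queue, the core keeps DL buffers queued, so only the
    	 * completion result is consumed here.
    	 */
    	if (result->transaction_status)
    		dev_err(&mhi_dev->dev, "DL transfer failed: %d\n",
    			result->transaction_status);
    }

    static struct mhi_driver sample_mhi_driver = {
    	.id_table = sample_mhi_match_table,
    	.probe = sample_mhi_probe,
    	.remove = sample_mhi_remove,
    	.dl_xfer_cb = sample_mhi_dl_xfer_cb,
    	.driver = {
    		.name = "sample_mhi_client",
    		.owner = THIS_MODULE,
    	},
    };

    static int __init sample_mhi_init(void)
    {
    	return mhi_driver_register(&sample_mhi_driver);
    }

    static void __exit sample_mhi_exit(void)
    {
    	mhi_driver_unregister(&sample_mhi_driver);
    }

    module_init(sample_mhi_init);
    module_exit(sample_mhi_exit);
    MODULE_LICENSE("GPL v2");

A controller driver sits on the other side of the same header: it would allocate a struct mhi_controller with mhi_alloc_controller(), fill in the MMIO, IRQ and callback fields documented above, register it together with a struct mhi_controller_config via mhi_register_controller(), and finally bring the device up with mhi_sync_power_up() or mhi_async_power_up().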