author     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>  2015-10-16 14:08:34 +0100
committer  Srinivas Kandagatla <srinivas.kandagatla@linaro.org>  2015-10-16 14:08:34 +0100
commit     4eaa011202af83b4d51352454898053d76542cbf (patch)
tree       9e260fbcdc4044fdc8592d1ecf54480d4f3b96af
parent     01f09c96a05020891955b7e47e63cc3e4be65687 (diff)
parent     3dca4e8b812b2f786e1f68261bca2b6ca8ea49dd (diff)
Merge branch 'tracking-qcomlt-hsuart' into integration-linux-qcomlt
* tracking-qcomlt-hsuart:
  tty: serial: msm: Unlock interrupts during SysRq processing
  tty: serial: msm: Remove 115.2 Kbps maximum baud rate limitation
  tty: serial: msm: Add RX DMA support
  tty: serial: msm: Add TX DMA support
  tty: serial: msm: Add msm prefix to all driver functions
  tty: serial: msm: Fix command Stale Event Enable definition
  tty: serial: msm: replaces (1 << x) with BIT(x) macro
  tty: serial: msm: Add mask value for UART_DM registers
  dmaengine: adm: Start next DMA even if there is no ongoing transaction
  dmaengine: adm: Don't reset controller during probe
  dmaengine: adm: Use 'soft' flush when stopping DMA
  dmaengine: adm: Fix ADM hardware descriptor creation when flow control is enabled
  dmaengine: Add ADM driver
  dt/bindings: qcom_adm: Fix channel specifiers
-rw-r--r--  Documentation/devicetree/bindings/dma/qcom_adm.txt           |  16
-rw-r--r--  Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt |   6
-rw-r--r--  drivers/dma/Kconfig                                          |  19
-rw-r--r--  drivers/dma/Makefile                                         |   1
-rw-r--r--  drivers/dma/qcom_adm.c                                       | 856
-rw-r--r--  drivers/tty/serial/msm_serial.c                              | 618
-rw-r--r--  drivers/tty/serial/msm_serial.h                              |  55
7 files changed, 1488 insertions(+), 83 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/qcom_adm.txt b/Documentation/devicetree/bindings/dma/qcom_adm.txt
index 9bcab9115982..38d45f8a0dc8 100644
--- a/Documentation/devicetree/bindings/dma/qcom_adm.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_adm.txt
@@ -4,8 +4,7 @@ Required properties:
- compatible: must contain "qcom,adm" for IPQ/APQ8064 and MSM8960
- reg: Address range for DMA registers
- interrupts: Should contain one interrupt shared by all channels
-- #dma-cells: must be <2>. First cell denotes the channel number. Second cell
- denotes CRCI (client rate control interface) flow control assignment.
+- #dma-cells: must be <1>. The single cell denotes the channel number.
- clocks: Should contain the core clock and interface clock.
- clock-names: Must contain "core" for the core clock and "iface" for the
interface clock.
@@ -22,7 +21,7 @@ Example:
compatible = "qcom,adm";
reg = <0x18300000 0x100000>;
interrupts = <0 170 0>;
- #dma-cells = <2>;
+ #dma-cells = <1>;
clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
clock-names = "core", "iface";
@@ -35,15 +34,12 @@ Example:
qcom,ee = <0>;
};
-DMA clients must use the format descripted in the dma.txt file, using a three
+DMA clients must use the format described in the dma.txt file, using a two
cell specifier for each channel.
-Each dmas request consists of 3 cells:
+Each dmas request consists of two cells:
1. phandle pointing to the DMA controller
2. channel number
- 3. CRCI assignment, if applicable. If no CRCI flow control is required, use 0.
- The CRCI is used for flow control. It identifies the peripheral device that
- is the source/destination for the transferred data.
Example:
@@ -56,7 +52,7 @@ Example:
cs-gpios = <&qcom_pinmux 20 0>;
- dmas = <&adm_dma 6 9>,
- <&adm_dma 5 10>;
+ dmas = <&adm_dma 6>,
+ <&adm_dma 5>;
dma-names = "rx", "tx";
};
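For reference, a minimal client-side sketch of the pattern this change implies
(not part of the patch): with the CRCI cell dropped from "dmas", a
flow-controlled client now passes its CRCI number at runtime through
dma_slave_config, as msm_serial does later in this series. The channel name
and CRCI value below are illustrative only.

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int example_setup_tx(struct device *dev, dma_addr_t fifo_addr)
	{
		struct dma_slave_config conf = { };
		struct dma_chan *chan;

		chan = dma_request_slave_channel_reason(dev, "tx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		conf.direction = DMA_MEM_TO_DEV;
		conf.device_fc = true;		/* enable CRCI flow control */
		conf.dst_addr = fifo_addr;	/* device FIFO address */
		conf.dst_maxburst = 16;
		conf.slave_id = 9;		/* CRCI number, hypothetical */

		return dmaengine_slave_config(chan, &conf);
	}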
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
index a2114c217376..182777fac9a2 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
@@ -26,6 +26,12 @@ Required properties:
Optional properties:
- dmas: Should contain dma specifiers for transmit and receive channels
- dma-names: Should contain "tx" for transmit and "rx" for receive channels
+- qcom,tx-crci: Identifier <u32> of the Client Rate Control Interface (CRCI)
+	to be used with the TX DMA channel. Required when using DMA for
+	transmission with UARTDM v1.3 and below.
+- qcom,rx-crci: Identifier <u32> of the Client Rate Control Interface (CRCI)
+	to be used with the RX DMA channel. Required when using DMA for
+	reception with UARTDM v1.3 and below.
Note: Aliases may be defined to ensure the correct ordering of the UARTs.
The alias serialN will result in the UART being assigned port N. If any
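An illustrative node combining the DMA specifiers with the new CRCI
properties (the unit address, interrupt, clock and CRCI numbers below are
hypothetical):

	serial@16340000 {
		compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
		reg = <0x16340000 0x1000>;
		interrupts = <0 80 0>;
		clocks = <&gcc GSBI4_UART_CLK>, <&gcc GSBI4_H_CLK>;
		clock-names = "core", "iface";
		dmas = <&adm_dma 6>, <&adm_dma 5>;
		dma-names = "rx", "tx";
		qcom,rx-crci = <10>;
		qcom,tx-crci = <9>;
	};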
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b4584757dae0..b7336aec3386 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -558,4 +558,23 @@ config DMATEST
config DMA_ENGINE_RAID
bool
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_ADM
+ tristate "Qualcomm ADM support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the Qualcomm ADM DMA controller. This controller
+ provides DMA capabilities for both general purpose and on-chip
+ peripheral devices.
+
endif
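A config fragment enabling the new driver (illustrative; assumes the
surrounding "if DMADEVICES" guard in drivers/dma/Kconfig):

	CONFIG_DMADEVICES=y
	CONFIG_QCOM_ADM=y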
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7711a7180726..ea264ee33eff 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
diff --git a/drivers/dma/qcom_adm.c b/drivers/dma/qcom_adm.c
new file mode 100644
index 000000000000..9c4f8962f0c6
--- /dev/null
+++ b/drivers/dma/qcom_adm.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define ADM_CHAN_MULTI 0x4
+#define ADM_CI_MULTI 0x4
+#define ADM_CRCI_MULTI 0x4
+#define ADM_EE_MULTI 0x800
+#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * chan)
+#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * ee)
+#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
+#define ADM_CI_OFFS(ci) (ADM_CI_MULTI * (ci))
+#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan))
+#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee))
+#define ADM_CI_CONF(ci) (0x390 + ci * ADM_CI_MULTI)
+#define ADM_GP_CTL 0x3d8
+#define ADM_CRCI_CTL(crci, ee) (0x400 + crci * ADM_CRCI_MULTI + \
+ ADM_EE_OFFS(ee))
+
+/* channel status */
+#define ADM_CH_STATUS_VALID BIT(1)
+
+/* channel result */
+#define ADM_CH_RSLT_VALID BIT(31)
+#define ADM_CH_RSLT_ERR BIT(3)
+#define ADM_CH_RSLT_FLUSH BIT(2)
+#define ADM_CH_RSLT_TPD BIT(1)
+
+/* channel conf */
+#define ADM_CH_CONF_SHADOW_EN BIT(12)
+#define ADM_CH_CONF_MPU_DISABLE BIT(11)
+#define ADM_CH_CONF_PERM_MPU_CONF BIT(9)
+#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7)
+#define ADM_CH_CONF_SEC_DOMAIN(ee) (((ee & 0x3) << 4) | ((ee & 0x4) << 11))
+
+/* channel result conf */
+#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1)
+#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0)
+
+/* CRCI CTL */
+#define ADM_CRCI_CTL_MUX_SEL BIT(18)
+#define ADM_CRCI_CTL_RST BIT(17)
+
+/* CI configuration */
+#define ADM_CI_RANGE_END(x) (x << 24)
+#define ADM_CI_RANGE_START(x) (x << 16)
+#define ADM_CI_BURST_4_WORDS BIT(2)
+#define ADM_CI_BURST_8_WORDS BIT(3)
+
+/* GP CTL */
+#define ADM_GP_CTL_LP_EN BIT(12)
+#define ADM_GP_CTL_LP_CNT(x) (x << 8)
+
+/* Command pointer list entry */
+#define ADM_CPLE_LP BIT(31)
+#define ADM_CPLE_CMD_PTR_LIST BIT(29)
+
+/* Command list entry */
+#define ADM_CMD_LC BIT(31)
+#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7)
+#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3)
+
+#define ADM_CMD_TYPE_SINGLE 0x0
+#define ADM_CMD_TYPE_BOX 0x3
+
+#define ADM_CRCI_MUX_SEL BIT(4)
+#define ADM_DESC_ALIGN 8
+#define ADM_MAX_XFER (SZ_64K-1)
+#define ADM_MAX_ROWS (SZ_64K-1)
+#define ADM_MAX_CHANNELS 16
+
+struct adm_desc_hw_box {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 row_len;
+ u32 num_rows;
+ u32 row_offset;
+};
+
+struct adm_desc_hw_single {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 len;
+};
+
+struct adm_async_desc {
+ struct virt_dma_desc vd;
+ struct adm_device *adev;
+
+ size_t length;
+ enum dma_transfer_direction dir;
+ dma_addr_t dma_addr;
+ size_t dma_len;
+
+ void *cpl;
+ dma_addr_t cp_addr;
+ u32 crci;
+ u32 mux;
+ u32 blk_size;
+};
+
+struct adm_chan {
+ struct virt_dma_chan vc;
+ struct adm_device *adev;
+
+ /* parsed from DT */
+ u32 id; /* channel id */
+
+ struct adm_async_desc *curr_txd;
+ struct dma_slave_config slave;
+ struct list_head node;
+
+ int error;
+ int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+ return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct adm_chan *channels;
+
+ u32 ee;
+
+ struct clk *core_clk;
+ struct clk *iface_clk;
+
+ int irq;
+};
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ * @chan: dma channel
+ *
+ * Free all allocated descriptors associated with this channel
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+ /* free all queued descriptors */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_get_blksize - Get block size from burst value
+ * @burst: burst size of the transaction, in bytes
+ */
+static int adm_get_blksize(unsigned int burst)
+{
+ int ret;
+
+ switch (burst) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ ret = ffs(burst>>4) - 1;
+ break;
+ case 192:
+ ret = 4;
+ break;
+ case 256:
+ ret = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @crci: CRCI value
+ * @burst: Burst size of transaction
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_fc_descriptors(struct adm_chan *achan,
+ void *desc, struct scatterlist *sg, u32 crci, u32 burst,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_box *box_desc = NULL;
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 rows, row_offset, crci_cmd;
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ crci_cmd = ADM_CMD_SRC_CRCI(crci);
+ row_offset = burst;
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ crci_cmd = ADM_CMD_DST_CRCI(crci);
+ row_offset = burst << 16;
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ while (remainder >= burst) {
+ box_desc = desc;
+ box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
+ box_desc->row_offset = row_offset;
+ box_desc->src_addr = *src;
+ box_desc->dst_addr = *dst;
+
+ rows = remainder / burst;
+ rows = min_t(u32, rows, ADM_MAX_ROWS);
+ box_desc->num_rows = rows << 16 | rows;
+ box_desc->row_len = burst << 16 | burst;
+
+ *incr_addr += burst * rows;
+ remainder -= burst * rows;
+ desc += sizeof(*box_desc);
+ }
+
+ /* if leftover bytes, do one single descriptor */
+ if (remainder) {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
+ single_desc->len = remainder;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ desc += sizeof(*single_desc);
+
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+ } else {
+ if (box_desc && sg_is_last(sg))
+ box_desc->cmd |= ADM_CMD_LC;
+ }
+
+ return desc;
+}
+
+/**
+ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_non_fc_descriptors(struct adm_chan *achan,
+ void *desc, struct scatterlist *sg,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ do {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ single_desc->len = (remainder > ADM_MAX_XFER) ?
+ ADM_MAX_XFER : remainder;
+
+ remainder -= single_desc->len;
+ *incr_addr += single_desc->len;
+ desc += sizeof(*single_desc);
+ } while (remainder);
+
+ /* set last command if this is the end of the whole transaction */
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+
+ return desc;
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+ struct scatterlist *sg;
+ u32 i, burst;
+ u32 single_count = 0, box_count = 0, crci = 0;
+ void *desc;
+ u32 *cple;
+ int blk_size = 0;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(adev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /*
+ * get burst value from slave configuration
+ */
+ burst = (direction == DMA_MEM_TO_DEV) ?
+ achan->slave.dst_maxburst :
+ achan->slave.src_maxburst;
+
+ /* if using flow control, validate burst and crci values */
+ if (achan->slave.device_fc) {
+
+ blk_size = adm_get_blksize(burst);
+ if (blk_size < 0) {
+ dev_err(adev->dev, "invalid burst value: %d\n",
+ burst);
+ return ERR_PTR(-EINVAL);
+ }
+
+ crci = achan->slave.slave_id & 0xf;
+ if (!crci || achan->slave.slave_id > 0x1f) {
+ dev_err(adev->dev, "invalid crci value\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* iterate through sgs and compute allocation size of structures */
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (achan->slave.device_fc) {
+ box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
+ ADM_MAX_ROWS);
+ if (sg_dma_len(sg) % burst)
+ single_count++;
+ } else {
+ single_count += DIV_ROUND_UP(sg_dma_len(sg),
+ ADM_MAX_XFER);
+ }
+ }
+
+ async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+ if (!async_desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (crci)
+ async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
+ ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->crci = crci;
+ async_desc->blk_size = blk_size;
+ async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
+ box_count * sizeof(struct adm_desc_hw_box) +
+ sizeof(*cple) + 2 * ADM_DESC_ALIGN;
+
+ async_desc->cpl = dma_alloc_writecombine(adev->dev, async_desc->dma_len,
+ &async_desc->dma_addr, GFP_NOWAIT);
+
+ if (!async_desc->cpl) {
+ kfree(async_desc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ async_desc->adev = adev;
+
+ /* both command list entry and descriptors must be 8 byte aligned */
+ cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+ desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);
+
+ /* init cmd list */
+ *cple = ADM_CPLE_LP;
+ *cple |= (desc - async_desc->cpl + async_desc->dma_addr) >> 3;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ async_desc->length += sg_dma_len(sg);
+
+ if (achan->slave.device_fc)
+ desc = adm_process_fc_descriptors(achan, desc, sg, crci,
+ burst, direction);
+ else
+ desc = adm_process_non_fc_descriptors(achan, desc, sg,
+ direction);
+ }
+
+ return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+}
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @chan: dma channel
+ *
+ * Dequeues and frees all transactions, aborts the current transaction.
+ * No callbacks are done.
+ */
+static int adm_terminate_all(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ vchan_get_all_descriptors(&achan->vc, &head);
+
+ /* send flush command to terminate current transaction */
+ writel_relaxed(BIT(31),
+ adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&achan->vc, &head);
+
+ return 0;
+}
+
+static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flag;
+
+ spin_lock_irqsave(&achan->vc.lock, flag);
+ memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ spin_unlock_irqrestore(&achan->vc.lock, flag);
+
+ return 0;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: ADM dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+
+ lockdep_assert_held(&achan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ /* write next command list out to the CMD FIFO */
+ async_desc = container_of(vd, struct adm_async_desc, vd);
+ achan->curr_txd = async_desc;
+
+ /* reset channel error */
+ achan->error = 0;
+
+ if (!achan->initialized) {
+ /* enable interrupts */
+ writel(ADM_CH_CONF_SHADOW_EN |
+ ADM_CH_CONF_PERM_MPU_CONF |
+ ADM_CH_CONF_MPU_DISABLE |
+ ADM_CH_CONF_SEC_DOMAIN(adev->ee),
+ adev->regs + ADM_CH_CONF(achan->id));
+
+ writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
+ adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ achan->initialized = 1;
+ }
+
+ /* set the crci block size if this transaction requires CRCI */
+ if (async_desc->crci) {
+ writel(async_desc->mux | async_desc->blk_size,
+ adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
+ }
+
+ /* make sure IRQ enable doesn't get reordered */
+ wmb();
+
+ /* write next command list out to the CMD FIFO */
+ writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+ adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+ struct adm_device *adev = data;
+ u32 srcs, i;
+ struct adm_async_desc *async_desc;
+ unsigned long flags;
+
+ srcs = readl_relaxed(adev->regs +
+ ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ struct adm_chan *achan = &adev->channels[i];
+ u32 status, result;
+
+ if (srcs & BIT(i)) {
+ status = readl_relaxed(adev->regs +
+ ADM_CH_STATUS_SD(i, adev->ee));
+
+ /* if no result present, skip */
+ if (!(status & ADM_CH_STATUS_VALID))
+ continue;
+
+ result = readl_relaxed(adev->regs +
+ ADM_CH_RSLT(i, adev->ee));
+
+ /* no valid results, skip */
+ if (!(result & ADM_CH_RSLT_VALID))
+ continue;
+
+ /* flag error if transaction was flushed or failed */
+ if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
+ achan->error = 1;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ async_desc = achan->curr_txd;
+
+ achan->curr_txd = NULL;
+
+ if (async_desc)
+ vchan_cookie_complete(&async_desc->vd);
+
+ /* kick off next DMA */
+ adm_start_dma(achan);
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ vd = vchan_find_desc(&achan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct adm_async_desc, vd)->length;
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ /*
+ * residue is either the full length if it is in the issued list, or 0
+ * if it is in progress. We have no reliable way of determining
+	 * anything in between.
+ */
+ dma_set_residue(txstate, residue);
+
+ if (achan->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+ adm_start_dma(achan);
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct adm_async_desc *async_desc = container_of(vd,
+ struct adm_async_desc, vd);
+
+ dma_free_writecombine(async_desc->adev->dev, async_desc->dma_len,
+ async_desc->cpl, async_desc->dma_addr);
+ kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+ u32 index)
+{
+ achan->id = index;
+ achan->adev = adev;
+
+ vchan_init(&achan->vc, &adev->common);
+ achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+ struct adm_device *adev;
+ struct resource *iores;
+ int ret;
+ u32 i;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(adev->regs))
+ return PTR_ERR(adev->regs);
+
+ adev->irq = platform_get_irq(pdev, 0);
+ if (adev->irq < 0)
+ return adev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+ if (ret) {
+ dev_err(adev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ adev->core_clk = devm_clk_get(adev->dev, "core");
+ if (IS_ERR(adev->core_clk))
+ return PTR_ERR(adev->core_clk);
+
+ ret = clk_prepare_enable(adev->core_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable core clock\n");
+ return ret;
+ }
+
+ adev->iface_clk = devm_clk_get(adev->dev, "iface");
+ if (IS_ERR(adev->iface_clk)) {
+ ret = PTR_ERR(adev->iface_clk);
+ goto err_disable_core_clk;
+ }
+
+ ret = clk_prepare_enable(adev->iface_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+ goto err_disable_core_clk;
+ }
+
+ adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
+ sizeof(*adev->channels), GFP_KERNEL);
+
+ if (!adev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clks;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&adev->common.channels);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++)
+ adm_channel_init(adev, &adev->channels[i], i);
+
+ /* reset CRCIs */
+ for (i = 0; i < 16; i++)
+ writel(ADM_CRCI_CTL_RST, adev->regs +
+ ADM_CRCI_CTL(i, adev->ee));
+
+ /* configure client interfaces */
+ writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
+ writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
+ writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
+ writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
+ adev->regs + ADM_GP_CTL);
+
+ ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+ 0, "adm_dma", adev);
+ if (ret)
+ goto err_disable_clks;
+
+ platform_set_drvdata(pdev, adev);
+
+ adev->common.dev = adev->dev;
+ adev->common.dev->dma_parms = &adev->dma_parms;
+
+ /* set capabilities */
+ dma_cap_zero(adev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ adev->common.device_free_chan_resources = adm_free_chan;
+ adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+ adev->common.device_issue_pending = adm_issue_pending;
+ adev->common.device_tx_status = adm_tx_status;
+ adev->common.device_terminate_all = adm_terminate_all;
+ adev->common.device_config = adm_slave_config;
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(adev->dev, "failed to register dma async device\n");
+ goto err_disable_clks;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id,
+ &adev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&adev->common);
+err_disable_clks:
+ clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(adev->core_clk);
+
+ return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+ struct adm_device *adev = platform_get_drvdata(pdev);
+ struct adm_chan *achan;
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&adev->common);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ achan = &adev->channels[i];
+
+ /* mask IRQs for this channel/EE pair */
+ writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ adm_terminate_all(&adev->channels[i].vc.chan);
+ }
+
+ devm_free_irq(adev->dev, adev->irq, adev);
+
+ clk_disable_unprepare(adev->core_clk);
+ clk_disable_unprepare(adev->iface_clk);
+
+ return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+ { .compatible = "qcom,adm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
+
+static struct platform_driver adm_dma_driver = {
+ .probe = adm_dma_probe,
+ .remove = adm_dma_remove,
+ .driver = {
+ .name = "adm-dma-engine",
+ .of_match_table = adm_of_match,
+ },
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");
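A hedged usage sketch for the driver above (not part of the patch):
submitting a flow-controlled scatter-gather receive through the standard
dmaengine slave API. The channel name and CRCI value are hypothetical. Note
that adm_get_blksize() accepts maxburst values of 16/32/64/128/192/256 bytes
(mapped to block-size codes 0..5) and rejects anything else with -EINVAL.

	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int example_adm_rx(struct device *dev, struct scatterlist *sgl,
				  int nents, dma_addr_t dev_fifo)
	{
		struct dma_slave_config conf = { };
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		dma_cookie_t cookie;
		int ret;

		chan = dma_request_slave_channel_reason(dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		conf.direction = DMA_DEV_TO_MEM;
		conf.device_fc = true;		/* use CRCI flow control */
		conf.src_addr = dev_fifo;
		conf.src_maxburst = 16;		/* must pass adm_get_blksize() */
		conf.slave_id = 10;		/* CRCI number, hypothetical */
		ret = dmaengine_slave_config(chan, &conf);
		if (ret)
			goto err;

		/* adm_prep_slave_sg() returns NULL or ERR_PTR on failure */
		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT);
		if (IS_ERR_OR_NULL(desc)) {
			ret = desc ? PTR_ERR(desc) : -EIO;
			goto err;
		}

		cookie = dmaengine_submit(desc);
		ret = dma_submit_error(cookie);
		if (ret)
			goto err;

		dma_async_issue_pending(chan);
		return 0;
	err:
		dma_release_channel(chan);
		return ret;
	}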
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b73889c8ed4b..2d779b1f010f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -20,6 +20,8 @@
#endif
#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -31,6 +33,7 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -39,6 +42,11 @@
#include "msm_serial.h"
+#define UARTDM_BURST_SIZE 16 /* in bytes */
+#define UARTDM_TX_ALIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
+#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
+#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
+
enum {
UARTDM_1P1 = 1,
UARTDM_1P2,
@@ -46,6 +54,17 @@ enum {
UARTDM_1P4,
};
+struct msm_dma {
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+ dma_addr_t phys;
+ unsigned char *virt;
+ dma_cookie_t cookie;
+ u32 enable_bit;
+ unsigned int count;
+ struct dma_async_tx_descriptor *desc;
+};
+
struct msm_port {
struct uart_port uart;
char name[16];
@@ -55,9 +74,153 @@ struct msm_port {
int is_uartdm;
unsigned int old_snap_state;
bool break_detected;
+ struct msm_dma tx_dma;
+ struct msm_dma rx_dma;
};
-static inline void wait_for_xmitr(struct uart_port *port)
+static void msm_handle_tx(struct uart_port *port);
+static void msm_start_rx_dma(struct msm_port *msm_port);
+
+static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+{
+ struct device *dev = port->dev;
+ unsigned int mapped;
+ u32 val;
+
+ mapped = dma->count;
+ dma->count = 0;
+
+ dmaengine_terminate_all(dma->chan);
+
+	/*
+	 * A DMA stall can happen if an enqueue and a flush command are issued
+	 * concurrently. For example, before changing the baud rate or
+	 * protocol configuration and sending a flush command to the ADM,
+	 * disable the UARTDM channel first.
+	 * Note: do not reset the receiver here immediately, as doing
+	 * disable/reset or reset/disable back-to-back is not recommended.
+	 */
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ if (mapped)
+ dma_unmap_single(dev, dma->phys, mapped, dma->dir);
+}
+
+static void msm_release_dma(struct msm_port *msm_port)
+{
+ struct msm_dma *dma;
+
+ dma = &msm_port->tx_dma;
+ if (dma->chan) {
+ msm_stop_dma(&msm_port->uart, dma);
+ dma_release_channel(dma->chan);
+ }
+
+ memset(dma, 0, sizeof(*dma));
+
+ dma = &msm_port->rx_dma;
+ if (dma->chan) {
+ msm_stop_dma(&msm_port->uart, dma);
+ dma_release_channel(dma->chan);
+ kfree(dma->virt);
+ }
+
+ memset(dma, 0, sizeof(*dma));
+}
+
+static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
+{
+ struct device *dev = msm_port->uart.dev;
+ struct dma_slave_config conf;
+ struct msm_dma *dma;
+ u32 crci = 0;
+ int ret;
+
+ dma = &msm_port->tx_dma;
+
+ /* allocate DMA resources, if available */
+ dma->chan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(dma->chan))
+ goto no_tx;
+
+ of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_MEM_TO_DEV;
+ conf.device_fc = true;
+ conf.dst_addr = base + UARTDM_TF;
+ conf.dst_maxburst = UARTDM_BURST_SIZE;
+ conf.slave_id = crci;
+
+ ret = dmaengine_slave_config(dma->chan, &conf);
+ if (ret)
+ goto rel_tx;
+
+ dma->dir = DMA_TO_DEVICE;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ dma->enable_bit = UARTDM_DMEN_TX_DM_ENABLE;
+ else
+ dma->enable_bit = UARTDM_DMEN_TX_BAM_ENABLE;
+
+ return;
+
+rel_tx:
+ dma_release_channel(dma->chan);
+no_tx:
+ memset(dma, 0, sizeof(*dma));
+}
+
+static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
+{
+ struct device *dev = msm_port->uart.dev;
+ struct dma_slave_config conf;
+ struct msm_dma *dma;
+ u32 crci = 0;
+ int ret;
+
+ dma = &msm_port->rx_dma;
+
+ /* allocate DMA resources, if available */
+ dma->chan = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(dma->chan))
+ goto no_rx;
+
+ of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
+
+ dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
+ if (!dma->virt)
+ goto rel_rx;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.device_fc = true;
+ conf.src_addr = base + UARTDM_RF;
+ conf.src_maxburst = UARTDM_BURST_SIZE;
+ conf.slave_id = crci;
+
+ ret = dmaengine_slave_config(dma->chan, &conf);
+ if (ret)
+ goto err;
+
+ dma->dir = DMA_FROM_DEVICE;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ dma->enable_bit = UARTDM_DMEN_RX_DM_ENABLE;
+ else
+ dma->enable_bit = UARTDM_DMEN_RX_BAM_ENABLE;
+
+ return;
+err:
+ kfree(dma->virt);
+rel_rx:
+ dma_release_channel(dma->chan);
+no_rx:
+ memset(dma, 0, sizeof(*dma));
+}
+
+static inline void msm_wait_for_xmitr(struct uart_port *port)
{
while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
@@ -78,17 +241,277 @@ static void msm_stop_tx(struct uart_port *port)
static void msm_start_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->tx_dma;
+
+ /* Already started in DMA mode */
+ if (dma->count)
+ return;
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_reset_dm_count(struct uart_port *port, int count)
+{
+ msm_wait_for_xmitr(port);
+ msm_write(port, count, UARTDM_NCF_TX);
+ msm_read(port, UARTDM_NCF_TX);
+}
+
+static void msm_complete_tx_dma(void *args)
+{
+ struct msm_port *msm_port = args;
+ struct uart_port *port = &msm_port->uart;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned long flags;
+ unsigned int count;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Already stopped */
+ if (!dma->count)
+ goto done;
+
+ status = dmaengine_tx_status(dma->chan, dma->cookie, &state);
+
+ dma_unmap_single(port->dev, dma->phys, dma->count, dma->dir);
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ if (msm_port->is_uartdm > UARTDM_1P3) {
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
+ count = dma->count - state.residue;
+ port->icount.tx += count;
+ dma->count = 0;
+
+ xmit->tail += count;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ /* Restore "Tx FIFO below watermark" interrupt */
msm_port->imr |= UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ msm_handle_tx(port);
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
+{
+ struct circ_buf *xmit = &msm_port->uart.state->xmit;
+ struct uart_port *port = &msm_port->uart;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ void *cpu_addr;
+ int ret;
+ u32 val;
+
+ cpu_addr = &xmit->buf[xmit->tail];
+
+ dma->phys = dma_map_single(port->dev, cpu_addr, count, dma->dir);
+ ret = dma_mapping_error(port->dev, dma->phys);
+ if (ret)
+ return ret;
+
+ dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
+ count, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_PREP_FENCE);
+ if (!dma->desc) {
+ ret = -EIO;
+ goto unmap;
+ }
+
+ dma->desc->callback = msm_complete_tx_dma;
+ dma->desc->callback_param = msm_port;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret)
+ goto unmap;
+
+	/*
+	 * The DMA-complete callback reloads the Tx FIFO, so the
+	 * "Tx FIFO below watermark" interrupt is not needed; disable it.
+	 */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ dma->count = count;
+
+ val = msm_read(port, UARTDM_DMEN);
+ val |= dma->enable_bit;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_write(port, val, UARTDM_DMEN);
+
+ msm_reset_dm_count(port, count);
+
+ if (msm_port->is_uartdm > UARTDM_1P3)
+ msm_write(port, val, UARTDM_DMEN);
+
+ dma_async_issue_pending(dma->chan);
+ return 0;
+unmap:
+ dma_unmap_single(port->dev, dma->phys, count, dma->dir);
+ return ret;
+}
+
+static void msm_complete_rx_dma(void *args)
+{
+ struct msm_port *msm_port = args;
+ struct uart_port *port = &msm_port->uart;
+ struct tty_port *tport = &port->state->port;
+ struct msm_dma *dma = &msm_port->rx_dma;
+ int count = 0, i, sysrq;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Already stopped */
+ if (!dma->count)
+ goto done;
+
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ /* Restore interrupts */
+ msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
+
+ port->icount.rx += count;
+
+ dma->count = 0;
+
+ dma_unmap_single(port->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
+
+ for (i = 0; i < count; i++) {
+ char flag = TTY_NORMAL;
+
+ if (msm_port->break_detected && dma->virt[i] == 0) {
+ port->icount.brk++;
+ flag = TTY_BREAK;
+ msm_port->break_detected = false;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ flag = TTY_NORMAL;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
+ spin_lock_irqsave(&port->lock, flags);
+ if (!sysrq)
+ tty_insert_flip_char(tport, dma->virt[i], flag);
+ }
+
+ msm_start_rx_dma(msm_port);
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (count)
+ tty_flip_buffer_push(tport);
+}
+
+static void msm_start_rx_dma(struct msm_port *msm_port)
+{
+ struct msm_dma *dma = &msm_port->rx_dma;
+ struct uart_port *uart = &msm_port->uart;
+ u32 val;
+ int ret;
+
+ if (!dma->chan)
+ return;
+
+ dma->phys = dma_map_single(uart->dev, dma->virt,
+ UARTDM_RX_SIZE, dma->dir);
+ ret = dma_mapping_error(uart->dev, dma->phys);
+ if (ret)
+ return;
+
+ dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
+ UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma->desc)
+ goto unmap;
+
+ dma->desc->callback = msm_complete_rx_dma;
+ dma->desc->callback_param = msm_port;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret)
+ goto unmap;
+	/*
+	 * DMA off-loads the FIFO, so the "Rx FIFO over watermark" and
+	 * "stale" interrupts are not needed; disable them.
+	 */
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+
+	/*
+	 * When the DMA engine is ADM3 (implied by UARTDM <= v1.3),
+	 * RXSTALE is needed to flush the input DMA FIFO to memory.
+	 */
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_port->imr |= UART_IMR_RXSTALE;
+
+ msm_write(uart, msm_port->imr, UART_IMR);
+
+ dma->count = UARTDM_RX_SIZE;
+
+ dma_async_issue_pending(dma->chan);
+
+ msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ val = msm_read(uart, UARTDM_DMEN);
+ val |= dma->enable_bit;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_write(uart, val, UARTDM_DMEN);
+
+ msm_write(uart, UARTDM_RX_SIZE, UARTDM_DMRX);
+
+ if (msm_port->is_uartdm > UARTDM_1P3)
+ msm_write(uart, val, UARTDM_DMEN);
+
+ return;
+unmap:
+ dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
}
static void msm_stop_rx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
msm_write(port, msm_port->imr, UART_IMR);
+
+ if (dma->chan)
+ msm_stop_dma(port, dma);
}
static void msm_enable_ms(struct uart_port *port)
@@ -99,7 +522,7 @@ static void msm_enable_ms(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
}
-static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
{
struct tty_port *tport = &port->state->port;
unsigned int sr;
@@ -169,9 +592,12 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
}
-static void handle_rx(struct uart_port *port)
+static void msm_handle_rx(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
unsigned int sr;
@@ -224,18 +650,11 @@ static void handle_rx(struct uart_port *port)
spin_lock(&port->lock);
}
-static void reset_dm_count(struct uart_port *port, int count)
-{
- wait_for_xmitr(port);
- msm_write(port, count, UARTDM_NCF_TX);
- msm_read(port, UARTDM_NCF_TX);
-}
-
-static void handle_tx(struct uart_port *port)
+static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int tx_count, num_chars;
+ unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -244,20 +663,8 @@ static void handle_tx(struct uart_port *port)
else
tf = port->membase + UART_TF;
- tx_count = uart_circ_chars_pending(xmit);
- tx_count = min3(tx_count, (unsigned int)UART_XMIT_SIZE - xmit->tail,
- port->fifosize);
-
- if (port->x_char) {
- if (msm_port->is_uartdm)
- reset_dm_count(port, tx_count + 1);
-
- iowrite8_rep(tf, &port->x_char, 1);
- port->icount.tx++;
- port->x_char = 0;
- } else if (tx_count && msm_port->is_uartdm) {
- reset_dm_count(port, tx_count);
- }
+ if (tx_count && msm_port->is_uartdm)
+ msm_reset_dm_count(port, tx_count);
while (tf_pointer < tx_count) {
int i;
@@ -290,7 +697,60 @@ static void handle_tx(struct uart_port *port)
uart_write_wakeup(port);
}
-static void handle_delta_cts(struct uart_port *port)
+static void msm_handle_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct circ_buf *xmit = &msm_port->uart.state->xmit;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ unsigned int pio_count, dma_count, dma_min;
+ void __iomem *tf;
+ int err = 0;
+
+ if (port->x_char) {
+ if (msm_port->is_uartdm)
+ tf = port->membase + UARTDM_TF;
+ else
+ tf = port->membase + UART_TF;
+
+ if (msm_port->is_uartdm)
+ msm_reset_dm_count(port, 1);
+
+ iowrite8_rep(tf, &port->x_char, 1);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ msm_stop_tx(port);
+ return;
+ }
+
+ pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ dma_min = 1; /* Always DMA */
+ if (msm_port->is_uartdm > UARTDM_1P3) {
+		dma_count = UARTDM_TX_ALIGN(dma_count);
+ dma_min = UARTDM_BURST_SIZE;
+ } else {
+ if (dma_count > UARTDM_TX_MAX)
+ dma_count = UARTDM_TX_MAX;
+ }
+
+ if (pio_count > port->fifosize)
+ pio_count = port->fifosize;
+
+ if (!dma->chan || dma_count < dma_min)
+ msm_handle_tx_pio(port, pio_count);
+ else
+ err = msm_handle_tx_dma(msm_port, dma_count);
+
+ if (err) /* fall back to PIO mode */
+ msm_handle_tx_pio(port, pio_count);
+}
+
+static void msm_handle_delta_cts(struct uart_port *port)
{
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
port->icount.cts++;
@@ -301,9 +761,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
+ unsigned long flags;
unsigned int misr;
+ u32 val;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
@@ -313,18 +776,29 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
}
if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
- if (msm_port->is_uartdm)
- handle_rx_dm(port, misr);
- else
- handle_rx(port);
+ if (dma->count) {
+ val = UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, UART_CR);
+ val = UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, UART_CR);
+ /*
+ * Flush DMA input fifo to memory, this will also
+ * trigger DMA RX completion
+ */
+ dmaengine_terminate_all(dma->chan);
+ } else if (msm_port->is_uartdm) {
+ msm_handle_rx_dm(port, misr);
+ } else {
+ msm_handle_rx(port);
+ }
}
if (misr & UART_IMR_TXLEV)
- handle_tx(port);
+ msm_handle_tx(port);
if (misr & UART_IMR_DELTA_CTS)
- handle_delta_cts(port);
+ msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
- spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
@@ -408,6 +882,7 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud)
{ 3, 0xdd, 8 },
{ 2, 0xee, 16 },
{ 1, 0xff, 31 },
+ { 0, 0xff, 31 },
};
divisor = uart_get_divisor(port, baud);
@@ -419,21 +894,41 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud)
return entry; /* Default to smallest divider */
}
-static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
+ unsigned long *saved_flags)
{
- unsigned int rxstale, watermark;
+ unsigned int rxstale, watermark, mask;
struct msm_port *msm_port = UART_TO_MSM(port);
const struct msm_baud_map *entry;
+ unsigned long flags;
entry = msm_find_best_baud(port, baud);
msm_write(port, entry->code, UART_CSR);
+ if (baud > 460800)
+ port->uartclk = baud * 16;
+
+ flags = *saved_flags;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ clk_set_rate(msm_port->clk, port->uartclk);
+
+ spin_lock_irqsave(&port->lock, flags);
+ *saved_flags = flags;
+
/* RX stale watermark */
rxstale = entry->rxstale;
watermark = UART_IPR_STALE_LSB & rxstale;
- watermark |= UART_IPR_RXSTALE_LAST;
- watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ if (msm_port->is_uartdm) {
+ mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ } else {
+ watermark |= UART_IPR_RXSTALE_LAST;
+ mask = UART_IPR_STALE_TIMEOUT_MSB;
+ }
+
+ watermark |= mask & (rxstale << 2);
+
msm_write(port, watermark, UART_IPR);
/* set RX watermark */
@@ -476,7 +971,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int data, rfr_level;
+ unsigned int data, rfr_level, mask;
int ret;
snprintf(msm_port->name, sizeof(msm_port->name),
@@ -496,11 +991,23 @@ static int msm_startup(struct uart_port *port)
/* set automatic RFR level */
data = msm_read(port, UART_MR1);
- data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+
+ if (msm_port->is_uartdm)
+ mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ else
+ mask = UART_MR1_AUTO_RFR_LEVEL1;
+
+ data &= ~mask;
data &= ~UART_MR1_AUTO_RFR_LEVEL0;
- data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= mask & (rfr_level << 2);
data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
msm_write(port, data, UART_MR1);
+
+ if (msm_port->is_uartdm) {
+ msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
+ msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
+ }
+
return 0;
}
@@ -511,6 +1018,9 @@ static void msm_shutdown(struct uart_port *port)
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
+ if (msm_port->is_uartdm)
+ msm_release_dma(msm_port);
+
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
@@ -519,14 +1029,19 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
spin_lock_irqsave(&port->lock, flags);
+ if (dma->chan) /* Terminate if any */
+ msm_stop_dma(port, dma);
+
/* calculate and set baud rate */
- baud = uart_get_baud_rate(port, termios, old, 300, 115200);
- baud = msm_set_baud_rate(port, baud);
+ baud = uart_get_baud_rate(port, termios, old, 300, 4000000);
+ baud = msm_set_baud_rate(port, baud, &flags);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -588,6 +1103,9 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -765,7 +1283,7 @@ static void msm_poll_put_char(struct uart_port *port, unsigned char c)
msm_write(port, 0, UART_IMR);
if (msm_port->is_uartdm)
- reset_dm_count(port, 1);
+ msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
@@ -839,7 +1357,7 @@ static struct msm_port msm_uart_ports[] = {
#define UART_NR ARRAY_SIZE(msm_uart_ports)
-static inline struct uart_port *get_port_from_line(unsigned int line)
+static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
return &msm_uart_ports[line].uart;
}
@@ -866,7 +1384,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
spin_lock(&port->lock);
if (is_uartdm)
- reset_dm_count(port, count);
+ msm_reset_dm_count(port, count);
i = 0;
while (i < count) {
@@ -911,7 +1429,7 @@ static void msm_console_write(struct console *co, const char *s,
BUG_ON(co->index < 0 || co->index >= UART_NR);
- port = get_port_from_line(co->index);
+ port = msm_get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
@@ -928,7 +1446,7 @@ static int __init msm_console_setup(struct console *co, char *options)
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
- port = get_port_from_line(co->index);
+ port = msm_get_port_from_line(co->index);
if (unlikely(!port->membase))
return -ENXIO;
@@ -1043,7 +1561,7 @@ static int msm_serial_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
- port = get_port_from_line(line);
+ port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
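As a worked illustration of the PIO/DMA split in msm_handle_tx() above
(assumed ring state, not from the patch): DMA can only transfer the
physically contiguous run up to the circular-buffer wrap, which is what
CIRC_CNT_TO_END() measures.

	/*
	 * Assume UART_XMIT_SIZE = 4096, xmit->head = 10, xmit->tail = 4090:
	 *
	 *   CIRC_CNT(10, 4090, 4096)        = 16  chars pending in total
	 *   CIRC_CNT_TO_END(10, 4090, 4096) = 6   chars before the wrap
	 *
	 * On UARTDM > 1.3 the 6 bytes are rounded down to a 4-byte multiple
	 * by UARTDM_TX_ALIGN() (giving 4) and compared against
	 * UARTDM_BURST_SIZE (16); the run is too short, so the driver falls
	 * back to PIO for up to port->fifosize characters.
	 */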
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 737f69fe7113..bc1d7b39eba8 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -20,11 +20,12 @@
#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_MR1_RX_RDY_CTL (1 << 7)
-#define UART_MR1_CTS_CTL (1 << 6)
+#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL BIT(7)
+#define UART_MR1_CTS_CTL BIT(6)
#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_ERROR_MODE BIT(6)
#define UART_MR2_BITS_PER_CHAR 0x30
#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
@@ -58,26 +59,28 @@
#define UART_CR_CMD_SET_RFR (13 << 4)
#define UART_CR_CMD_RESET_RFR (14 << 4)
#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (5 << 8)
#define UART_CR_CMD_FORCE_STALE (4 << 8)
#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE (1 << 3)
-#define UART_CR_TX_ENABLE (1 << 2)
-#define UART_CR_RX_DISABLE (1 << 1)
-#define UART_CR_RX_ENABLE (1 << 0)
+#define UART_CR_TX_DISABLE BIT(3)
+#define UART_CR_TX_ENABLE BIT(2)
+#define UART_CR_RX_DISABLE BIT(1)
+#define UART_CR_RX_ENABLE BIT(0)
#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
#define UART_IMR 0x0014
-#define UART_IMR_TXLEV (1 << 0)
-#define UART_IMR_RXSTALE (1 << 3)
-#define UART_IMR_RXLEV (1 << 4)
-#define UART_IMR_DELTA_CTS (1 << 5)
-#define UART_IMR_CURRENT_CTS (1 << 6)
-#define UART_IMR_RXBREAK_START (1 << 10)
+#define UART_IMR_TXLEV BIT(0)
+#define UART_IMR_RXSTALE BIT(3)
+#define UART_IMR_RXLEV BIT(4)
+#define UART_IMR_DELTA_CTS BIT(5)
+#define UART_IMR_CURRENT_CTS BIT(6)
+#define UART_IMR_RXBREAK_START BIT(10)
#define UART_IPR_RXSTALE_LAST 0x20
#define UART_IPR_STALE_LSB 0x1F
#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
#define UART_IPR 0x0018
#define UART_TFWR 0x001C
@@ -96,20 +99,20 @@
#define UART_TEST_CTRL 0x0050
#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR (1 << 7)
-#define UART_SR_RX_BREAK (1 << 6)
-#define UART_SR_PAR_FRAME_ERR (1 << 5)
-#define UART_SR_OVERRUN (1 << 4)
-#define UART_SR_TX_EMPTY (1 << 3)
-#define UART_SR_TX_READY (1 << 2)
-#define UART_SR_RX_FULL (1 << 1)
-#define UART_SR_RX_READY (1 << 0)
+#define UART_SR_HUNT_CHAR BIT(7)
+#define UART_SR_RX_BREAK BIT(6)
+#define UART_SR_PAR_FRAME_ERR BIT(5)
+#define UART_SR_OVERRUN BIT(4)
+#define UART_SR_TX_EMPTY BIT(3)
+#define UART_SR_TX_READY BIT(2)
+#define UART_SR_RX_FULL BIT(1)
+#define UART_SR_RX_READY BIT(0)
#define UART_RF 0x000C
#define UARTDM_RF 0x0070
#define UART_MISR 0x0010
#define UART_ISR 0x0014
-#define UART_ISR_TX_READY (1 << 7)
+#define UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
@@ -119,6 +122,12 @@
#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */
+
#define UARTDM_DMRX 0x34
#define UARTDM_NCF_TX 0x40
#define UARTDM_RX_TOTAL_SNAP 0x38