author    Stephen Rothwell <sfr@canb.auug.org.au>  2017-07-24 13:45:42 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2017-07-24 13:45:42 +1000
commit    c402a00b70c14f1a381a54431264ca78fb524290 (patch)
tree      1fa1318bacdec662d5ad0a70415015f943b54d57
parent    fcb562096b909cad5f8f080973530bdfbebdc9c1 (diff)
parent    d4381f3cff0b2812044d79ce5ae867503cc48e0a (diff)
Merge remote-tracking branch 'slave-dma/next'
-rw-r--r--  drivers/dma/Kconfig              6
-rw-r--r--  drivers/dma/Makefile             1
-rw-r--r--  drivers/dma/altera-msgdma.c   1031
-rw-r--r--  drivers/dma/bcm-sba-raid.c       6
-rw-r--r--  drivers/dma/dmatest.c           88
-rw-r--r--  drivers/dma/ioat/init.c          2
-rw-r--r--  drivers/dma/of-dma.c             8
-rw-r--r--  drivers/dma/ppc4xx/adma.c       37
-rw-r--r--  drivers/dma/qcom/hidma.c        37
-rw-r--r--  drivers/dma/qcom/hidma.h         7
-rw-r--r--  drivers/dma/qcom/hidma_ll.c     11
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c   14
12 files changed, 1181 insertions, 67 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fa8f9c07ce73..fadc4d8783bd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -56,6 +56,12 @@ config DMA_OF
select DMA_ENGINE
#devices
+config ALTERA_MSGDMA
+ tristate "Altera / Intel mSGDMA Engine"
+ select DMA_ENGINE
+ help
+ Enable support for Altera / Intel mSGDMA controller.
+
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index d12ab2985ed1..f08f8de1b567 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
#devices
+obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
new file mode 100644
index 000000000000..33b87b413793
--- /dev/null
+++ b/drivers/dma/altera-msgdma.c
@@ -0,0 +1,1031 @@
+/*
+ * DMA driver for Altera mSGDMA IP core
+ *
+ * Copyright (C) 2017 Stefan Roese <sr@denx.de>
+ *
+ * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+
+#define MSGDMA_MAX_TRANS_LEN U32_MAX
+#define MSGDMA_DESC_NUM 1024
+
+/**
+ * struct msgdma_extended_desc - implements an extended descriptor
+ * @read_addr_lo: data buffer source address low bits
+ * @write_addr_lo: data buffer destination address low bits
+ * @len: the number of bytes to transfer per descriptor
+ * @burst_seq_num: bit 31:24 write burst
+ * bit 23:16 read burst
+ * bit 15:00 sequence number
+ * @stride: bit 31:16 write stride
+ * bit 15:00 read stride
+ * @read_addr_hi: data buffer source address high bits
+ * @write_addr_hi: data buffer destination address high bits
+ * @control: characteristics of the transfer
+ */
+struct msgdma_extended_desc {
+ u32 read_addr_lo;
+ u32 write_addr_lo;
+ u32 len;
+ u32 burst_seq_num;
+ u32 stride;
+ u32 read_addr_hi;
+ u32 write_addr_hi;
+ u32 control;
+};
+
+/* mSGDMA descriptor control field bit definitions */
+#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
+#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
+#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
+#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
+#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
+#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
+#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
+#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
+#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
+#define MSGDMA_DESC_CTL_TR_ERR_IRQ GENMASK(23, 16)
+#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)
+
+/*
+ * Writing "1" to the "go" bit commits the entire descriptor into the
+ * descriptor FIFO(s)
+ */
+#define MSGDMA_DESC_CTL_GO BIT(31)
+
+/* Tx buffer control flags */
+#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
+ MSGDMA_DESC_CTL_END_ON_LEN | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_EARLY_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+/* mSGDMA extended descriptor stride definitions */
+#define MSGDMA_DESC_STRIDE_RD 0x00000001
+#define MSGDMA_DESC_STRIDE_WR 0x00010000
+#define MSGDMA_DESC_STRIDE_RW 0x00010001
+
+/**
+ * struct msgdma_csr - mSGDMA dispatcher control and status register map
+ * @status: Read/Clear
+ * @control: Read/Write
+ * @rw_fill_level: bit 31:16 - write fill level
+ * bit 15:00 - read fill level
+ * @resp_fill_level: bit 15:00 - response FIFO fill level
+ * @rw_seq_num: bit 31:16 - write sequence number
+ * bit 15:00 - read sequence number
+ * @pad: reserved
+ */
+struct msgdma_csr {
+ u32 status;
+ u32 control;
+ u32 rw_fill_level;
+ u32 resp_fill_level;
+ u32 rw_seq_num;
+ u32 pad[3];
+};
+
+/* mSGDMA CSR status register bit definitions */
+#define MSGDMA_CSR_STAT_BUSY BIT(0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
+#define MSGDMA_CSR_STAT_STOPPED BIT(5)
+#define MSGDMA_CSR_STAT_RESETTING BIT(6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
+#define MSGDMA_CSR_STAT_IRQ BIT(9)
+#define MSGDMA_CSR_STAT_MASK GENMASK(9, 0)
+#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ GENMASK(8, 0)
+
+#define DESC_EMPTY (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
+ MSGDMA_CSR_STAT_RESP_BUF_EMPTY)
+
+/* mSGDMA CSR control register bit definitions */
+#define MSGDMA_CSR_CTL_STOP BIT(0)
+#define MSGDMA_CSR_CTL_RESET BIT(1)
+#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
+#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
+#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
+#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
+
+/* mSGDMA CSR fill level bits */
+#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
+#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+
+#define MSGDMA_CSR_SEQ_NUM_GET(v) (((v) & 0xffff0000) >> 16)
+
+/* mSGDMA response register map */
+struct msgdma_response {
+ u32 bytes_transferred;
+ u32 status;
+};
+
+/* mSGDMA response register bit definitions */
+#define MSGDMA_RESP_EARLY_TERM BIT(8)
+#define MSGDMA_RESP_ERR_MASK 0xff
+
+/**
+ * struct msgdma_sw_desc - implements a sw descriptor
+ * @async_tx: support for the async_tx api
+ * @hw_desc: associated HW descriptor
+ * @node: node to move from the free list to the tx list
+ * @tx_list: transaction list
+ */
+struct msgdma_sw_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct msgdma_extended_desc hw_desc;
+ struct list_head node;
+ struct list_head tx_list;
+};
+
+/**
+ * struct msgdma_device - DMA device structure
+ */
+struct msgdma_device {
+ spinlock_t lock;
+ struct device *dev;
+ struct tasklet_struct irq_tasklet;
+ struct list_head pending_list;
+ struct list_head free_list;
+ struct list_head active_list;
+ struct list_head done_list;
+ u32 desc_free_cnt;
+ bool idle;
+
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
+ dma_addr_t hw_desq;
+ struct msgdma_sw_desc *sw_desq;
+ unsigned int npendings;
+
+ struct dma_slave_config slave_cfg;
+
+ int irq;
+
+ /* mSGDMA controller */
+ struct msgdma_csr *csr;
+
+ /* mSGDMA descriptors */
+ struct msgdma_extended_desc *desc;
+
+ /* mSGDMA response */
+ struct msgdma_response *resp;
+};
+
+#define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan)
+#define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
+
+/**
+ * msgdma_get_descriptor - Get the sw descriptor from the pool
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ *
+ * Return: The sw descriptor
+ */
+static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc;
+
+ spin_lock_bh(&mdev->lock);
+ desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
+ list_del(&desc->node);
+ spin_unlock_bh(&mdev->lock);
+
+ INIT_LIST_HEAD(&desc->tx_list);
+
+ return desc;
+}
+
+/**
+ * msgdma_free_descriptor - Free descriptor
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ * @desc: Transaction descriptor pointer
+ */
+static void msgdma_free_descriptor(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *desc)
+{
+ struct msgdma_sw_desc *child, *next;
+
+ mdev->desc_free_cnt++;
+ list_add_tail(&desc->node, &mdev->free_list);
+ list_for_each_entry_safe(child, next, &desc->tx_list, node) {
+ mdev->desc_free_cnt++;
+ list_move_tail(&child->node, &mdev->free_list);
+ }
+}
+
+/**
+ * msgdma_free_desc_list - Free descriptors list
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ * @list: List to parse and delete the descriptor
+ */
+static void msgdma_free_desc_list(struct msgdma_device *mdev,
+ struct list_head *list)
+{
+ struct msgdma_sw_desc *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node)
+ msgdma_free_descriptor(mdev, desc);
+}
+
+/**
+ * msgdma_desc_config - Configure the descriptor
+ * @desc: Hw descriptor pointer
+ * @dst: Destination buffer address
+ * @src: Source buffer address
+ * @len: Transfer length
+ * @stride: Read/write stride value to set
+ */
+static void msgdma_desc_config(struct msgdma_extended_desc *desc,
+ dma_addr_t dst, dma_addr_t src, size_t len,
+ u32 stride)
+{
+ /* Set lower 32bits of src & dst addresses in the descriptor */
+ desc->read_addr_lo = lower_32_bits(src);
+ desc->write_addr_lo = lower_32_bits(dst);
+
+ /* Set upper 32bits of src & dst addresses in the descriptor */
+ desc->read_addr_hi = upper_32_bits(src);
+ desc->write_addr_hi = upper_32_bits(dst);
+
+ desc->len = len;
+ desc->stride = stride;
+ desc->burst_seq_num = 0; /* 0 will result in max burst length */
+
+ /*
+ * Don't set interrupt on xfer end yet, this will be done later
+ * for the "last" descriptor
+ */
+ desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
+ MSGDMA_DESC_CTL_END_ON_LEN;
+}
+
+/**
+ * msgdma_desc_config_eod - Mark the descriptor as end descriptor
+ * @desc: Hw descriptor pointer
+ */
+static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
+{
+ desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
+}
+
+/**
+ * msgdma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct msgdma_device *mdev = to_mdev(tx->chan);
+ struct msgdma_sw_desc *new;
+ dma_cookie_t cookie;
+
+ new = tx_to_desc(tx);
+ spin_lock_bh(&mdev->lock);
+ cookie = dma_cookie_assign(tx);
+
+ list_add_tail(&new->node, &mdev->pending_list);
+ spin_unlock_bh(&mdev->lock);
+
+ return cookie;
+}
+
+/**
+ * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, ulong flags)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct msgdma_sw_desc *new, *first = NULL;
+ struct msgdma_extended_desc *desc;
+ size_t copy;
+ u32 desc_cnt;
+
+ desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&mdev->lock);
+ if (desc_cnt > mdev->desc_free_cnt) {
+ spin_unlock_bh(&mdev->lock);
+ dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
+ return NULL;
+ }
+ mdev->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&mdev->lock);
+
+ do {
+ /* Allocate and populate the descriptor */
+ new = msgdma_get_descriptor(mdev);
+
+ copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
+ desc = &new->hw_desc;
+ msgdma_desc_config(desc, dma_dst, dma_src, copy,
+ MSGDMA_DESC_STRIDE_RW);
+ len -= copy;
+ dma_src += copy;
+ dma_dst += copy;
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ msgdma_desc_config_eod(desc);
+ async_tx_ack(&first->async_tx);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+}
+
+/**
+ * msgdma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+msgdma_prep_sg(struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct msgdma_sw_desc *new, *first = NULL;
+ void *desc = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+
+ for_each_sg(src_sg, sg, src_sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&mdev->lock);
+ if (desc_cnt > mdev->desc_free_cnt) {
+ spin_unlock_bh(&mdev->lock);
+ dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
+ return NULL;
+ }
+ mdev->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&mdev->lock);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = msgdma_get_descriptor(mdev);
+
+ desc = &new->hw_desc;
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
+ if (len == 0)
+ goto fetch;
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ msgdma_desc_config(desc, dma_dst, dma_src, len,
+ MSGDMA_DESC_STRIDE_RW);
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ msgdma_desc_config_eod(desc);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+}
+
+/**
+ * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
+ *
+ * @dchan: DMA channel
+ * @sgl: Destination scatter list
+ * @sg_len: Number of entries in destination scatter list
+ * @dir: DMA transfer direction
+ * @flags: transfer ack flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *
+msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct dma_slave_config *cfg = &mdev->slave_cfg;
+ struct msgdma_sw_desc *new, *first = NULL;
+ void *desc = NULL;
+ size_t len, avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+ u32 stride;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&mdev->lock);
+ if (desc_cnt > mdev->desc_free_cnt) {
+ spin_unlock_bh(&mdev->lock);
+ dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
+ return NULL;
+ }
+ mdev->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&mdev->lock);
+
+ avail = sg_dma_len(sgl);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = msgdma_get_descriptor(mdev);
+
+ desc = &new->hw_desc;
+ len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
+ dma_dst = cfg->dst_addr;
+ stride = MSGDMA_DESC_STRIDE_RD;
+ } else {
+ dma_src = cfg->src_addr;
+ dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
+ stride = MSGDMA_DESC_STRIDE_WR;
+ }
+ msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
+ avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+
+ /* Fetch the next scatterlist entry */
+ if (avail == 0) {
+ if (sg_len == 0)
+ break;
+ sgl = sg_next(sgl);
+ if (sgl == NULL)
+ break;
+ sg_len--;
+ avail = sg_dma_len(sgl);
+ }
+ }
+
+ msgdma_desc_config_eod(desc);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+}
+
+static int msgdma_dma_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+
+ memcpy(&mdev->slave_cfg, config, sizeof(*config));
+
+ return 0;
+}
+
+static void msgdma_reset(struct msgdma_device *mdev)
+{
+ u32 val;
+ int ret;
+
+ /* Reset mSGDMA */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &mdev->csr->status);
+ iowrite32(MSGDMA_CSR_CTL_RESET, &mdev->csr->control);
+
+ ret = readl_poll_timeout(&mdev->csr->status, val,
+ (val & MSGDMA_CSR_STAT_RESETTING) == 0,
+ 1, 10000);
+ if (ret)
+ dev_err(mdev->dev, "DMA channel did not reset\n");
+
+ /* Clear all status bits */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &mdev->csr->status);
+
+ /* Enable the DMA controller including interrupts */
+ iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
+ MSGDMA_CSR_CTL_GLOBAL_INTR, &mdev->csr->control);
+
+ mdev->idle = true;
+}
+
+static void msgdma_copy_one(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *desc)
+{
+ struct msgdma_extended_desc *hw_desc = mdev->desc;
+
+ /*
+ * Check if the DESC FIFO is not full. If it's full, we need to wait
+ * for at least one entry to become free again
+ */
+ while (ioread32(&mdev->csr->status) & MSGDMA_CSR_STAT_DESC_BUF_FULL)
+ mdelay(1);
+
+ /*
+ * The descriptor needs to get copied into the descriptor FIFO
+ * of the DMA controller. The descriptor will get flushed to the
+ * FIFO, once the last word (control word) is written. Since we
+ * are not 100% sure that memcpy() writes all words in the "correct"
+ * order (address from low to high) on all architectures, we make
+ * sure this control word is written last by writing it separately and
+ * adding some write-barriers here.
+ */
+ memcpy(hw_desc, &desc->hw_desc, sizeof(desc->hw_desc) - sizeof(u32));
+
+ /* Write control word last to flush this descriptor into the FIFO */
+ mdev->idle = false;
+ wmb();
+ iowrite32(desc->hw_desc.control, &hw_desc->control);
+ wmb();
+}
+
+/**
+ * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ * @desc: Transaction descriptor pointer
+ */
+static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *desc)
+{
+ struct msgdma_sw_desc *sdesc, *next;
+
+ msgdma_copy_one(mdev, desc);
+
+ list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
+ msgdma_copy_one(mdev, sdesc);
+}
+
+/**
+ * msgdma_start_transfer - Initiate the new transfer
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_start_transfer(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc;
+
+ if (!mdev->idle)
+ return;
+
+ desc = list_first_entry_or_null(&mdev->pending_list,
+ struct msgdma_sw_desc, node);
+ if (!desc)
+ return;
+
+ list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
+ msgdma_copy_desc_to_fifo(mdev, desc);
+}
+
+/**
+ * msgdma_issue_pending - Issue pending transactions
+ * @chan: DMA channel pointer
+ */
+static void msgdma_issue_pending(struct dma_chan *chan)
+{
+ struct msgdma_device *mdev = to_mdev(chan);
+
+ spin_lock_bh(&mdev->lock);
+ msgdma_start_transfer(mdev);
+ spin_unlock_bh(&mdev->lock);
+}
+
+/**
+ * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc, *next;
+
+ list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock(&mdev->lock);
+ callback(callback_param);
+ spin_lock(&mdev->lock);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ msgdma_free_descriptor(mdev, desc);
+ }
+}
+
+/**
+ * msgdma_complete_descriptor - Mark the active descriptor as complete
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_complete_descriptor(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc;
+
+ desc = list_first_entry_or_null(&mdev->active_list,
+ struct msgdma_sw_desc, node);
+ if (!desc)
+ return;
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &mdev->done_list);
+}
+
+/**
+ * msgdma_free_descriptors - Free channel descriptors
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_free_descriptors(struct msgdma_device *mdev)
+{
+ msgdma_free_desc_list(mdev, &mdev->active_list);
+ msgdma_free_desc_list(mdev, &mdev->pending_list);
+ msgdma_free_desc_list(mdev, &mdev->done_list);
+}
+
+/**
+ * msgdma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void msgdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+
+ spin_lock_bh(&mdev->lock);
+ msgdma_free_descriptors(mdev);
+ spin_unlock_bh(&mdev->lock);
+ kfree(mdev->sw_desq);
+}
+
+/**
+ * msgdma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct msgdma_sw_desc *desc;
+ int i;
+
+ mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
+ if (!mdev->sw_desq)
+ return -ENOMEM;
+
+ mdev->idle = true;
+ mdev->desc_free_cnt = MSGDMA_DESC_NUM;
+
+ INIT_LIST_HEAD(&mdev->free_list);
+
+ for (i = 0; i < MSGDMA_DESC_NUM; i++) {
+ desc = mdev->sw_desq + i;
+ dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
+ desc->async_tx.tx_submit = msgdma_tx_submit;
+ list_add_tail(&desc->node, &mdev->free_list);
+ }
+
+ return MSGDMA_DESC_NUM;
+}
+
+/**
+ * msgdma_tasklet - Completion tasklet handler
+ * @data: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_tasklet(unsigned long data)
+{
+ struct msgdma_device *mdev = (struct msgdma_device *)data;
+ u32 count;
+ u32 size;
+ u32 status;
+
+ spin_lock(&mdev->lock);
+
+ /* Read number of responses that are available */
+ count = ioread32(&mdev->csr->resp_fill_level);
+ dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
+ __func__, __LINE__, count);
+
+ while (count--) {
+ /*
+ * Read both longwords to purge this response from the FIFO.
+ * On Avalon-MM implementations, size and status do not
+ * have any real values, like transferred bytes or error
+ * bits. So we need to just drop these values.
+ */
+ size = ioread32(&mdev->resp->bytes_transferred);
+ status = ioread32(&mdev->resp->status);
+
+ msgdma_complete_descriptor(mdev);
+ msgdma_chan_desc_cleanup(mdev);
+ }
+
+ spin_unlock(&mdev->lock);
+}
+
+/**
+ * msgdma_irq_handler - Altera mSGDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Altera mSGDMA device structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t msgdma_irq_handler(int irq, void *data)
+{
+ struct msgdma_device *mdev = data;
+ u32 status;
+
+ status = ioread32(&mdev->csr->status);
+ if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
+ /* Start next transfer if the DMA controller is idle */
+ spin_lock(&mdev->lock);
+ mdev->idle = true;
+ msgdma_start_transfer(mdev);
+ spin_unlock(&mdev->lock);
+ }
+
+ tasklet_schedule(&mdev->irq_tasklet);
+
+ /* Clear interrupt in mSGDMA controller */
+ iowrite32(MSGDMA_CSR_STAT_IRQ, &mdev->csr->status);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * msgdma_dev_remove - Device remove function
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_dev_remove(struct msgdma_device *mdev)
+{
+ if (!mdev)
+ return;
+
+ devm_free_irq(mdev->dev, mdev->irq, mdev);
+ tasklet_kill(&mdev->irq_tasklet);
+ list_del(&mdev->dmachan.device_node);
+}
+
+static int request_and_map(struct platform_device *pdev, const char *name,
+ struct resource **res, void __iomem **ptr)
+{
+ struct resource *region;
+ struct device *device = &pdev->dev;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (*res == NULL) {
+ dev_err(device, "resource %s not defined\n", name);
+ return -ENODEV;
+ }
+
+ region = devm_request_mem_region(device, (*res)->start,
+ resource_size(*res), dev_name(device));
+ if (region == NULL) {
+ dev_err(device, "unable to request %s\n", name);
+ return -EBUSY;
+ }
+
+ *ptr = devm_ioremap_nocache(device, region->start,
+ resource_size(region));
+ if (*ptr == NULL) {
+ dev_err(device, "ioremap_nocache of %s failed!\n", name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * msgdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int msgdma_probe(struct platform_device *pdev)
+{
+ struct msgdma_device *mdev;
+ struct dma_device *dma_dev;
+ struct resource *dma_res;
+ int ret;
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->dev = &pdev->dev;
+
+ /* Map CSR space */
+ ret = request_and_map(pdev, "csr", &dma_res, (void **)&mdev->csr);
+ if (ret)
+ return ret;
+
+ /* Map (extended) descriptor space */
+ ret = request_and_map(pdev, "desc", &dma_res, (void **)&mdev->desc);
+ if (ret)
+ return ret;
+
+ /* Map response space */
+ ret = request_and_map(pdev, "resp", &dma_res, (void **)&mdev->resp);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+
+ /* Get interrupt nr from platform data */
+ mdev->irq = platform_get_irq(pdev, 0);
+ if (mdev->irq < 0)
+ return -ENXIO;
+
+ ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
+ 0, dev_name(&pdev->dev), mdev);
+ if (ret)
+ return ret;
+
+ tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);
+
+ dma_cookie_init(&mdev->dmachan);
+
+ spin_lock_init(&mdev->lock);
+
+ INIT_LIST_HEAD(&mdev->active_list);
+ INIT_LIST_HEAD(&mdev->pending_list);
+ INIT_LIST_HEAD(&mdev->done_list);
+ INIT_LIST_HEAD(&mdev->free_list);
+
+ dma_dev = &mdev->dmadev;
+
+ /* Set DMA capabilities */
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
+ BIT(DMA_MEM_TO_MEM);
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ /* Init DMA link list */
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Set base routines */
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_issue_pending = msgdma_issue_pending;
+ dma_dev->dev = &pdev->dev;
+
+ dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
+ dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
+ dma_dev->device_prep_dma_sg = msgdma_prep_sg;
+ dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
+ dma_dev->device_config = msgdma_dma_config;
+
+ dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = msgdma_free_chan_resources;
+
+ mdev->dmachan.device = dma_dev;
+ list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);
+
+ /* Set DMA mask to 64 bits */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+
+ msgdma_reset(mdev);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto fail;
+
+ dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");
+
+ return 0;
+
+fail:
+ msgdma_dev_remove(mdev);
+
+ return ret;
+}
+
+/**
+ * msgdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int msgdma_remove(struct platform_device *pdev)
+{
+ struct msgdma_device *mdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&mdev->dmadev);
+ msgdma_dev_remove(mdev);
+
+ dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
+
+ return 0;
+}
+
+static struct platform_driver msgdma_driver = {
+ .driver = {
+ .name = "altera-msgdma",
+ },
+ .probe = msgdma_probe,
+ .remove = msgdma_remove,
+};
+
+module_platform_driver(msgdma_driver);
+
+MODULE_ALIAS("platform:altera-msgdma");
+MODULE_DESCRIPTION("Altera mSGDMA driver");
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_LICENSE("GPL");
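The driver above registers DMA_MEMCPY, DMA_SG and DMA_SLAVE capabilities, so it is exercised through the generic dmaengine client API rather than any driver-specific interface. A minimal sketch of such a client follows; example_memcpy() is a hypothetical helper, and dst/src are assumed to be DMA addresses already mapped for the device:

#include <linux/dmaengine.h>

/* Editorial sketch (not part of this patch): copy "len" bytes from
 * "src" to "dst" through any channel advertising DMA_MEMCPY.
 */
static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	/* Ask the dmaengine core for a channel with memcpy capability */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Prepare, submit and kick the transfer */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll for completion; a real client would use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}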
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index e41bbc7cb094..8246c26eabe5 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -782,7 +782,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
msg->error = 0;
}
-struct sba_request *
+static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
u32 src_cnt, size_t len, unsigned long flags)
@@ -991,7 +991,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
msg->error = 0;
}
-struct sba_request *
+static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
@@ -1243,7 +1243,7 @@ skip_q:
msg->error = 0;
}
-struct sba_request *
+static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
dma_addr_t *dst_p, dma_addr_t *dst_q,
dma_addr_t src, u8 scf, size_t len,
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a07ef3d6b3ec..35cb83b39192 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(sg_buffers,
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-slave_sg (default: 0)");
+ "dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -158,6 +158,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
+#define PATTERN_MEMSET_IDX 0x01
struct dmatest_thread {
struct list_head node;
@@ -239,46 +240,62 @@ static unsigned long dmatest_random(void)
return buf;
}
+static inline u8 gen_inv_idx(u8 index, bool is_memset)
+{
+ u8 val = is_memset ? PATTERN_MEMSET_IDX : index;
+
+ return ~val & PATTERN_COUNT_MASK;
+}
+
+static inline u8 gen_src_value(u8 index, bool is_memset)
+{
+ return PATTERN_SRC | gen_inv_idx(index, is_memset);
+}
+
+static inline u8 gen_dst_value(u8 index, bool is_memset)
+{
+ return PATTERN_DST | gen_inv_idx(index, is_memset);
+}
+
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
- unsigned int buf_size)
+ unsigned int buf_size, bool is_memset)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
- buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_src_value(i, is_memset);
for ( ; i < start + len; i++)
- buf[i] = PATTERN_SRC | PATTERN_COPY
- | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
for ( ; i < buf_size; i++)
- buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_src_value(i, is_memset);
buf++;
}
}
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
- unsigned int buf_size)
+ unsigned int buf_size, bool is_memset)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
- buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_dst_value(i, is_memset);
for ( ; i < start + len; i++)
- buf[i] = PATTERN_DST | PATTERN_OVERWRITE
- | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_dst_value(i, is_memset) |
+ PATTERN_OVERWRITE;
for ( ; i < buf_size; i++)
- buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_dst_value(i, is_memset);
}
}
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
- unsigned int counter, bool is_srcbuf)
+ unsigned int counter, bool is_srcbuf, bool is_memset)
{
u8 diff = actual ^ pattern;
- u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ u8 expected = pattern | gen_inv_idx(counter, is_memset);
const char *thread_name = current->comm;
if (is_srcbuf)
@@ -298,7 +315,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
unsigned int end, unsigned int counter, u8 pattern,
- bool is_srcbuf)
+ bool is_srcbuf, bool is_memset)
{
unsigned int i;
unsigned int error_count = 0;
@@ -311,11 +328,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
counter = counter_orig;
for (i = start; i < end; i++) {
actual = buf[i];
- expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ expected = pattern | gen_inv_idx(counter, is_memset);
if (actual != expected) {
if (error_count < MAX_ERROR_COUNT)
dmatest_mismatch(actual, pattern, i,
- counter, is_srcbuf);
+ counter, is_srcbuf,
+ is_memset);
error_count++;
}
counter++;
@@ -435,6 +453,7 @@ static int dmatest_func(void *data)
s64 runtime = 0;
unsigned long long total_len = 0;
u8 align = 0;
+ bool is_memset = false;
set_freezable();
@@ -448,6 +467,10 @@ static int dmatest_func(void *data)
if (thread->type == DMA_MEMCPY) {
align = dev->copy_align;
src_cnt = dst_cnt = 1;
+ } else if (thread->type == DMA_MEMSET) {
+ align = dev->fill_align;
+ src_cnt = dst_cnt = 1;
+ is_memset = true;
} else if (thread->type == DMA_SG) {
align = dev->copy_align;
src_cnt = dst_cnt = sg_buffers;
@@ -571,9 +594,9 @@ static int dmatest_func(void *data)
dst_off = (dst_off >> align) << align;
dmatest_init_srcs(thread->srcs, src_off, len,
- params->buf_size);
+ params->buf_size, is_memset);
dmatest_init_dsts(thread->dsts, dst_off, len,
- params->buf_size);
+ params->buf_size, is_memset);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
@@ -640,6 +663,11 @@ static int dmatest_func(void *data)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst_off,
srcs[0], len, flags);
+ else if (thread->type == DMA_MEMSET)
+ tx = dev->device_prep_dma_memset(chan,
+ dsts[0] + dst_off,
+ *(thread->srcs[0] + src_off),
+ len, flags);
else if (thread->type == DMA_SG)
tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
rx_sg, src_cnt, flags);
@@ -722,23 +750,25 @@ static int dmatest_func(void *data)
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
- 0, PATTERN_SRC, true);
+ 0, PATTERN_SRC, true, is_memset);
error_count += dmatest_verify(thread->srcs, src_off,
src_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, true);
+ PATTERN_SRC | PATTERN_COPY, true, is_memset);
error_count += dmatest_verify(thread->srcs, src_off + len,
params->buf_size, src_off + len,
- PATTERN_SRC, true);
+ PATTERN_SRC, true, is_memset);
pr_debug("%s: verifying dest buffer...\n", current->comm);
error_count += dmatest_verify(thread->dsts, 0, dst_off,
- 0, PATTERN_DST, false);
+ 0, PATTERN_DST, false, is_memset);
+
error_count += dmatest_verify(thread->dsts, dst_off,
dst_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, false);
+ PATTERN_SRC | PATTERN_COPY, false, is_memset);
+
error_count += dmatest_verify(thread->dsts, dst_off + len,
params->buf_size, dst_off + len,
- PATTERN_DST, false);
+ PATTERN_DST, false, is_memset);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
@@ -821,6 +851,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
if (type == DMA_MEMCPY)
op = "copy";
+ else if (type == DMA_MEMSET)
+ op = "set";
else if (type == DMA_SG)
op = "sg";
else if (type == DMA_XOR)
@@ -883,6 +915,13 @@ static int dmatest_add_channel(struct dmatest_info *info,
}
}
+ if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+ if (dmatest == 2) {
+ cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ }
+
if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
if (dmatest == 1) {
cnt = dmatest_add_threads(info, dtc, DMA_SG);
@@ -961,6 +1000,7 @@ static void run_threaded_test(struct dmatest_info *info)
params->noverify = noverify;
request_channels(info, DMA_MEMCPY);
+ request_channels(info, DMA_MEMSET);
request_channels(info, DMA_XOR);
request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
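A worked note on the pattern scheme introduced above. The PATTERN_SRC and PATTERN_DST values are not shown in this hunk; the sketch below assumes the usual dmatest definitions (PATTERN_SRC 0x80, PATTERN_DST 0x00), so treat the concrete bytes as illustrative:

/*
 * Editorial sketch: in memset mode every position uses the fixed
 * index PATTERN_MEMSET_IDX, so the expected bytes are constants:
 *
 *   gen_inv_idx(i, true)   = ~0x01 & 0x1f        = 0x1e  (any i)
 *   gen_src_value(i, true) = PATTERN_SRC | 0x1e  = 0x9e
 *   gen_dst_value(i, true) = PATTERN_DST | 0x1e  = 0x1e
 *
 * In the region to be transferred the source also carries
 * PATTERN_COPY (0x40), giving 0xde, and that single byte is what
 * device_prep_dma_memset() replicates across the destination, so
 * a constant fill still verifies against the index-based scheme.
 */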
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index ed8ed1192775..93e006c3441d 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -39,7 +39,7 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
-static struct pci_device_id ioat_pci_tbl[] = {
+static const struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index faae0bfe1109..91fd395c90c4 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -38,8 +38,8 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
if (ofdma->of_node == dma_spec->np)
return ofdma;
- pr_debug("%s: can't find DMA controller %s\n", __func__,
- dma_spec->np->full_name);
+ pr_debug("%s: can't find DMA controller %pOF\n", __func__,
+ dma_spec->np);
return NULL;
}
@@ -255,8 +255,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
- pr_err("%s: dma-names property of node '%s' missing or empty\n",
- __func__, np->full_name);
+ pr_err("%s: dma-names property of node '%pOF' missing or empty\n",
+ __func__, np);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index b1535b1fe95c..4cf0d4d0cecf 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4040,9 +4040,9 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* it is DMA0 or DMA1 */
idx = of_get_property(np, "cell-index", &len);
if (!idx || (len != sizeof(u32))) {
- dev_err(&ofdev->dev, "Device node %s has missing "
+ dev_err(&ofdev->dev, "Device node %pOF has missing "
"or invalid cell-index property\n",
- np->full_name);
+ np);
return -EINVAL;
}
id = *idx;
@@ -4307,7 +4307,7 @@ static int ppc440spe_adma_remove(struct platform_device *ofdev)
* "poly" allows setting/checking used polynomial (for PPC440SPe only).
*/
-static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
+static ssize_t devices_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
int i;
@@ -4321,16 +4321,17 @@ static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
}
return size;
}
+static DRIVER_ATTR_RO(devices);
-static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
+static ssize_t enable_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE,
"PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
ppc440spe_r6_enabled ? "EN" : "DIS");
}
-static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
- const char *buf, size_t count)
+static ssize_t enable_store(struct device_driver *dev, const char *buf,
+ size_t count)
{
unsigned long val;
@@ -4357,8 +4358,9 @@ static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
}
return count;
}
+static DRIVER_ATTR_RW(enable);
-static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
+static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;
@@ -4377,8 +4379,8 @@ static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
return size;
}
-static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
- const char *buf, size_t count)
+static ssize_t poly_store(struct device_driver *dev, const char *buf,
+ size_t count)
{
unsigned long reg, val;
@@ -4404,12 +4406,7 @@ static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
return count;
}
-
-static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
-static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
- store_ppc440spe_r6enable);
-static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
- store_ppc440spe_r6poly);
+static DRIVER_ATTR_RW(poly);
/*
* Common initialisation for RAID engines; allocate memory for
@@ -4448,8 +4445,7 @@ static int ppc440spe_configure_raid_devices(void)
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (!dcr_base && !dcr_len) {
- pr_err("%s: can't get DCR registers base/len!\n",
- np->full_name);
+ pr_err("%pOF: can't get DCR registers base/len!\n", np);
of_node_put(np);
iounmap(i2o_reg);
return -ENODEV;
@@ -4457,7 +4453,7 @@ static int ppc440spe_configure_raid_devices(void)
i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(i2o_dcr_host)) {
- pr_err("%s: failed to map DCRs!\n", np->full_name);
+ pr_err("%pOF: failed to map DCRs!\n", np);
of_node_put(np);
iounmap(i2o_reg);
return -ENODEV;
@@ -4518,15 +4514,14 @@ static int ppc440spe_configure_raid_devices(void)
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (!dcr_base && !dcr_len) {
- pr_err("%s: can't get DCR registers base/len!\n",
- np->full_name);
+ pr_err("%pOF: can't get DCR registers base/len!\n", np);
ret = -ENODEV;
goto out_mq;
}
ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
- pr_err("%s: failed to map DCRs!\n", np->full_name);
+ pr_err("%pOF: failed to map DCRs!\n", np);
ret = -ENODEV;
goto out_mq;
}
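For context on the DRIVER_ATTR_* conversion in this hunk: the macros derive the sysfs accessor names from the attribute name, which is why the helpers are renamed to the <name>_show/<name>_store pattern. Roughly (simplified from include/linux/device.h, so treat details as approximate), DRIVER_ATTR_RW(poly) produces:

/* Editorial sketch of what DRIVER_ATTR_RW(poly) expands to: a
 * driver_attribute wired to poly_show()/poly_store(), hence the
 * strict naming convention enforced by the macro.
 */
static struct driver_attribute driver_attr_poly = {
	.attr  = { .name = "poly", .mode = 0644 },
	.show  = poly_show,
	.store = poly_store,
};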
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 34fb6afd229b..e3669850aef4 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
return NULL;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
- src, dest, len, flags);
+ src, dest, len, flags,
+ HIDMA_TRE_MEMCPY);
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return &mdesc->desc;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_desc *mdesc = NULL;
+ struct hidma_dev *mdma = mchan->dmadev;
+ unsigned long irqflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ if (!mdesc)
+ return NULL;
+
+ hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+ value, dest, len, flags,
+ HIDMA_TRE_MEMSET);
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, irqflags);
@@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+ dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
if (WARN_ON(!pdev->dev.dma_mask)) {
rc = -ENXIO;
goto dmafree;
@@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->dev_trca = trca;
dmadev->trca_resource = trca_resource;
dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+ dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
dmadev->ddev.device_tx_status = hidma_tx_status;
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index 41e0aa283828..5f9966e82c0b 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -28,6 +28,11 @@
#define HIDMA_TRE_DEST_LOW_IDX 4
#define HIDMA_TRE_DEST_HI_IDX 5
+enum tre_type {
+ HIDMA_TRE_MEMCPY = 3,
+ HIDMA_TRE_MEMSET = 4,
+};
+
struct hidma_tre {
atomic_t allocated; /* if this channel is allocated */
bool queued; /* flag whether this is pending */
@@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
- dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+ dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 1530a661518d..4999e266b2de 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -105,10 +105,6 @@ enum ch_state {
HIDMA_CH_STOPPED = 4,
};
-enum tre_type {
- HIDMA_TRE_MEMCPY = 3,
-};
-
enum err_code {
HIDMA_EVRE_STATUS_COMPLETE = 1,
HIDMA_EVRE_STATUS_ERROR = 4,
@@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
tre->err_info = 0;
tre->lldev = lldev;
tre_local = &tre->tre_local[0];
- tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
- tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+ tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */
*tre_ch = i;
if (callback)
@@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len,
- u32 flags)
+ u32 flags, u32 txntype)
{
struct hidma_tre *tre;
u32 *tre_local;
@@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
}
tre_local = &tre->tre_local[0];
+ tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
+ tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
tre_local[HIDMA_TRE_LEN_IDX] = len;
tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 5a0991bc4787..c45e244b2d99 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -28,7 +28,7 @@
#include "hidma_mgmt.h"
-#define HIDMA_QOS_N_OFFSET 0x300
+#define HIDMA_QOS_N_OFFSET 0x700
#define HIDMA_CFG_OFFSET 0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
#define HIDMA_MAX_XACTIONS_OFFSET 0x420
@@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
goto out;
}
- if (max_write_request) {
+ if (max_write_request &&
+ (max_write_request != mgmtdev->max_write_request)) {
dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
max_write_request);
mgmtdev->max_write_request = max_write_request;
@@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
goto out;
}
- if (max_read_request) {
+ if (max_read_request &&
+ (max_read_request != mgmtdev->max_read_request)) {
dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
max_read_request);
mgmtdev->max_read_request = max_read_request;
@@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-write-transactions missing\n");
goto out;
}
- if (max_wr_xactions) {
+ if (max_wr_xactions &&
+ (max_wr_xactions != mgmtdev->max_wr_xactions)) {
dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
max_wr_xactions);
mgmtdev->max_wr_xactions = max_wr_xactions;
@@ -266,7 +269,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-transactions missing\n");
goto out;
}
- if (max_rd_xactions) {
+ if (max_rd_xactions &&
+ (max_rd_xactions != mgmtdev->max_rd_xactions)) {
dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
max_rd_xactions);
mgmtdev->max_rd_xactions = max_rd_xactions;
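With DMA_MEMSET now advertised, clients reach the new hidma_prep_dma_memset() through the generic dmaengine helper. A minimal sketch, assuming chan is an already-requested hidma channel and dest a mapped DMA address (example_memset() is hypothetical):

#include <linux/dmaengine.h>

/* Editorial sketch (not part of this patch): fill "len" bytes at
 * "dest" with "value" via the DMA engine's memset capability.
 */
static dma_cookie_t example_memset(struct dma_chan *chan, dma_addr_t dest,
				   int value, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memset(chan, dest, value, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	/* Submit first, then tell the engine to start processing */
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return cookie;
}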