author    Jon Medhurst <tixy@linaro.org>    2015-08-04 16:13:56 +0100
committer Jon Medhurst <tixy@linaro.org>    2015-08-04 16:13:56 +0100
commit    b76dbc19ecdad60742bd51fba34092a0c2d81dc6 (patch)
tree      853765b1f3e315e4057ed9efd3726efb93c76d70
parent    611ee64d3560a082b41f74fc3921cbe42be951ba (diff)
parent    7c4deebb459fa66380f061727c4a8b8c5e3d6990 (diff)
Merge branch 'lsk-3.10-armlt-dma' into integration-lsk-3.10-juno-android
Conflicts: linaro/configs/vexpress64.conf
-rw-r--r-- drivers/dma/dmaengine.c        |  26
-rw-r--r-- drivers/dma/pl330.c            | 312
-rw-r--r-- include/linux/dmaengine.h      |  73
-rw-r--r-- linaro/configs/vexpress64.conf |   2
4 files changed, 251 insertions, 162 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 93f7992bee5c..78dbbe09d7b1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -504,6 +504,32 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
}
/**
+ * dma_get_slave_channel - try to get a specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+ int err = -EBUSY;
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ if (chan->client_count == 0) {
+ err = dma_chan_get(chan);
+ if (err) {
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ chan = NULL;
+ }
+ } else
+ chan = NULL;
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
* dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
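The dma_get_slave_channel() export above lets a controller's of_xlate callback hand out one specific channel directly, instead of round-tripping through dma_request_channel() with a filter function — which is exactly what the pl330.c change below does. A minimal sketch of such a callback, assuming a hypothetical controller driver (my_dmac, my_chan and their fields are illustrative, not part of this patch):

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

struct my_chan {
	struct dma_chan chan;
};

struct my_dmac {
	unsigned int num_channels;
	struct my_chan *channels;
};

static struct dma_chan *my_of_dma_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct my_dmac *dmac = ofdma->of_dma_data;
	unsigned int chan_id;

	/* One cell in the DT specifier: the channel index */
	if (dma_spec->args_count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= dmac->num_channels)
		return NULL;

	/* Returns NULL if the channel is already held by a client */
	return dma_get_slave_channel(&dmac->channels[chan_id].chan);
}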
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4c2f465be339..3c5efbeb38d9 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -157,7 +157,6 @@ enum pl330_reqtype {
#define PERIPH_REV_R0P0 0
#define PERIPH_REV_R1P0 1
#define PERIPH_REV_R1P1 2
-#define PCELL_ID 0xff0
#define CR0_PERIPH_REQ_SET (1 << 0)
#define CR0_BOOT_EN_SET (1 << 1)
@@ -193,8 +192,6 @@ enum pl330_reqtype {
#define INTEG_CFG 0x0
#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
-#define PCELL_ID_VAL 0xb105f00d
-
#define PL330_STATE_STOPPED (1 << 0)
#define PL330_STATE_EXECUTING (1 << 1)
#define PL330_STATE_WFE (1 << 2)
@@ -292,11 +289,10 @@ static unsigned cmd_line;
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
u32 periph_id;
- u32 pcell_id;
#define DMAC_MODE_NS (1 << 0)
unsigned int mode;
unsigned int data_bus_width:10; /* In number of bits */
- unsigned int data_buf_dep:10;
+ unsigned int data_buf_dep:11;
unsigned int num_chan:4;
unsigned int num_peri:6;
u32 peri_ns;
@@ -505,7 +501,7 @@ struct pl330_dmac {
/* Maximum possible events/irqs */
int events[32];
/* BUS address of MicroCode buffer */
- u32 mcode_bus;
+ dma_addr_t mcode_bus;
/* CPU address of MicroCode buffer */
void *mcode_cpu;
/* List of all Channel threads */
@@ -547,8 +543,12 @@ struct dma_pl330_chan {
/* DMA-Engine Channel */
struct dma_chan chan;
- /* List of to be xfered descriptors */
+ /* List of submitted descriptors */
+ struct list_head submitted_list;
+ /* List of issued descriptors */
struct list_head work_list;
+ /* List of completed descriptors */
+ struct list_head completed_list;
/* Pointer to the DMAC that manages this channel,
* NULL if the channel is available to be acquired.
@@ -580,12 +580,16 @@ struct dma_pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
+ /* Holds info about sg limitations */
+ struct device_dma_parameters dma_parms;
+
/* Pool of descriptors available for the DMAC's channels */
struct list_head desc_pool;
/* To protect desc_pool manipulation */
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
+ unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
};
@@ -608,11 +612,6 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
-struct dma_pl330_filter_args {
- struct dma_pl330_dmac *pdmac;
- unsigned int chan_id;
-};
-
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
if (r && r->xfer_cb)
@@ -650,19 +649,6 @@ static inline bool _manager_ns(struct pl330_thread *thrd)
return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
-static inline u32 get_id(struct pl330_info *pi, u32 off)
-{
- void __iomem *regs = pi->base;
- u32 id = 0;
-
- id |= (readb(regs + off + 0x0) << 0);
- id |= (readb(regs + off + 0x4) << 8);
- id |= (readb(regs + off + 0x8) << 16);
- id |= (readb(regs + off + 0xc) << 24);
-
- return id;
-}
-
static inline u32 get_revision(u32 periph_id)
{
return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
@@ -1986,9 +1972,6 @@ static void read_dmac_config(struct pl330_info *pi)
pi->pcfg.num_events = val;
pi->pcfg.irq_ns = readl(regs + CR3);
-
- pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
- pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
static inline void _reset_thread(struct pl330_thread *thrd)
@@ -2098,10 +2081,8 @@ static int pl330_add(struct pl330_info *pi)
regs = pi->base;
/* Check if we can handle this DMAC */
- if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
- || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
- dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
- get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
+ if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
+ dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
return -EINVAL;
}
@@ -2220,66 +2201,6 @@ to_desc(struct dma_async_tx_descriptor *tx)
return container_of(tx, struct dma_pl330_desc, txd);
}
-static inline void free_desc_list(struct list_head *list)
-{
- struct dma_pl330_dmac *pdmac;
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- /* Finish off the work list */
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
- void *param;
-
- /* All desc in a list belong to same channel */
- pch = desc->pchan;
- callback = desc->txd.callback;
- param = desc->txd.callback_param;
-
- if (callback)
- callback(param);
-
- desc->pchan = NULL;
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- pdmac = pch->dmac;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
- list_splice_tail_init(list, &pdmac->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
-
- /* Change status to reload it */
- desc->status = PREP;
- pch = desc->pchan;
- callback = desc->txd.callback;
- if (callback)
- callback(desc->txd.callback_param);
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
- list_splice_tail_init(list, &pch->work_list);
- spin_unlock_irqrestore(&pch->lock, flags);
-}
-
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -2313,7 +2234,6 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
- LIST_HEAD(list);
spin_lock_irqsave(&pch->lock, flags);
@@ -2322,7 +2242,7 @@ static void pl330_tasklet(unsigned long data)
if (desc->status == DONE) {
if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
- list_move_tail(&desc->node, &list);
+ list_move_tail(&desc->node, &pch->completed_list);
}
/* Try to submit a req imm. next to the last completed cookie */
@@ -2331,12 +2251,31 @@ static void pl330_tasklet(unsigned long data)
/* Make sure the PL330 Channel thread is active */
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
- spin_unlock_irqrestore(&pch->lock, flags);
+ while (!list_empty(&pch->completed_list)) {
+ dma_async_tx_callback callback;
+ void *callback_param;
- if (pch->cyclic)
- handle_cyclic_desc_list(&list);
- else
- free_desc_list(&list);
+ desc = list_first_entry(&pch->completed_list,
+ struct dma_pl330_desc, node);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ if (pch->cyclic) {
+ desc->status = PREP;
+ list_move_tail(&desc->node, &pch->work_list);
+ } else {
+ desc->status = FREE;
+ list_move_tail(&desc->node, &pch->dmac->desc_pool);
+ }
+
+ if (callback) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&pch->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&pch->lock, flags);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -2358,16 +2297,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
-static bool pl330_dt_filter(struct dma_chan *chan, void *param)
-{
- struct dma_pl330_filter_args *fargs = param;
-
- if (chan->device != &fargs->pdmac->ddma)
- return false;
-
- return (chan->chan_id == fargs->chan_id);
-}
-
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2376,7 +2305,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
return false;
peri_id = chan->private;
- return *peri_id == (unsigned)param;
+ return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
@@ -2385,23 +2314,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
{
int count = dma_spec->args_count;
struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
- struct dma_pl330_filter_args fargs;
- dma_cap_mask_t cap;
-
- if (!pdmac)
- return NULL;
+ unsigned int chan_id;
if (count != 1)
return NULL;
- fargs.pdmac = pdmac;
- fargs.chan_id = dma_spec->args[0];
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
- dma_cap_set(DMA_CYCLIC, cap);
+ chan_id = dma_spec->args[0];
+ if (chan_id >= pdmac->num_peripherals)
+ return NULL;
- return dma_request_channel(cap, pl330_dt_filter, &fargs);
+ return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2431,7 +2353,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc, *_dt;
+ struct dma_pl330_desc *desc;
unsigned long flags;
struct dma_pl330_dmac *pdmac = pch->dmac;
struct dma_slave_config *slave_config;
@@ -2445,12 +2367,24 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
- list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
- desc->status = DONE;
- list_move_tail(&desc->node, &list);
+ list_for_each_entry(desc, &pch->submitted_list, node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
+ list_for_each_entry(desc, &pch->work_list, node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
+ list_for_each_entry(desc, &pch->completed_list, node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2507,7 +2441,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
static void pl330_issue_pending(struct dma_chan *chan)
{
- pl330_tasklet((unsigned long) to_pchan(chan));
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+ list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ pl330_tasklet((unsigned long)pch);
}
/*
@@ -2534,11 +2475,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_assign(&desc->txd);
- list_move_tail(&desc->node, &pch->work_list);
+ list_move_tail(&desc->node, &pch->submitted_list);
}
cookie = dma_cookie_assign(&last->txd);
- list_add_tail(&last->node, &pch->work_list);
+ list_add_tail(&last->node, &pch->submitted_list);
spin_unlock_irqrestore(&pch->lock, flags);
return cookie;
@@ -2546,12 +2487,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
static inline void _init_desc(struct dma_pl330_desc *desc)
{
- desc->pchan = NULL;
desc->req.x = &desc->px;
desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
- desc->rqcfg.privileged = 0;
- desc->rqcfg.insnaccess = 0;
desc->rqcfg.scctl = SCCTRL0;
desc->rqcfg.dcctl = DCCTRL0;
desc->req.cfg = &desc->rqcfg;
@@ -2571,7 +2509,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;
- desc = kmalloc(count * sizeof(*desc), flg);
+ desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
@@ -2836,6 +2774,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
return &desc->txd;
}
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+ struct dma_pl330_desc *first)
+{
+ unsigned long flags;
+ struct dma_pl330_desc *desc;
+
+ if (!first)
+ return;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2844,7 +2804,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
struct scatterlist *sg;
- unsigned long flags;
int i;
dma_addr_t addr;
@@ -2864,20 +2823,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_err(pch->dmac->pif.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- if (!first)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
-
- while (!list_empty(&first->node)) {
- desc = list_entry(first->node.next,
- struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
- }
-
- list_move_tail(&first->node, &pdmac->desc_pool);
-
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ __pl330_giveback_desc(pdmac, first);
return NULL;
}
@@ -2918,6 +2864,26 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
+#define PL330_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ return 0;
+}
+
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2932,6 +2898,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = adev->dev.platform_data;
+ ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
@@ -2951,15 +2921,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
- irq = adev->irq[0];
- ret = request_irq(irq, pl330_irq_handler, 0,
- dev_name(&adev->dev), pi);
- if (ret)
- return ret;
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = adev->irq[i];
+ if (irq) {
+ ret = devm_request_irq(&adev->dev, irq,
+ pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ return ret;
+ } else {
+ break;
+ }
+ }
+ pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
- goto probe_err1;
+ return ret;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -2977,6 +2955,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
else
num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+ pdmac->num_peripherals = num_chan;
+
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
if (!pdmac->peripherals) {
ret = -ENOMEM;
@@ -2991,7 +2971,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
else
pch->chan.private = adev->dev.of_node;
+ INIT_LIST_HEAD(&pch->submitted_list);
INIT_LIST_HEAD(&pch->work_list);
+ INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
@@ -3021,6 +3003,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
pd->device_issue_pending = pl330_issue_pending;
+ pd->device_slave_caps = pl330_dma_device_slave_caps;
ret = dma_async_device_register(pd);
if (ret) {
@@ -3037,8 +3020,19 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ adev->dev.dma_parms = &pdmac->dma_parms;
+
+ /*
+ * This is the limit for transfers with a buswidth of 1; larger
+ * buswidths will have larger limits.
+ */
+ ret = dma_set_max_seg_size(&adev->dev, 1900800);
+ if (ret)
+ dev_err(&adev->dev, "unable to set the seg size\n");
+
dev_info(&adev->dev,
- "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
+ "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
dev_info(&adev->dev,
"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
pi->pcfg.data_buf_dep,
@@ -3062,8 +3056,6 @@ probe_err3:
}
probe_err2:
pl330_del(pi);
-probe_err1:
- free_irq(irq, pi);
return ret;
}
@@ -3073,7 +3065,6 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- int irq;
if (!pdmac)
return 0;
@@ -3100,9 +3091,6 @@ static int pl330_remove(struct amba_device *adev)
pl330_del(pi);
- irq = adev->irq[0];
- free_irq(irq, pi);
-
return 0;
}
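The reworked pl330_tasklet() above moves completed descriptors to a per-channel completed_list and drops pch->lock around each completion callback. That unlock matters: a callback routinely submits the next transfer, and pl330_tx_submit() takes the same channel lock, so calling back with it held would deadlock. A minimal sketch of the client side this protects (the my_ctx type and names are hypothetical, not from this tree):

#include <linux/dmaengine.h>

struct my_ctx {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *next_desc;
};

/* Completion callback; runs from pl330_tasklet() with pch->lock released */
static void my_dma_complete(void *param)
{
	struct my_ctx *ctx = param;

	/*
	 * dmaengine_submit() ends up in pl330_tx_submit(), which takes
	 * pch->lock -- hence the unlock/relock around the callback.
	 */
	dmaengine_submit(ctx->next_desc);
	dma_async_issue_pending(ctx->chan);
}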
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 96d3e4ab11a9..162242bd6f33 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -371,6 +371,61 @@ struct dma_slave_config {
unsigned int slave_id;
};
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
+ * DMA channel is only able to tell whether a descriptor has been completed or
+ * not, so the residue field of struct dma_tx_state will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ * completed segment of the transfer (For cyclic transfers this is after each
+ * period). This is typically implemented by having the hardware generate an
+ * interrupt after each transferred segment and then the driver updates the
+ * outstanding residue by the size of the segment. Another possibility is if
+ * the hardware supports scatter-gather and the segment descriptor has a field
+ * which gets set after the segment has been completed. The driver then counts
+ * the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ * burst. This is typically only supported if the hardware has a progress
+ * register of some sort (e.g. a register with the current read/write address
+ * or a register with the amount of bursts/beats/bytes that have been
+ * transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
+/** struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports;
+ * since enum dma_transfer_direction is not defined as a bit mask, the
+ * dma controller should set (1 << <TYPE>) and clients should test for
+ * the same bits
+ * @cmd_pause: true if pause (and thereby resume) is supported
+ * @cmd_terminate: true if the terminate command is supported
+ *
+ * @max_sg_nr: maximum number of SG segments supported
+ * 0 for no maximum
+ * @max_sg_len: maximum length of a SG segment supported
+ * 0 for no maximum
+ * @residue_granularity: granularity of the reported transfer residue
+ */
+struct dma_slave_caps {
+ u32 src_addr_widths;
+ u32 dstn_addr_widths;
+ u32 directions;
+ bool cmd_pause;
+ bool cmd_terminate;
+
+ u32 max_sg_nr;
+ u32 max_sg_len;
+ enum dma_residue_granularity residue_granularity;
+};
+
static inline const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);
@@ -534,6 +589,7 @@ struct dma_tx_state {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
+ * @device_slave_caps: return the slave channel capabilities
*/
struct dma_device {
@@ -602,6 +658,7 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
};
static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -675,6 +732,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+ if (!chan || !caps)
+ return -EINVAL;
+
+ /* check if the channel supports slave transactions */
+ if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+ return -ENXIO;
+
+ if (chan->device->device_slave_caps)
+ return chan->device->device_slave_caps(chan, caps);
+
+ return -ENXIO;
+}
+
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -1000,6 +1072,7 @@ int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
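With device_slave_caps wired up, a client can query a channel through the new dma_get_slave_caps() helper before configuring it. A minimal sketch of a caller, assuming a hypothetical check_chan_caps() helper (note this header revision spells the destination field dstn_addr_widths):

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static int check_chan_caps(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;	/* -ENXIO if the driver exposes no caps */

	/* Bail out unless mem-to-dev with 32-bit accesses is possible */
	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
		return -EINVAL;
	if (!(caps.dstn_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;

	return 0;
}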
diff --git a/linaro/configs/vexpress64.conf b/linaro/configs/vexpress64.conf
index e0fb35a95867..d110bcc2b426 100644
--- a/linaro/configs/vexpress64.conf
+++ b/linaro/configs/vexpress64.conf
@@ -82,3 +82,5 @@ CONFIG_MALI_PLATFORM_THIRDPARTY=y
CONFIG_MALI_EXPERT=y
CONFIG_MALI_PLATFORM_THIRDPARTY_NAME="juno_soc"
CONFIG_MALI_PLATFORM_FAKE=y
+CONFIG_DMADEVICES=y
+CONFIG_PL330_DMA=y