Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--  drivers/dma/fsldma.c  108
1 file changed, 46 insertions(+), 62 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 526579df603..d300de456c9 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -882,65 +882,15 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
}
/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function is run after the queue of running descriptors has been
- * executed by the DMA engine. It will run any callbacks, and then free
- * the descriptors.
- *
- * HARDWARE STATE: idle
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc, *_desc;
- LIST_HEAD(ld_cleanup);
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->ld_running)) {
- dma_cookie_t cookie;
-
- desc = to_fsl_desc(chan->ld_running.prev);
- cookie = desc->async_tx.cookie;
-
- chan->completed_cookie = cookie;
- chan_dbg(chan, "completed cookie=%d\n", cookie);
- }
-
- /*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
- */
- list_splice_tail_init(&chan->ld_running, &ld_cleanup);
-
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- /* Run the callback for each descriptor, in order */
- list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
- /* Remove from the list of transactions */
- list_del(&desc->node);
-
- /* Run all cleanup for this descriptor */
- fsldma_cleanup_descriptor(chan, desc);
- }
-}
-
-/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
* HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
*/
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
/*
* If the list of pending descriptors is empty, then we
@@ -948,7 +898,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
*/
if (list_empty(&chan->ld_pending)) {
chan_dbg(chan, "no pending LDs\n");
- goto out_unlock;
+ return;
}
/*
@@ -958,7 +908,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
*/
if (!chan->idle) {
chan_dbg(chan, "DMA controller still busy\n");
- goto out_unlock;
+ return;
}
/*
@@ -996,9 +946,6 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
dma_start(chan);
chan->idle = false;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
@@ -1008,7 +955,11 @@ out_unlock:
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
@@ -1109,20 +1060,53 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ struct fsl_desc_sw *desc, *_desc;
+ LIST_HEAD(ld_cleanup);
unsigned long flags;
chan_dbg(chan, "tasklet entry\n");
- /* run all callbacks, free all used descriptors */
- fsl_chan_ld_cleanup(chan);
-
- /* the channel is now idle */
spin_lock_irqsave(&chan->desc_lock, flags);
+
+ /* update the cookie if we have some descriptors to cleanup */
+ if (!list_empty(&chan->ld_running)) {
+ dma_cookie_t cookie;
+
+ desc = to_fsl_desc(chan->ld_running.prev);
+ cookie = desc->async_tx.cookie;
+
+ chan->completed_cookie = cookie;
+ chan_dbg(chan, "completed_cookie=%d\n", cookie);
+ }
+
+ /*
+ * move the descriptors to a temporary list so we can drop the lock
+ * during the entire cleanup operation
+ */
+ list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+ /* the hardware is now idle and ready for more */
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);
- /* start any pending transactions automatically */
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /* Run all cleanup for this descriptor */
+ fsldma_cleanup_descriptor(chan, desc);
+ }
+
chan_dbg(chan, "tasklet exit\n");
}
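
A condensed sketch of the pattern this patch introduces (not part of the patch itself; the function name tasklet_pattern is hypothetical, and debug output, error handling, and the completed-cookie update are omitted). Under chan->desc_lock the tasklet splices the completed descriptors onto a private list and restarts the hardware via fsl_chan_xfer_ld_queue(), which now relies on the caller holding the lock; callbacks and freeing then run with the lock dropped, so they may safely submit new work:

/*
 * Illustrative sketch only, assuming the fsldma driver's internal types
 * (struct fsldma_chan, struct fsl_desc_sw) and the helpers shown in the
 * patch above.
 */
static void tasklet_pattern(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* pull the completed descriptors onto a private list */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* hardware is idle; start pending work while still holding the lock */
	chan->idle = true;
	fsl_chan_xfer_ld_queue(chan);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* run callbacks and free descriptors without the lock held */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
		list_del(&desc->node);
		fsldma_cleanup_descriptor(chan, desc);
	}
}

This keeps the DMA controller busy while cleanup proceeds and removes the need for fsl_chan_xfer_ld_queue() to take the lock itself, which is why fsl_dma_memcpy_issue_pending() now wraps the call in spin_lock_irqsave()/spin_unlock_irqrestore().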