author		Linus Walleij	2011-04-19 08:31:32 +0800
committer	Vinod Koul	2011-05-13 19:40:15 +0530
commit		a7c57cf7d4327c41510f8cbf45b1b970e02c34f8 (patch)
tree		2f1b7ae4940d9540d966f8a287a7337d14cb7c76 /drivers/dma
parent		69cea5a00d3135677939fce1fefe54ed522055a0 (diff)
dmaengine/dw_dmac: implement pause and resume in dwc_control
Some peripherals, such as amba-pl011, need pause to be implemented in the DMA
controller drivers. This also makes dwc_tx_status() return the correct status
when the channel is paused.
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
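
As context, a minimal sketch of how a slave driver such as amba-pl011 might
drive these commands through the generic dmaengine wrappers; the function,
channel, and cookie names below are hypothetical and only illustrate the API
surface this patch implements:

	#include <linux/kernel.h>
	#include <linux/dmaengine.h>

	/* Hypothetical client fragment: suspend the channel, inspect the
	 * in-flight state, then let the transfer continue.
	 */
	static void example_pause_resume(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		/* Routed to dwc_control(chan, DMA_PAUSE, 0) by the wrapper */
		if (dmaengine_pause(chan))
			return;

		/* With this patch, a paused channel now reports DMA_PAUSED */
		if (dmaengine_tx_status(chan, cookie, &state) == DMA_PAUSED)
			pr_info("paused, residue %u bytes\n", state.residue);

		/* Clears DWC_CFGL_CH_SUSP and the transfer proceeds */
		dmaengine_resume(chan);
	}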
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dw_dmac.c		59 +++++++++++++++++++++++++++++++++++++++--------------------
-rw-r--r--	drivers/dma/dw_dmac_regs.h	 1 +
2 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 442b98b81e7c..eec675bf4f95 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -862,34 +862,50 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
+	u32			cfglo;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_irqsave(&dwc->lock, flags);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+			cpu_relax();
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
+		dwc->paused = true;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
 
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* active_list entries will end up before queued entries */
-	list_splice_init(&dwc->queue, &list);
-	list_splice_init(&dwc->active_list, &list);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+		dwc->paused = false;
 
-	spin_unlock_irqrestore(&dwc->lock, flags);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc, false);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		dwc->paused = false;
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else
+		return -ENXIO;
 
 	return 0;
 }
@@ -923,6 +939,9 @@ dwc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
+	if (dwc->paused)
+		return DMA_PAUSED;
+
 	return ret;
 }
 
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8..c968597c32ab 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -138,6 +138,7 @@ struct dw_dma_chan {
 	void __iomem		*ch_regs;
 	u8			mask;
 	u8			priority;
+	bool			paused;
 
 	spinlock_t		lock;
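
For orientation, the pause path above leans on two CFGx_LO bits that
dw_dmac_regs.h already defines; a sketch, assuming the usual DesignWare AHB
DMAC register layout (CH_SUSP at bit 8, FIFO_EMPTY read-only at bit 9):

	/* CFGx_LO bits used by the DMA_PAUSE/DMA_RESUME handling above */
	#define DWC_CFGL_CH_SUSP	(1 << 8)	/* software-suspend the channel */
	#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* RO: channel FIFO has drained */

Setting CH_SUSP first and then polling FIFO_EMPTY ensures no data is lost in
the channel FIFO before the pause is considered complete.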