Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          |   1
-rw-r--r--  drivers/dma/amba-pl08x.c     |  52
-rw-r--r--  drivers/dma/at_hdmac.c       |  15
-rw-r--r--  drivers/dma/at_hdmac_regs.h  |  21
-rw-r--r--  drivers/dma/coh901318.c      |   2
-rw-r--r--  drivers/dma/coh901318_lli.c  |   4
-rw-r--r--  drivers/dma/dw_dmac.c        |  28
-rw-r--r--  drivers/dma/ep93xx_dma.c     | 117
-rw-r--r--  drivers/dma/imx-dma.c        |  12
-rw-r--r--  drivers/dma/imx-sdma.c       | 114
-rw-r--r--  drivers/dma/intel_mid_dma.c  |   8
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  |   6
-rw-r--r--  drivers/dma/mv_xor.c         |  15
-rw-r--r--  drivers/dma/mv_xor.h         |   1
-rw-r--r--  drivers/dma/mxs-dma.c        | 194
-rw-r--r--  drivers/dma/pch_dma.c        |   2
-rw-r--r--  drivers/dma/pl330.c          |  31
-rw-r--r--  drivers/dma/ste_dma40.c      |   2
18 files changed, 428 insertions, 197 deletions
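A pattern that recurs throughout the diff below (amba-pl08x, coh901318, dw_dmac, imx-dma, intel_mid_dma, mxs-dma, pch_dma) is the replacement of sg_phys()/sg->length with sg_dma_address()/sg_dma_len(). After dma_map_sg() the address the device must use is the mapped DMA address, which can differ from the physical address when an IOMMU is present, and adjacent entries may have been merged by the mapping. A minimal sketch of the corrected pattern follows; struct my_hw_desc and fill_descs() are hypothetical names for illustration, not part of any driver in this series.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Hypothetical hardware descriptor, for illustration only. */
struct my_hw_desc {
	dma_addr_t src;
	dma_addr_t dst;
	u32 len;
};

/*
 * Fill one descriptor per mapped segment of a mem-to-dev transfer.
 * nents must be the value returned by dma_map_sg(), which may be
 * smaller than the original entry count if segments were merged.
 */
static void fill_descs(struct my_hw_desc *d, struct scatterlist *sgl,
		       int nents, dma_addr_t dev_addr)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		d[i].src = sg_dma_address(sg);	/* not sg_phys(sg) */
		d[i].dst = dev_addr;
		d[i].len = sg_dma_len(sg);	/* not sg->length */
	}
}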
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index ef378b5b17e4..aadeb5be9dba 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -238,6 +238,7 @@ config IMX_DMA config MXS_DMA bool "MXS DMA support" depends on SOC_IMX23 || SOC_IMX28 + select STMP_DEVICE select DMA_ENGINE help Support the MXS DMA engine. This engine including APBH-DMA diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 3d704abd7912..49ecbbb8932d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver; * struct vendor_data - vendor-specific config parameters for PL08x derivatives * @channels: the number of channels available in this variant * @dualmaster: whether this version supports dual AHB masters or not. + * @nomadik: whether the channels have Nomadik security extension bits + * that need to be checked for permission before use and some registers are + * missing */ struct vendor_data { u8 channels; bool dualmaster; + bool nomadik; }; /* @@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, spin_lock_irqsave(&ch->lock, flags); - if (!ch->serving) { + if (!ch->locked && !ch->serving) { ch->serving = virt_chan; ch->signal = -1; spin_unlock_irqrestore(&ch->lock, flags); @@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( int ret, tmp; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", - __func__, sgl->length, plchan->name); + __func__, sg_dma_len(sgl), plchan->name); txd = pl08x_get_txd(plchan, flags); if (!txd) { @@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( dsg->len = sg_dma_len(sg); if (direction == DMA_MEM_TO_DEV) { - dsg->src_addr = sg_phys(sg); + dsg->src_addr = sg_dma_address(sg); dsg->dst_addr = slave_addr; } else { dsg->src_addr = slave_addr; - dsg->dst_addr = sg_phys(sg); + dsg->dst_addr = sg_dma_address(sg); } } @@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) */ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) { + /* The Nomadik variant does not have the config register */ + if (pl08x->vd->nomadik) + return; writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } @@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) __func__, err); writel(err, pl08x->base + PL080_ERR_CLEAR); } - tc = readl(pl08x->base + PL080_INT_STATUS); + tc = readl(pl08x->base + PL080_TC_STATUS); if (tc) writel(tc, pl08x->base + PL080_TC_CLEAR); @@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) spin_lock_irqsave(&ch->lock, flags); virt_chan = ch->serving; - seq_printf(s, "%d\t\t%s\n", - ch->id, virt_chan ? virt_chan->name : "(none)"); + seq_printf(s, "%d\t\t%s%s\n", + ch->id, + virt_chan ? virt_chan->name : "(none)", + ch->locked ? 
" LOCKED" : ""); spin_unlock_irqrestore(&ch->lock, flags); } @@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Initialize physical channels */ - pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), + pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), GFP_KERNEL); if (!pl08x->phy_chans) { dev_err(&adev->dev, "%s failed to allocate " @@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); - ch->serving = NULL; ch->signal = -1; + + /* + * Nomadik variants can have channels that are locked + * down for the secure world only. Lock up these channels + * by perpetually serving a dummy virtual channel. + */ + if (vd->nomadik) { + u32 val; + + val = readl(ch->base + PL080_CH_CONFIG); + if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { + dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); + ch->locked = true; + } + } + dev_dbg(&adev->dev, "physical channel %d is %s\n", i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); } @@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = { .dualmaster = true, }; +static struct vendor_data vendor_nomadik = { + .channels = 8, + .dualmaster = true, + .nomadik = true, +}; + static struct vendor_data vendor_pl081 = { .channels = 2, .dualmaster = false, @@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = { }, /* Nomadik 8815 PL080 variant */ { - .id = 0x00280880, + .id = 0x00280080, .mask = 0x00ffffff, - .data = &vendor_pl080, + .data = &vendor_nomadik, }, { 0, 0 }, }; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index bf0d7e4e345b..7292aa87b2dd 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -39,7 +39,6 @@ */ #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) -#define ATC_DEFAULT_CTRLA (0) #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ |ATC_DIF(AT_DMA_MEM_IF)) @@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, return NULL; } - ctrla = ATC_DEFAULT_CTRLA; ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | ATC_SRC_ADDR_MODE_INCR | ATC_DST_ADDR_MODE_INCR @@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, * of the most common optimization. 
*/ if (!((src | dest | len) & 3)) { - ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; + ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; src_width = dst_width = 2; } else if (!((src | dest | len) & 1)) { - ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; + ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; src_width = dst_width = 1; } else { - ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; + ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; src_width = dst_width = 0; } @@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return NULL; } - ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; + ctrla = ATC_SCSIZE(sconfig->src_maxburst) + | ATC_DCSIZE(sconfig->dst_maxburst); ctrlb = ATC_IEN; switch (direction) { @@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, enum dma_transfer_direction direction) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_dma_slave *atslave = chan->private; struct dma_slave_config *sconfig = &atchan->dma_sconfig; u32 ctrla; /* prepare common CRTLA value */ - ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla + ctrla = ATC_SCSIZE(sconfig->src_maxburst) + | ATC_DCSIZE(sconfig->dst_maxburst) | ATC_DST_WIDTH(reg_width) | ATC_SRC_WIDTH(reg_width) | period_len >> reg_width; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 897a8bcaec90..8a6c8e8b2940 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -87,7 +87,26 @@ /* Bitfields in CTRLA */ #define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ #define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ -/* Chunck Tranfer size definitions are in at_hdmac.h */ +#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */ +#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16)) +#define ATC_SCSIZE_1 (0x0 << 16) +#define ATC_SCSIZE_4 (0x1 << 16) +#define ATC_SCSIZE_8 (0x2 << 16) +#define ATC_SCSIZE_16 (0x3 << 16) +#define ATC_SCSIZE_32 (0x4 << 16) +#define ATC_SCSIZE_64 (0x5 << 16) +#define ATC_SCSIZE_128 (0x6 << 16) +#define ATC_SCSIZE_256 (0x7 << 16) +#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */ +#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20)) +#define ATC_DCSIZE_1 (0x0 << 20) +#define ATC_DCSIZE_4 (0x1 << 20) +#define ATC_DCSIZE_8 (0x2 << 20) +#define ATC_DCSIZE_16 (0x3 << 20) +#define ATC_DCSIZE_32 (0x4 << 20) +#define ATC_DCSIZE_64 (0x5 << 20) +#define ATC_DCSIZE_128 (0x6 << 20) +#define ATC_DCSIZE_256 (0x7 << 20) #define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ #define ATC_SRC_WIDTH(x) ((x) << 24) #define ATC_SRC_WIDTH_BYTE (0x0 << 24) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 750925f9638b..e67b4e06a918 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!sgl) goto out; - if (sgl->length == 0) + if (sg_dma_len(sgl) == 0) goto out; spin_lock_irqsave(&cohc->lock, flg); diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index 6c0e2d4c6682..780e0429b38c 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c @@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, if (dir == DMA_MEM_TO_DEV) /* increment source address */ - src = sg_phys(sg); + src = sg_dma_address(sg); else /* increment destination address */ - dst = sg_phys(sg); + dst = sg_dma_address(sg); bytes_to_transfer = sg_dma_len(sg); diff --git 
a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 7439079f5eed..721296157577 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct dw_desc *desc; u32 len, dlen, mem; - mem = sg_phys(sg); + mem = sg_dma_address(sg); len = sg_dma_len(sg); if (!((mem | len) & 7)) @@ -809,7 +810,7 @@ slave_sg_todev_fill_desc: struct dw_desc *desc; u32 len, dlen, mem; - mem = sg_phys(sg); + mem = sg_dma_address(sg); len = sg_dma_len(sg); if (!((mem | len) & 7)) @@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev) err = PTR_ERR(dw->clk); goto err_clk; } - clk_enable(dw->clk); + clk_prepare_enable(dw->clk); /* force dma off, just in case */ dw_dma_off(dw); @@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev) return 0; err_irq: - clk_disable(dw->clk); + clk_disable_unprepare(dw->clk); clk_put(dw->clk); err_clk: iounmap(dw->regs); @@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev) channel_clear_bit(dw, CH_EN, dwc->mask); } - clk_disable(dw->clk); + clk_disable_unprepare(dw->clk); clk_put(dw->clk); iounmap(dw->regs); @@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev) struct dw_dma *dw = platform_get_drvdata(pdev); dw_dma_off(platform_get_drvdata(pdev)); - clk_disable(dw->clk); + clk_disable_unprepare(dw->clk); } static int dw_suspend_noirq(struct device *dev) @@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev) struct dw_dma *dw = platform_get_drvdata(pdev); dw_dma_off(platform_get_drvdata(pdev)); - clk_disable(dw->clk); + clk_disable_unprepare(dw->clk); return 0; } @@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct dw_dma *dw = platform_get_drvdata(pdev); - clk_enable(dw->clk); + clk_prepare_enable(dw->clk); dma_writel(dw, CFG, DW_CFG_DMA_EN); return 0; } @@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = { .poweroff_noirq = dw_suspend_noirq, }; +#ifdef CONFIG_OF +static const struct of_device_id dw_dma_id_table[] = { + { .compatible = "snps,dma-spear1340" }, + {} +}; +MODULE_DEVICE_TABLE(of, dw_dma_id_table); +#endif + static struct platform_driver dw_driver = { .remove = __exit_p(dw_remove), .shutdown = dw_shutdown, .driver = { .name = "dw_dmac", .pm = &dw_dev_pm_ops, + .of_match_table = of_match_ptr(dw_dma_id_table), }, }; @@ -1616,4 +1626,4 @@ module_exit(dw_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); -MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); +MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index f6e9b572b998..c64917ec313d 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -71,6 +71,7 @@ #define M2M_CONTROL_TM_SHIFT 13 #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) +#define M2M_CONTROL_NFBINT BIT(21) #define M2M_CONTROL_RSS_SHIFT 22 #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) @@ -79,7 +80,22 @@ #define M2M_CONTROL_PWSC_SHIFT 25 #define M2M_INTERRUPT 0x0004 
-#define M2M_INTERRUPT_DONEINT BIT(1) +#define M2M_INTERRUPT_MASK 6 + +#define M2M_STATUS 0x000c +#define M2M_STATUS_CTL_SHIFT 1 +#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_BUF_SHIFT 4 +#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_DONE BIT(6) #define M2M_BCR0 0x0010 #define M2M_BCR1 0x0014 @@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) /* * M2M DMA implementation - * - * For the M2M transfers we don't use NFB at all. This is because it simply - * doesn't work well with memcpy transfers. When you submit both buffers it is - * extremely unlikely that you get an NFB interrupt, but it instead reports - * DONE interrupt and both buffers are already transferred which means that we - * weren't able to update the next buffer. - * - * So for now we "simulate" NFB by just submitting buffer after buffer - * without double buffering. */ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) @@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) m2m_fill_desc(edmac); control |= M2M_CONTROL_DONEINT; + if (ep93xx_dma_advance_active(edmac)) { + m2m_fill_desc(edmac); + control |= M2M_CONTROL_NFBINT; + } + /* * Now we can finally enable the channel. For M2M channel this must be * done _after_ the BCRx registers are programmed. @@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) } } +/* + * According to EP93xx User's Guide, we should receive DONE interrupt when all + * M2M DMA controller transactions complete normally. This is not always the + * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel + * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel + * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation). + * In effect, disabling the channel when only DONE bit is set could stop + * currently running DMA transfer. To avoid this, we use Buffer FSM and + * Control FSM to check current state of DMA channel. + */ static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) { + u32 status = readl(edmac->regs + M2M_STATUS); + u32 ctl_fsm = status & M2M_STATUS_CTL_MASK; + u32 buf_fsm = status & M2M_STATUS_BUF_MASK; + bool done = status & M2M_STATUS_DONE; + bool last_done; u32 control; + struct ep93xx_dma_desc *desc; - if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) + /* Accept only DONE and NFB interrupts */ + if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK)) return INTERRUPT_UNKNOWN; - /* Clear the DONE bit */ - writel(0, edmac->regs + M2M_INTERRUPT); + if (done) { + /* Clear the DONE bit */ + writel(0, edmac->regs + M2M_INTERRUPT); + } - /* Disable interrupts and the channel */ - control = readl(edmac->regs + M2M_CONTROL); - control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); - writel(control, edmac->regs + M2M_CONTROL); + /* + * Check whether we are done with descriptors or not. This, together + * with DMA channel state, determines action to take in interrupt. 
+ */ + desc = ep93xx_dma_get_active(edmac); + last_done = !desc || desc->txd.cookie; /* - * Since we only get DONE interrupt we have to find out ourselves - * whether there still is something to process. So we try to advance - * the chain an see whether it succeeds. + * Use M2M DMA Buffer FSM and Control FSM to check current state of + * DMA channel. Using DONE and NFB bits from channel status register + * or bits from channel interrupt register is not reliable. */ - if (ep93xx_dma_advance_active(edmac)) { - edmac->edma->hw_submit(edmac); - return INTERRUPT_NEXT_BUFFER; + if (!last_done && + (buf_fsm == M2M_STATUS_BUF_NO || + buf_fsm == M2M_STATUS_BUF_ON)) { + /* + * Two buffers are ready for update when Buffer FSM is in + * DMA_NO_BUF state. Only one buffer can be prepared without + * disabling the channel or polling the DONE bit. + * To simplify things, always prepare only one buffer. + */ + if (ep93xx_dma_advance_active(edmac)) { + m2m_fill_desc(edmac); + if (done && !edmac->chan.private) { + /* Software trigger for memcpy channel */ + control = readl(edmac->regs + M2M_CONTROL); + control |= M2M_CONTROL_START; + writel(control, edmac->regs + M2M_CONTROL); + } + return INTERRUPT_NEXT_BUFFER; + } else { + last_done = true; + } + } + + /* + * Disable the channel only when Buffer FSM is in DMA_NO_BUF state + * and Control FSM is in DMA_STALL state. + */ + if (last_done && + buf_fsm == M2M_STATUS_BUF_NO && + ctl_fsm == M2M_STATUS_CTL_STALL) { + /* Disable interrupts and the channel */ + control = readl(edmac->regs + M2M_CONTROL); + control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT + | M2M_CONTROL_ENABLE); + writel(control, edmac->regs + M2M_CONTROL); + return INTERRUPT_DONE; } - return INTERRUPT_DONE; + /* + * Nothing to do this time. + */ + return INTERRUPT_NEXT_BUFFER; } /* diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index bb787d8e1529..fcfeb3cd8d31 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) struct scatterlist *sg = d->sg; unsigned long now; - now = min(d->len, sg->length); + now = min(d->len, sg_dma_len(sg)); if (d->len != IMX_DMA_LENGTH_LOOP) d->len -= now; @@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); for_each_sg(sgl, sg, sg_len, i) { - dma_length += sg->length; + dma_length += sg_dma_len(sg); } switch (imxdmac->word_size) { case DMA_SLAVE_BUSWIDTH_4_BYTES: - if (sgl->length & 3 || sgl->dma_address & 3) + if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3) return NULL; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: - if (sgl->length & 1 || sgl->dma_address & 1) + if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1) return NULL; break; case DMA_SLAVE_BUSWIDTH_1_BYTE: @@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( imxdmac->sg_list[i].page_link = 0; imxdmac->sg_list[i].offset = 0; imxdmac->sg_list[i].dma_address = dma_addr; - imxdmac->sg_list[i].length = period_len; + sg_dma_len(&imxdmac->sg_list[i]) = period_len; dma_addr += period_len; } /* close the loop */ imxdmac->sg_list[periods].offset = 0; - imxdmac->sg_list[periods].length = 0; + sg_dma_len(&imxdmac->sg_list[periods]) = 0; imxdmac->sg_list[periods].page_link = ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d3e38e28bb6b..1dc2a4ad0026 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -24,7 +24,7 @@ 
#include <linux/mm.h> #include <linux/interrupt.h> #include <linux/clk.h> -#include <linux/wait.h> +#include <linux/delay.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/spinlock.h> @@ -271,6 +271,7 @@ struct sdma_channel { enum dma_status status; unsigned int chn_count; unsigned int chn_real_count; + struct tasklet_struct tasklet; }; #define IMX_DMA_SG_LOOP BIT(0) @@ -322,8 +323,9 @@ struct sdma_engine { struct sdma_context_data *context; dma_addr_t context_phys; struct dma_device dma_device; - struct clk *clk; - struct mutex channel_0_lock; + struct clk *clk_ipg; + struct clk *clk_ahb; + spinlock_t channel_0_lock; struct sdma_script_start_addrs *script_addrs; }; @@ -401,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) } /* - * sdma_run_channel - run a channel and wait till it's done + * sdma_run_channel0 - run a channel and wait till it's done */ -static int sdma_run_channel(struct sdma_channel *sdmac) +static int sdma_run_channel0(struct sdma_engine *sdma) { - struct sdma_engine *sdma = sdmac->sdma; - int channel = sdmac->channel; int ret; + unsigned long timeout = 500; - init_completion(&sdmac->done); + sdma_enable_channel(sdma, 0); - sdma_enable_channel(sdma, channel); + while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) { + if (timeout-- <= 0) + break; + udelay(1); + } - ret = wait_for_completion_timeout(&sdmac->done, HZ); + if (ret) { + /* Clear the interrupt status */ + writel_relaxed(ret, sdma->regs + SDMA_H_INTR); + } else { + dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); + } return ret ? 0 : -ETIMEDOUT; } @@ -425,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, void *buf_virt; dma_addr_t buf_phys; int ret; - - mutex_lock(&sdma->channel_0_lock); + unsigned long flags; buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL); if (!buf_virt) { - ret = -ENOMEM; - goto err_out; + return -ENOMEM; } + spin_lock_irqsave(&sdma->channel_0_lock, flags); + bd0->mode.command = C0_SETPM; bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; bd0->mode.count = size / 2; @@ -444,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, memcpy(buf_virt, buf, size); - ret = sdma_run_channel(&sdma->channel[0]); + ret = sdma_run_channel0(sdma); - dma_free_coherent(NULL, size, buf_virt, buf_phys); + spin_unlock_irqrestore(&sdma->channel_0_lock, flags); -err_out: - mutex_unlock(&sdma->channel_0_lock); + dma_free_coherent(NULL, size, buf_virt, buf_phys); return ret; } @@ -534,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) sdmac->desc.callback(sdmac->desc.callback_param); } -static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) +static void sdma_tasklet(unsigned long data) { - complete(&sdmac->done); + struct sdma_channel *sdmac = (struct sdma_channel *) data; - /* not interested in channel 0 interrupts */ - if (sdmac->channel == 0) - return; + complete(&sdmac->done); if (sdmac->flags & IMX_DMA_SG_LOOP) sdma_handle_channel_loop(sdmac); @@ -554,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) unsigned long stat; stat = readl_relaxed(sdma->regs + SDMA_H_INTR); + /* not interested in channel 0 interrupts */ + stat &= ~1; writel_relaxed(stat, sdma->regs + SDMA_H_INTR); while (stat) { int channel = fls(stat) - 1; struct sdma_channel *sdmac = &sdma->channel[channel]; - mxc_sdma_handle_channel(sdmac); + tasklet_schedule(&sdmac->tasklet); __clear_bit(channel, &stat); } @@ -659,6 
+668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) struct sdma_context_data *context = sdma->context; struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; int ret; + unsigned long flags; if (sdmac->direction == DMA_DEV_TO_MEM) { load_address = sdmac->pc_from_device; @@ -676,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]); dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]); - mutex_lock(&sdma->channel_0_lock); + spin_lock_irqsave(&sdma->channel_0_lock, flags); memset(context, 0, sizeof(*context)); context->channel_state.pc = load_address; @@ -695,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac) bd0->mode.count = sizeof(*context) / 4; bd0->buffer_addr = sdma->context_phys; bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; + ret = sdma_run_channel0(sdma); - ret = sdma_run_channel(&sdma->channel[0]); - - mutex_unlock(&sdma->channel_0_lock); + spin_unlock_irqrestore(&sdma->channel_0_lock, flags); return ret; } @@ -806,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac) init_completion(&sdmac->done); - sdmac->buf_tail = 0; - return 0; out: @@ -859,7 +866,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) sdmac->peripheral_type = data->peripheral_type; sdmac->event_id0 = data->dma_request; - clk_enable(sdmac->sdma->clk); + clk_enable(sdmac->sdma->clk_ipg); + clk_enable(sdmac->sdma->clk_ahb); ret = sdma_request_channel(sdmac); if (ret) @@ -896,7 +904,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan) dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); - clk_disable(sdma->clk); + clk_disable(sdma->clk_ipg); + clk_disable(sdma->clk_ahb); } static struct dma_async_tx_descriptor *sdma_prep_slave_sg( @@ -916,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( sdmac->flags = 0; + sdmac->buf_tail = 0; + dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", sg_len, channel); @@ -938,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( bd->buffer_addr = sg->dma_address; - count = sg->length; + count = sg_dma_len(sg); if (count > 0xffff) { dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", @@ -1016,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( sdmac->status = DMA_IN_PROGRESS; + sdmac->buf_tail = 0; + sdmac->flags |= IMX_DMA_SG_LOOP; sdmac->direction = direction; ret = sdma_load_context(sdmac); @@ -1169,12 +1182,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) addr = (void *)header + header->script_addrs_start; ram_code = (void *)header + header->ram_code_start; - clk_enable(sdma->clk); + clk_enable(sdma->clk_ipg); + clk_enable(sdma->clk_ahb); /* download the RAM image for SDMA */ sdma_load_script(sdma, ram_code, header->ram_code_size, addr->ram_code_start_addr); - clk_disable(sdma->clk); + clk_disable(sdma->clk_ipg); + clk_disable(sdma->clk_ahb); sdma_add_scripts(sdma, addr); @@ -1216,7 +1231,8 @@ static int __init sdma_init(struct sdma_engine *sdma) return -ENODEV; } - clk_enable(sdma->clk); + clk_enable(sdma->clk_ipg); + clk_enable(sdma->clk_ahb); /* Be sure SDMA has not started yet */ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); @@ -1269,12 +1285,14 @@ static int __init sdma_init(struct sdma_engine *sdma) /* Initializes channel's priorities */ sdma_set_channel_priority(&sdma->channel[0], 7); - clk_disable(sdma->clk); + 
clk_disable(sdma->clk_ipg); + clk_disable(sdma->clk_ahb); return 0; err_dma_alloc: - clk_disable(sdma->clk); + clk_disable(sdma->clk_ipg); + clk_disable(sdma->clk_ahb); dev_err(sdma->dev, "initialisation failed with %d\n", ret); return ret; } @@ -1297,7 +1315,7 @@ static int __init sdma_probe(struct platform_device *pdev) if (!sdma) return -ENOMEM; - mutex_init(&sdma->channel_0_lock); + spin_lock_init(&sdma->channel_0_lock); sdma->dev = &pdev->dev; @@ -1313,12 +1331,21 @@ static int __init sdma_probe(struct platform_device *pdev) goto err_request_region; } - sdma->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(sdma->clk)) { - ret = PTR_ERR(sdma->clk); + sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(sdma->clk_ipg)) { + ret = PTR_ERR(sdma->clk_ipg); goto err_clk; } + sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(sdma->clk_ahb)) { + ret = PTR_ERR(sdma->clk_ahb); + goto err_clk; + } + + clk_prepare(sdma->clk_ipg); + clk_prepare(sdma->clk_ahb); + sdma->regs = ioremap(iores->start, resource_size(iores)); if (!sdma->regs) { ret = -ENOMEM; @@ -1359,6 +1386,8 @@ static int __init sdma_probe(struct platform_device *pdev) dma_cookie_init(&sdmac->chan); sdmac->channel = i; + tasklet_init(&sdmac->tasklet, sdma_tasklet, + (unsigned long) sdmac); /* * Add the channel to the DMAC list. Do not add channel 0 though * because we need it internally in the SDMA driver. This also means @@ -1426,7 +1455,6 @@ err_alloc: err_request_irq: iounmap(sdma->regs); err_ioremap: - clk_put(sdma->clk); err_clk: release_mem_region(iores->start, resource_size(iores)); err_request_region: diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index c900ca7aaec4..222e907bfaaa 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, } } /*Populate CTL_HI values*/ - ctl_hi.ctlx.block_ts = get_block_ts(sg->length, + ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), desc->width, midc->dma->block_size); /*Populate SAR and DAR values*/ - sg_phy_addr = sg_phys(sg); + sg_phy_addr = sg_dma_address(sg); if (desc->dirn == DMA_MEM_TO_DEV) { lli_bloc_desc->sar = sg_phy_addr; lli_bloc_desc->dar = mids->dma_slave.dst_addr; @@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( txd = intel_mid_dma_prep_memcpy(chan, mids->dma_slave.dst_addr, mids->dma_slave.src_addr, - sgl->length, + sg_dma_len(sgl), flags); return txd; } else { @@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", sg_len, direction, flags); - txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); + txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); if (NULL == txd) { pr_err("MDMA: Prep memcpy failed\n"); return NULL; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 62e3f8ec2461..5ec72044ea4c 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1715,7 +1715,7 @@ static int __init ipu_probe(struct platform_device *pdev) } /* Make sure IPU HSP clock is running */ - clk_enable(ipu_data.ipu_clk); + clk_prepare_enable(ipu_data.ipu_clk); /* Disable all interrupts */ idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); @@ -1747,7 +1747,7 @@ static int __init ipu_probe(struct platform_device *pdev) err_idmac_init: err_attach_irq: ipu_irq_detach_irq(&ipu_data, pdev); - clk_disable(ipu_data.ipu_clk); + 
clk_disable_unprepare(ipu_data.ipu_clk); clk_put(ipu_data.ipu_clk); err_clk_get: iounmap(ipu_data.reg_ic); @@ -1765,7 +1765,7 @@ static int __exit ipu_remove(struct platform_device *pdev) ipu_idmac_exit(ipu); ipu_irq_detach_irq(ipu, pdev); - clk_disable(ipu->ipu_clk); + clk_disable_unprepare(ipu->ipu_clk); clk_put(ipu->ipu_clk); iounmap(ipu->reg_ic); iounmap(ipu->reg_ipu); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fa5d55fea46c..0b12e68bf79c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/memory.h> +#include <linux/clk.h> #include <plat/mv_xor.h> #include "dmaengine.h" @@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev) if (dram) mv_xor_conf_mbus_windows(msp, dram); + /* Not all platforms can gate the clock, so it is not + * an error if the clock does not exists. + */ + msp->clk = clk_get(&pdev->dev, NULL); + if (!IS_ERR(msp->clk)) + clk_prepare_enable(msp->clk); + return 0; } static int mv_xor_shared_remove(struct platform_device *pdev) { + struct mv_xor_shared_private *msp = platform_get_drvdata(pdev); + + if (!IS_ERR(msp->clk)) { + clk_disable_unprepare(msp->clk); + clk_put(msp->clk); + } + return 0; } diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 654876b7ba1d..a5b422f5a8ab 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -55,6 +55,7 @@ struct mv_xor_shared_private { void __iomem *xor_base; void __iomem *xor_high_base; + struct clk *clk; }; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 655d4ce6ed0d..c96ab15319f2 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -22,11 +22,14 @@ #include <linux/platform_device.h> #include <linux/dmaengine.h> #include <linux/delay.h> +#include <linux/module.h> #include <linux/fsl/mxs-dma.h> +#include <linux/stmp_device.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <asm/irq.h> #include <mach/mxs.h> -#include <mach/common.h> #include "dmaengine.h" @@ -36,12 +39,8 @@ * dma can program the controller registers of peripheral devices. */ -#define MXS_DMA_APBH 0 -#define MXS_DMA_APBX 1 -#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) - -#define APBH_VERSION_LATEST 3 -#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) +#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH) +#define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA) #define HW_APBHX_CTRL0 0x000 #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) @@ -51,13 +50,14 @@ #define HW_APBHX_CTRL2 0x020 #define HW_APBHX_CHANNEL_CTRL 0x030 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 -#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) -#define HW_APBX_VERSION 0x800 -#define BP_APBHX_VERSION_MAJOR 24 -#define HW_APBHX_CHn_NXTCMDAR(n) \ - (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) -#define HW_APBHX_CHn_SEMA(n) \ - (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70) +/* + * The offset of NXTCMDAR register is different per both dma type and version, + * while stride for each channel is all the same 0x70. + */ +#define HW_APBHX_CHn_NXTCMDAR(d, n) \ + (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) +#define HW_APBHX_CHn_SEMA(d, n) \ + (((dma_is_apbh(d) && apbh_is_old(d)) ? 
0x080 : 0x140) + (n) * 0x70) /* * ccw bits definitions @@ -121,9 +121,19 @@ struct mxs_dma_chan { #define MXS_DMA_CHANNELS 16 #define MXS_DMA_CHANNELS_MASK 0xffff +enum mxs_dma_devtype { + MXS_DMA_APBH, + MXS_DMA_APBX, +}; + +enum mxs_dma_id { + IMX23_DMA, + IMX28_DMA, +}; + struct mxs_dma_engine { - int dev_id; - unsigned int version; + enum mxs_dma_id dev_id; + enum mxs_dma_devtype type; void __iomem *base; struct clk *clk; struct dma_device dma_device; @@ -131,17 +141,86 @@ struct mxs_dma_engine { struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; }; +struct mxs_dma_type { + enum mxs_dma_id id; + enum mxs_dma_devtype type; +}; + +static struct mxs_dma_type mxs_dma_types[] = { + { + .id = IMX23_DMA, + .type = MXS_DMA_APBH, + }, { + .id = IMX23_DMA, + .type = MXS_DMA_APBX, + }, { + .id = IMX28_DMA, + .type = MXS_DMA_APBH, + }, { + .id = IMX28_DMA, + .type = MXS_DMA_APBX, + } +}; + +static struct platform_device_id mxs_dma_ids[] = { + { + .name = "imx23-dma-apbh", + .driver_data = (kernel_ulong_t) &mxs_dma_types[0], + }, { + .name = "imx23-dma-apbx", + .driver_data = (kernel_ulong_t) &mxs_dma_types[1], + }, { + .name = "imx28-dma-apbh", + .driver_data = (kernel_ulong_t) &mxs_dma_types[2], + }, { + .name = "imx28-dma-apbx", + .driver_data = (kernel_ulong_t) &mxs_dma_types[3], + }, { + /* end of list */ + } +}; + +static const struct of_device_id mxs_dma_dt_ids[] = { + { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], }, + { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], }, + { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], }, + { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids); + +static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct mxs_dma_chan, chan); +} + +int mxs_dma_is_apbh(struct dma_chan *chan) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + + return dma_is_apbh(mxs_dma); +} + +int mxs_dma_is_apbx(struct dma_chan *chan) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + + return !dma_is_apbh(mxs_dma); +} + static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) { struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; - if (dma_is_apbh() && apbh_is_old()) + if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); else writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), - mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); } static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) @@ -151,10 +230,10 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) /* set cmd_addr up */ writel(mxs_chan->ccw_phys, - mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); + mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); /* write 1 to SEMA to kick off the channel */ - writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); } static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) @@ -168,12 +247,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) int chan_id = mxs_chan->chan.chan_id; /* freeze the channel */ - if (dma_is_apbh() 
&& apbh_is_old()) + if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); else writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); mxs_chan->status = DMA_PAUSED; } @@ -184,21 +263,16 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) int chan_id = mxs_chan->chan.chan_id; /* unfreeze the channel */ - if (dma_is_apbh() && apbh_is_old()) + if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR); else writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); mxs_chan->status = DMA_IN_PROGRESS; } -static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) -{ - return container_of(chan, struct mxs_dma_chan, chan); -} - static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) { return dma_cookie_assign(tx); @@ -220,11 +294,11 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) /* completion status */ stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); stat1 &= MXS_DMA_CHANNELS_MASK; - writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); + writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); /* error status */ stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); - writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); + writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); /* * When both completion and error of termination bits set at the @@ -415,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); } else { for_each_sg(sgl, sg, sg_len, i) { - if (sg->length > MAX_XFER_BYTES) { + if (sg_dma_len(sg) > MAX_XFER_BYTES) { dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", - sg->length, MAX_XFER_BYTES); + sg_dma_len(sg), MAX_XFER_BYTES); goto err_out; } @@ -425,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; ccw->bufaddr = sg->dma_address; - ccw->xfer_bytes = sg->length; + ccw->xfer_bytes = sg_dma_len(sg); ccw->bits = 0; ccw->bits |= CCW_CHAIN; @@ -567,27 +641,21 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) if (ret) return ret; - ret = mxs_reset_block(mxs_dma->base); + ret = stmp_reset_block(mxs_dma->base); if (ret) goto err_out; - /* only major version matters */ - mxs_dma->version = readl(mxs_dma->base + - ((mxs_dma->dev_id == MXS_DMA_APBX) ? 
- HW_APBX_VERSION : HW_APBH_VERSION)) >> - BP_APBHX_VERSION_MAJOR; - /* enable apbh burst */ - if (dma_is_apbh()) { + if (dma_is_apbh(mxs_dma)) { writel(BM_APBH_CTRL0_APB_BURST_EN, - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); writel(BM_APBH_CTRL0_APB_BURST8_EN, - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); } /* enable irq for all the channels */ writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, - mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); + mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET); err_out: clk_disable_unprepare(mxs_dma->clk); @@ -596,8 +664,9 @@ err_out: static int __init mxs_dma_probe(struct platform_device *pdev) { - const struct platform_device_id *id_entry = - platform_get_device_id(pdev); + const struct platform_device_id *id_entry; + const struct of_device_id *of_id; + const struct mxs_dma_type *dma_type; struct mxs_dma_engine *mxs_dma; struct resource *iores; int ret, i; @@ -606,7 +675,15 @@ static int __init mxs_dma_probe(struct platform_device *pdev) if (!mxs_dma) return -ENOMEM; - mxs_dma->dev_id = id_entry->driver_data; + of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); + if (of_id) + id_entry = of_id->data; + else + id_entry = platform_get_device_id(pdev); + + dma_type = (struct mxs_dma_type *)id_entry->driver_data; + mxs_dma->type = dma_type->type; + mxs_dma->dev_id = dma_type->id; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -689,23 +766,12 @@ err_request_region: return ret; } -static struct platform_device_id mxs_dma_type[] = { - { - .name = "mxs-dma-apbh", - .driver_data = MXS_DMA_APBH, - }, { - .name = "mxs-dma-apbx", - .driver_data = MXS_DMA_APBX, - }, { - /* end of list */ - } -}; - static struct platform_driver mxs_dma_driver = { .driver = { .name = "mxs-dma", + .of_match_table = mxs_dma_dt_ids, }, - .id_table = mxs_dma_type, + .id_table = mxs_dma_ids, }; static int __init mxs_dma_module_init(void) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 65c0495a6d40..987ab5cd2617 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, goto err_desc_get; desc->regs.dev_addr = reg; - desc->regs.mem_addr = sg_phys(sg); + desc->regs.mem_addr = sg_dma_address(sg); desc->regs.size = sg_dma_len(sg); desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index fa3fb21e60be..e4feba6b03c0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -21,7 +21,6 @@ #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> -#include <linux/interrupt.h> #include <linux/amba/bus.h> #include <linux/amba/pl330.h> #include <linux/pm_runtime.h> @@ -393,6 +392,8 @@ struct pl330_req { struct pl330_reqcfg *cfg; /* Pointer to first xfer in the request. 
*/ struct pl330_xfer *x; + /* Hook to attach to DMAC's list of reqs with due callback */ + struct list_head rqd; }; /* @@ -462,8 +463,6 @@ struct _pl330_req { /* Number of bytes taken to setup MC for the req */ u32 mc_len; struct pl330_req *r; - /* Hook to attach to DMAC's list of reqs with due callback */ - struct list_head rqd; }; /* ToBeDone for tasklet */ @@ -1684,7 +1683,7 @@ static void pl330_dotask(unsigned long data) /* Returns 1 if state was updated, 0 otherwise */ static int pl330_update(const struct pl330_info *pi) { - struct _pl330_req *rqdone; + struct pl330_req *rqdone, *tmp; struct pl330_dmac *pl330; unsigned long flags; void __iomem *regs; @@ -1751,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi) if (active == -1) /* Aborted */ continue; - rqdone = &thrd->req[active]; + /* Detach the req */ + rqdone = thrd->req[active].r; + thrd->req[active].r = NULL; + mark_free(thrd, active); /* Get going again ASAP */ @@ -1763,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi) } /* Now that we are in no hurry, do the callbacks */ - while (!list_empty(&pl330->req_done)) { - struct pl330_req *r; - - rqdone = container_of(pl330->req_done.next, - struct _pl330_req, rqd); - - list_del_init(&rqdone->rqd); - - /* Detach the req */ - r = rqdone->r; - rqdone->r = NULL; + list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { + list_del(&rqdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); - _callback(r, PL330_ERR_NONE); + _callback(rqdone, PL330_ERR_NONE); spin_lock_irqsave(&pl330->lock, flags); } @@ -2322,7 +2315,7 @@ static void pl330_tasklet(unsigned long data) /* Pick up ripe tomatoes */ list_for_each_entry_safe(desc, _dt, &pch->work_list, node) if (desc->status == DONE) { - if (pch->cyclic) + if (!pch->cyclic) dma_cookie_complete(&desc->txd); list_move_tail(&desc->node, &list); } @@ -2540,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc) } /* Returns the number of descriptors added to the DMAC pool */ -int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) +static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) { struct dma_pl330_desc *desc; unsigned long flags; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 2ed1ac3513f3..000d309602b2 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, } sg[periods].offset = 0; - sg[periods].length = 0; + sg_dma_len(&sg[periods]) = 0; sg[periods].page_link = ((unsigned long)sg | 0x01) & ~0x02; |
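Several drivers above (dw_dmac, ipu_idmac, mv_xor, imx-sdma) switch from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(). Under the common clock framework, clk_prepare() may sleep and therefore cannot run in atomic context; the combined helpers keep the prepare/enable and disable/unprepare steps balanced. A sketch of the usual probe/remove pairing, assuming a hypothetical foo platform driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* released on driver detach */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct clk *clk = platform_get_drvdata(pdev);

	clk_disable_unprepare(clk);	/* balances the call in probe */
	return 0;
}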
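The imx-sdma hunks replace wait_for_completion_timeout() in the old sdma_run_channel() with a bounded busy-wait in sdma_run_channel0(), since channel 0 commands are short and are now issued under a spinlock where sleeping is not allowed. The general shape of such a poll is sketched below; IRQ_STATUS and wait_channel_done() are hypothetical names, not the driver's own.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define IRQ_STATUS	0x004	/* hypothetical status register offset */

/* Poll a write-1-to-clear done bit for up to ~500us without sleeping. */
static int wait_channel_done(void __iomem *regs)
{
	unsigned long timeout = 500;
	u32 stat;

	while (!((stat = readl_relaxed(regs + IRQ_STATUS)) & 1)) {
		if (!timeout--)
			return -ETIMEDOUT;
		udelay(1);
	}

	writel_relaxed(stat & 1, regs + IRQ_STATUS);	/* ack the bit */
	return 0;
}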
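Finally, the mxs-dma rework shows the standard way to support both device tree and legacy board-file probing: each of_device_id entry's .data points back at a platform_device_id entry, so probe() resolves driver_data through a single path either way. A condensed sketch under the same assumptions, with hypothetical foo names:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct platform_device_id foo_ids[] = {
	{ .name = "imx23-foo", .driver_data = 23 },
	{ .name = "imx28-foo", .driver_data = 28 },
	{ /* sentinel */ }
};

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "fsl,imx23-foo", .data = &foo_ids[0] },
	{ .compatible = "fsl,imx28-foo", .data = &foo_ids[1] },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct platform_device_id *id;

	of_id = of_match_device(foo_dt_ids, &pdev->dev);
	if (of_id)
		id = of_id->data;	/* DT probe: map back to the id table */
	else
		id = platform_get_device_id(pdev);	/* legacy probe */
	if (!id)
		return -ENODEV;

	dev_info(&pdev->dev, "variant %lu\n", (unsigned long)id->driver_data);
	return 0;
}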