From ae128293d97404f491dc76f1843c7adacfec3441 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 15 Jun 2015 17:25:16 +0900 Subject: dmaengine: pl330: Fix overflow when reporting residue in memcpy During memcpy operations the residue was always set to an overflowed u32 value. In the pl330_tx_status() function, the number of currently transferred bytes was subtracted from the internal "bytes_requested" field. However, "bytes_requested" was never initialized to the length of the memcpy buffer, so the transferred bytes were subtracted from 0, causing the overflow. Signed-off-by: Krzysztof Kozlowski Cc: Fixes: aee4d1fac887 ("dmaengine: pl330: improve pl330_tx_status() function") Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f513f77b1d85..c535cd059724 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.brst_len = 1; desc->rqcfg.brst_len = get_burst_len(desc, len); + desc->bytes_requested = len; desc->txd.flags = flags; -- cgit v1.2.3 From 5dd90e5b91e0f5c925b12b132c7cd27538870256 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 15 Jun 2015 23:00:09 +0900 Subject: dmaengine: pl330: Really fix choppy sound because of wrong residue calculation When the pl330 driver was used during sound playback, after some time or after a number of plays the sound became choppy or totally noisy. For example, on the Odroid XU3 board the first four executions of aplay with a small WAVE file worked fine, but the fifth was unrecognizable, with errors: $ aplay /usr/share/sounds/alsa/Front_Right.wav underrun!!! (at least 0.095 ms long) The issue was caused by a wrong residue being reported by the pl330 driver to pcm_dmaengine for its cyclic DMA transfers. pl330_tx_status(), the residue reporting function, used a "last" flag in a descriptor to indicate that there is no more data to send. pl330_tx_submit() iterated over the queued descriptors, intending to clear this flag on each of them and then mark only the final descriptor as "last". However, the loop cleared the flag on the final descriptor itself on every iteration (only to set it again afterwards), not on the descriptors being visited. Thus, once a descriptor had been marked as last it effectively stayed marked forever, causing the residue to be reported too low. Signed-off-by: Krzysztof Kozlowski Fixes: aee4d1fac887 ("dmaengine: pl330: improve pl330_tx_status() function") Cc: Reported-by: gabriel@unseen.is Suggested-by: Marek Szyprowski Tested-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index c535cd059724..ecab4ea059b4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) desc->txd.callback = last->txd.callback; desc->txd.callback_param = last->txd.callback_param; } - last->last = false; + desc->last = false; dma_cookie_assign(&desc->txd); -- cgit v1.2.3 From 20cadcb4df3eebd82410eab4aa91d5b083e16cd1 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Wed, 17 Jun 2015 16:22:26 +0200 Subject: dmaengine: at_xdmac: fix bug about channel configuration When using descriptor view 2 or higher, we don't write the configuration into the AT_XDMAC_CC register because this configuration will be fetched from the descriptor.
Unfortunately, the PROT bit is not updated with this method, so we have to do it manually before enabling the channel. Signed-off-by: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index cf1213de7865..52ca1ccefa3f 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, * descriptor view 2 since some fields of the configuration register * depend on transfer size and src/dest addresses. */ - if (at_xdmac_chan_is_cyclic(atchan)) { + if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); - } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) { + else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; - } else { - /* - * No need to write AT_XDMAC_CC reg, it will be done when the - * descriptor is fecthed. - */ + else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; - } + /* + * Even if the register will be updated from the configuration in the + * descriptor when using view 2 or higher, the PROT bit won't be set + * properly. This bit can be modified only by using the channel + * configuration register. + */ + at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); reg |= AT_XDMAC_CNDC_NDDUP | AT_XDMAC_CNDC_NDSUP -- cgit v1.2.3 From 93dce3a6434f632dbd684ec1d9c3dc66a14e27e1 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 18 Jun 2015 13:25:41 +0200 Subject: dmaengine: at_hdmac: fix residue computation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As stated by the programmer's datasheet and confirmed by the IP designer, the Block Transfer Size (BTSIZE) bitfield of the Channel x Control A Register (CTRLAx) always refers to a number of Source Width (SRC_WIDTH) transfers. Both the SRC_WIDTH and BTSIZE bitfields can be extracted from the CTRLAx register to compute the DMA residue. So the 'tx_width' field is useless and can be removed from struct at_desc. Before this patch, atc_prep_slave_sg() was not consistent: BTSIZE was correctly initialized according to the SRC_WIDTH but 'tx_width' was always set to reg_width, which was incorrect for MEM_TO_DEV transfers. This led to a wrong DMA residue whenever 'tx_width' != SRC_WIDTH. Also, the 'tx_width' field was mostly set only in the first and last descriptors; depending on the kind of DMA transfer, it remained uninitialized for intermediate descriptors, so an accurate DMA residue was computed only when the currently processed descriptor was the first or the last of the chain. This restriction is unnecessary: an accurate DMA residue can always be computed from the SRC_WIDTH and BTSIZE bitfields in the CTRLAx register. Finally, the test to check whether the currently processed descriptor is the last of the chain was wrong: for cyclic transfers, last_desc->lli.dscr is NOT equal to zero, since set_desc_eol() is never called, but is logically equal to first_desc->txd.phys. This bug has a side effect on the drivers/tty/serial/atmel_serial.c driver, which uses cyclic DMA transfers to receive data. Since the DMA residue was wrong each time the transfer reached the second (and last) period, no more data were received by the USART driver until the cyclic DMA transfer looped back to the first period.
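As a quick illustration of the arithmetic this patch relies on, here is a standalone C model (not driver code: the two register macros mirror the at_hdmac_regs.h hunk further below, the BTSIZE field width is assumed to be 16 bits, and the sample values are invented):

/*
 * Standalone model of the new residue computation: BTSIZE counts
 * SRC_WIDTH-sized transfers already completed on the source interface,
 * so the bytes already consumed are (BTSIZE << SRC_WIDTH).
 */
#include <stdint.h>
#include <stdio.h>

#define ATC_BTSIZE_MAX		0xffffu			/* BTSIZE bitfield, assumed CTRLAx[15:0] */
#define ATC_REG_TO_SRC_WIDTH(r)	(((r) >> 24) & 0x3)	/* 0 = byte, 1 = halfword, 2 = word */

/*
 * current_len: bytes still owed, counting the in-flight descriptor as untouched.
 * ctrla:       snapshot of the CTRLAx register for that descriptor.
 */
static int calc_bytes_left(int current_len, uint32_t ctrla)
{
	uint32_t btsize = ctrla & ATC_BTSIZE_MAX;
	uint32_t src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	return current_len - (btsize << src_width);
}

int main(void)
{
	/* 512-byte descriptor, SRC_WIDTH = word (4 bytes), 100 transfers done */
	uint32_t ctrla = (0x2u << 24) | 100;

	printf("residue = %d bytes\n", calc_bytes_left(512, ctrla)); /* 112 */
	return 0;
}

The patch itself additionally re-reads DSCR around the CTRLA read (see the at_hdmac.c hunk below) so that both snapshots are known to belong to the same child descriptor.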
Signed-off-by: Cyrille Pitchen Acked-by: Torsten Fleischer Tested-by: JirĂ­ Prchal Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 132 +++++++++++++++++++++++++++++--------------- drivers/dma/at_hdmac_regs.h | 3 +- 2 files changed, 88 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 59892126d175..d3629b7482dd 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -48,6 +48,8 @@ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ATC_MAX_DSCR_TRIALS 10 + /* * Initial number of descriptors to allocate for each channel. This could * be increased during dma usage. @@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA - * @desc: the descriptor containing the transfer width */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla, - struct at_desc *desc) +static inline int atc_calc_bytes_left(int current_len, u32 ctrla) { - return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); -} + u32 btsize = (ctrla & ATC_BTSIZE_MAX); + u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); -/** - * atc_calc_bytes_left_from_reg - calculates the number of bytes left according - * to the current value of CTRLA. - * - * @current_len: the number of bytes left before reading CTRLA - * @atchan: the channel to read CTRLA for - * @desc: the descriptor containing the transfer width - */ -static inline int atc_calc_bytes_left_from_reg(int current_len, - struct at_dma_chan *atchan, struct at_desc *desc) -{ - u32 ctrla = channel_readl(atchan, CTRLA); - - return atc_calc_bytes_left(current_len, ctrla, desc); + /* + * According to the datasheet, when reading the Control A Register + * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the + * number of transfers completed on the Source Interface. + * So btsize is always a number of source width transfers. + */ + return current_len - (btsize << src_width); } /** @@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr; + u32 ctrla, dscr, trials; /* * If the cookie doesn't match to the currently running transfer then @@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) * the channel's DSCR register and compare it against the value * of the hardware linked list structure of each child * descriptor. + * + * The CTRLA register provides us with the amount of data + * already read from the source for the current child + * descriptor. So we can compute a more accurate residue by also + * removing the number of bytes corresponding to this amount of + * data. + * + * However, the DSCR and CTRLA registers cannot be read both + * atomically. Hence a race condition may occur: the first read + * register may refer to one child descriptor whereas the second + * read may refer to a later child descriptor in the list + * because of the DMA transfer progression inbetween the two + * reads. + * + * One solution could have been to pause the DMA transfer, read + * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, + * this approach presents some drawbacks: + * - If the DMA transfer is paused, RX overruns or TX underruns + * are more likey to occur depending on the system latency. 
+ * Taking the USART driver as an example, it uses a cyclic DMA + * transfer to read data from the Receive Holding Register + * (RHR) to avoid RX overruns since the RHR is not protected + * by any FIFO on most Atmel SoCs. So pausing the DMA transfer + * to compute the residue would break the USART driver design. + * - The atc_pause() function masks interrupts but we'd rather + * avoid to do so for system latency purpose. + * + * Then we'd rather use another solution: the DSCR is read a + * first time, the CTRLA is read in turn, next the DSCR is read + * a second time. If the two consecutive read values of the DSCR + * are the same then we assume both refers to the very same + * child descriptor as well as the CTRLA value read inbetween + * does. For cyclic tranfers, the assumption is that a full loop + * is "not so fast". + * If the two DSCR values are different, we read again the CTRLA + * then the DSCR till two consecutive read values from DSCR are + * equal or till the maxium trials is reach. + * This algorithm is very unlikely not to find a stable value for + * DSCR. */ - ctrla = channel_readl(atchan, CTRLA); - rmb(); /* ensure CTRLA is read before DSCR */ dscr = channel_readl(atchan, DSCR); + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + u32 new_dscr; + + rmb(); /* ensure DSCR is read after CTRLA */ + new_dscr = channel_readl(atchan, DSCR); + + /* + * If the DSCR register value has not changed inside the + * DMA controller since the previous read, we assume + * that both the dscr and ctrla values refers to the + * very same descriptor. + */ + if (likely(new_dscr == dscr)) + break; + + /* + * DSCR has changed inside the DMA controller, so the + * previouly read value of CTRLA may refer to an already + * processed descriptor hence could be outdated. + * We need to update ctrla to match the current + * descriptor. + */ + dscr = new_dscr; + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + } + if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ if (desc_first->lli.dscr == dscr) - return atc_calc_bytes_left(ret, ctrla, desc_first); + return atc_calc_bytes_left(ret, ctrla); ret -= desc_first->len; list_for_each_entry(desc, &desc_first->tx_list, desc_node) { @@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) } /* - * For the last descriptor in the chain we can calculate + * For the current descriptor in the chain we can calculate * the remaining bytes using the channel's register. - * Note that the transfer width of the first and last - * descriptor may differ. 
*/ - if (!desc->lli.dscr) - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); + ret = atc_calc_bytes_left(ret, ctrla); } else { /* single transfer */ - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); + ctrla = channel_readl(atchan, CTRLA); + ret = atc_calc_bytes_left(ret, ctrla); } return ret; @@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan, desc->txd.cookie = -EBUSY; desc->total_len = desc->len = len; - desc->tx_width = dwidth; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, first->txd.cookie = -EBUSY; first->total_len = len; - /* set transfer width for the calculation of the residue */ - first->tx_width = src_width; - prev->tx_width = src_width; - /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first->txd.cookie = -EBUSY; first->total_len = total_len; - /* set transfer width for the calculation of the residue */ - first->tx_width = reg_width; - prev->tx_width = reg_width; - /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan, desc->txd.cookie = 0; desc->len = len; - /* - * Although we only need the transfer width for the first and - * the last descriptor, its easier to set it to all descriptors. - */ - desc->tx_width = src_width; - atc_desc_chain(&first, &prev, desc); /* update the lengths and addresses for the next loop cycle */ @@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->total_len = buf_len; - first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index bc8d5ebedd19..7f5a08230f76 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -112,6 +112,7 @@ #define ATC_SRC_WIDTH_BYTE (0x0 << 24) #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) #define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ #define ATC_DST_WIDTH(x) ((x) << 28) #define ATC_DST_WIDTH_BYTE (0x0 << 28) @@ -182,7 +183,6 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: descriptor byte count - * @tx_width: transfer width * @total_len: total transaction byte count */ struct at_desc { @@ -194,7 +194,6 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; - u32 tx_width; size_t total_len; /* Interleaved data */ -- cgit v1.2.3 From 1c8a38b1268aebc1a903b21b11575077e02d2cf7 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Tue, 30 Jun 2015 14:36:57 +0200 Subject: dmaengine: at_xdmac: fix transfer data width in at_xdmac_prep_slave_sg() This patch adds the missing update of the transfer data width in at_xdmac_prep_slave_sg(). Indeed, for each item in the scatter-gather list, we check whether the transfer length is aligned with the data width provided by dmaengine_slave_config(). If so, we directly use this data width for the current part of the transfer we are preparing. Otherwise, the data width is reduced to 8 bits (1 byte). 
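As an aside, the width-selection rule described above can be sketched in a few lines of standalone C. This is a model only, with invented helper names; the real change is the at_xdmac_prep_slave_sg() hunk shown after this commit message:

/*
 * Standalone model of the per-segment width choice: keep the width
 * configured via dmaengine_slave_config() only when the segment length
 * is a whole number of such accesses, otherwise fall back to byte
 * accesses.  The encoded width is log2 of the access size, so
 * "len >> width" is the number of register accesses (microblock length).
 */
#include <stdio.h>
#include <stddef.h>

#define DWIDTH_BYTE	0u	/* 1-byte accesses */

static unsigned int pick_dwidth(size_t len, unsigned int cfg_dwidth)
{
	return (len & ((1u << cfg_dwidth) - 1)) == 0 ? cfg_dwidth : DWIDTH_BYTE;
}

int main(void)
{
	unsigned int cfg_dwidth = 2;	/* slave configured for 4-byte accesses */
	size_t segment[] = { 20, 2 };	/* e.g. a 22-byte write split in two parts */

	for (int i = 0; i < 2; i++) {
		unsigned int dw = pick_dwidth(segment[i], cfg_dwidth);

		printf("%zu bytes -> %u-byte accesses, %zu writes\n",
		       segment[i], 1u << dw, segment[i] >> dw);
	}
	return 0;
}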
Of course, the actual number of register accesses must also be updated to match the new data width. So one chunk was missing in the original patch (see Fixes tag below): the number of register accesses was correctly set to (len >> fixed_dwidth) in mbr_ubc, but the real data width was not updated in mbr_cfg. Since mbr_cfg may change for each part of the scatter-gather transfer, this also explains why the original patch used Descriptor View 2 instead of Descriptor View 1. Let's take the example of a DMA transfer to write 8bit data into an Atmel USART with FIFOs. When FIFOs are enabled in the USART, its Transmit Holding Register (THR) works in multidata mode, that is to say that up to 4 8bit data can be written into the THR in a single 32bit access, while it is still possible to write only one byte with an 8bit access. To take advantage of this new feature, the DMA driver was modified to allow multiple dwidths when doing slave transfers. For instance, when the total length is 22 bytes, the USART driver splits the transfer into 2 parts: First part: 20 bytes transferred through 5 32bit writes into THR Second part: 2 bytes transferred through 2 8bit writes into THR For the second part, the data width was first set to 4_BYTES by the USART driver thanks to dmaengine_slave_config(), then at_xdmac_prep_slave_sg() reduces this data width to 1_BYTE because the 2 byte length is not aligned with the original 4_BYTES data width. Since the data width is modified, the actual number of writes into THR must be set accordingly. Signed-off-by: Cyrille Pitchen Fixes: 6d3a7d9e3ada ("dmaengine: at_xdmac: allow muliple dwidths when doing slave transfers") Cc: stable@vger.kernel.org #4.0 and later Acked-by: Nicolas Ferre Acked-by: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 52ca1ccefa3f..40afa2a16cfc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -682,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lld.mbr_sa = mem; desc->lld.mbr_da = atchan->sconfig.dst_addr; } - desc->lld.mbr_cfg = atchan->cfg; - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + dwidth = at_xdmac_get_dwidth(atchan->cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) - ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) + ? dwidth : AT_XDMAC_CC_DWIDTH_BYTE; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | (len >> fixed_dwidth); /* microblock length */ + desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | + AT_XDMAC_CC_DWIDTH(fixed_dwidth); dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); -- cgit v1.2.3 From cda8e937191c100025168ba3e22ab316c7298007 Mon Sep 17 00:00:00 2001 From: Rameshwar Prasad Sahu Date: Tue, 7 Jul 2015 15:34:25 +0530 Subject: dmaengine: xgene-dma: Fix the resource map to handle overlapping There is an overlap in the dma ring cmd csr region because it is shared with the ethernet ring cmd csr region. This patch fixes the resource overlap by mapping the entire dma ring cmd csr region and applying the DMA-specific offset inside the driver.
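A hedged sketch of what that mapping strategy looks like in driver terms is given below. This is not the actual xgene-dma code: the resource index and field names are illustrative, and only the 0x8000 offset comes from the patch that follows.

/*
 * Sketch: instead of declaring a resource that overlaps the ethernet
 * block's ring cmd CSRs, map the whole shared window once and offset
 * into the DMA engine's own sub-block.
 */
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>

#define XGENE_DMA_RING_CMD_SM_OFFSET	0x8000	/* from the patch below */

static int xgene_map_ring_cmd(struct platform_device *pdev,
			      void __iomem **csr_ring_cmd)
{
	struct resource *res;

	/* whole ring cmd CSR window, shared with the ethernet rings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -EINVAL;

	*csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!*csr_ring_cmd)
		return -ENOMEM;

	/* the DMA engine's own ring cmd registers live inside that window */
	*csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;

	return 0;
}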
Signed-off-by: Rameshwar Prasad Sahu Signed-off-by: Vinod Koul --- drivers/dma/xgene-dma.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 620fd55ec766..dff22ab01851 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -111,6 +111,7 @@ #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_BLK_MEM_RDY 0xD074 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF +#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 /* X-Gene SoC EFUSE csr register and bit defination */ #define XGENE_SOC_JTAG1_SHADOW 0x18 @@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev, return -ENOMEM; } + pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; + /* Get efuse csr region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); if (!res) { -- cgit v1.2.3 From 0ec9ebc706fbd394bc233d87ac7aaad1c4f3ab54 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Wed, 8 Jul 2015 16:28:14 +0200 Subject: dmaengine: mv_xor: fix big endian operation in register mode Commit 6f166312c6ea2 ("dmaengine: mv_xor: add support for a38x command in descriptor mode") introduced support for a feature that appeared in Armada 38x: specifying the operation to be performed on a per-descriptor basis rather than globally per channel. However, when doing so, it changed the function mv_chan_set_mode() to use: if (IS_ENABLED(__BIG_ENDIAN)) instead of: #if defined(__BIG_ENDIAN) While IS_ENABLED() is perfectly fine for CONFIG_* symbols, it is not for other symbols such as __BIG_ENDIAN, which is provided directly by the compiler. Consequently, the commit broke support for big-endian, as the XOR_DESCRIPTOR_SWAP flag was not set in the XOR channel configuration register. The most visible effect was some nasty warnings and failures appearing during the self-test of the XOR unit: [ 1.197368] mv_xor d0060900.xor: error on chan 0.
intr cause 0x00000082 [ 1.197393] mv_xor d0060900.xor: config 0x00008440 [ 1.197410] mv_xor d0060900.xor: activation 0x00000000 [ 1.197427] mv_xor d0060900.xor: intr cause 0x00000082 [ 1.197443] mv_xor d0060900.xor: intr mask 0x000003f7 [ 1.197460] mv_xor d0060900.xor: error cause 0x00000000 [ 1.197477] mv_xor d0060900.xor: error addr 0x00000000 [ 1.197491] ------------[ cut here ]------------ [ 1.197513] WARNING: CPU: 0 PID: 1 at ../drivers/dma/mv_xor.c:664 mv_xor_interrupt_handler+0x14c/0x170() See also: http://storage.kernelci.org/next/next-20150617/arm-mvebu_v7_defconfig+CONFIG_CPU_BIG_ENDIAN=y/lab-khilman/boot-armada-xp-openblocks-ax3-4.txt Signed-off-by: Thomas Petazzoni Fixes: 6f166312c6ea2 ("dmaengine: mv_xor: add support for a38x command in descriptor mode") Reviewed-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/mv_xor.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fbaf1ead2597..f1325f62563e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan, config &= ~0x7; config |= op_mode; - if (IS_ENABLED(__BIG_ENDIAN)) - config |= XOR_DESCRIPTOR_SWAP; - else - config &= ~XOR_DESCRIPTOR_SWAP; +#if defined(__BIG_ENDIAN) + config |= XOR_DESCRIPTOR_SWAP; +#else + config &= ~XOR_DESCRIPTOR_SWAP; +#endif writel_relaxed(config, XOR_CONFIG(chan)); chan->current_type = type; -- cgit v1.2.3 From 8c8fe97b2b8a216523e2faf1ccca66ddab634e3e Mon Sep 17 00:00:00 2001 From: Jun Nie Date: Fri, 10 Jul 2015 20:02:49 +0800 Subject: Revert "dmaengine: virt-dma: don't always free descriptor upon completion" This reverts commit b9855f03d560d351e95301b9de0bc3cad3b31fe9. The reverted patch breaks an existing DMA use case: for example, the audio SoC dmaengine layer never releases its channel, which causes virt-dma to cache so much descriptor memory that it exhausts system memory.
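To make the memory-exhaustion argument concrete, here is a standalone C model (not kernel code; the descriptor size and iteration count are invented) of a client that keeps submitting transfers on a channel it never releases while completed descriptors are parked on a cache list:

/*
 * Standalone model of the leak motivating this revert: if completion
 * parks descriptors on a per-channel cache list instead of freeing them,
 * a client that keeps submitting but never releases the channel (e.g. an
 * audio PCM stream) makes the cache grow without bound.
 */
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;
	char priv[256];			/* stand-in for driver-private state */
};

int main(void)
{
	struct desc *cache = NULL;	/* models the pre-revert per-channel cache */
	size_t cached = 0;

	for (unsigned long i = 0; i < 100000; i++) {
		struct desc *d = malloc(sizeof(*d));

		if (!d)
			break;
		d->next = cache;	/* "completed": cached, never handed to desc_free() */
		cache = d;
		cached += sizeof(*d);
	}

	printf("descriptor cache grew to %zu bytes while the channel stayed open\n", cached);

	while (cache) {			/* what the revert restores: free on completion */
		struct desc *d = cache;

		cache = d->next;
		free(d);
	}
	return 0;
}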
Signed-off-by: Vinod Koul --- drivers/dma/virt-dma.c | 19 ++++++------------- drivers/dma/virt-dma.h | 13 +------------ 2 files changed, 7 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7d2c17d8d30f..6f80432a3f0a 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&vc->lock, flags); cookie = dma_cookie_assign(tx); - list_move_tail(&vd->node, &vc->desc_submitted); + list_add_tail(&vd->node, &vc->desc_submitted); spin_unlock_irqrestore(&vc->lock, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", @@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg) cb_data = vd->tx.callback_param; list_del(&vd->node); - if (async_tx_test_ack(&vd->tx)) - list_add(&vd->node, &vc->desc_allocated); - else - vc->desc_free(vd); + + vc->desc_free(vd); if (cb) cb(cb_data); @@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) while (!list_empty(head)) { struct virt_dma_desc *vd = list_first_entry(head, struct virt_dma_desc, node); - if (async_tx_test_ack(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) dma_cookie_init(&vc->chan); spin_lock_init(&vc->lock); - INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 189e75dbcb15..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,7 +29,6 @@ struct virt_dma_chan { spinlock_t lock; /* protected by vc.lock */ - struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; @@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan struct virt_dma_desc *vd, unsigned long tx_flags) { extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); - unsigned long flags; dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; vd->tx.tx_submit = vchan_tx_submit; - spin_lock_irqsave(&vc->lock, flags); - list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - return &vd->tx; } @@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) } /** - * vchan_get_all_descriptors - obtain all allocated, submitted and issued - * descriptors + * vchan_get_all_descriptors - obtain all submitted and issued descriptors * vc: virtual channel to get descriptors from * head: list of descriptors found * @@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { - list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); @@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan 
*vc, static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) { - struct virt_dma_desc *vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); vchan_get_all_descriptors(vc, &head); - list_for_each_entry(vd, &head, node) - async_tx_clear_ack(&vd->tx); spin_unlock_irqrestore(&vc->lock, flags); vchan_dma_desc_free_list(vc, &head); -- cgit v1.2.3