Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig       |   2 +
-rw-r--r--  drivers/dma/Makefile      |   1 +
-rw-r--r--  drivers/dma/hsu/Kconfig   |  14 +
-rw-r--r--  drivers/dma/hsu/Makefile  |   5 +
-rw-r--r--  drivers/dma/hsu/hsu.c     | 504 +
-rw-r--r--  drivers/dma/hsu/hsu.h     | 118 +
-rw-r--r--  drivers/dma/hsu/pci.c     | 123 +
7 files changed, 767 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a874b6ec6650..074ffad334a7 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -125,6 +125,8 @@ config FSL_DMA
EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
some Txxx and Bxxx parts.
+source "drivers/dma/hsu/Kconfig"
+
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"
depends on PPC_MPC512x || PPC_MPC831x
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f915f61ec574..bf4485800c60 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
diff --git a/drivers/dma/hsu/Kconfig b/drivers/dma/hsu/Kconfig
new file mode 100644
index 000000000000..7e98eff7440e
--- /dev/null
+++ b/drivers/dma/hsu/Kconfig
@@ -0,0 +1,14 @@
+# DMA engine configuration for hsu
+config HSU_DMA
+ tristate "High Speed UART DMA support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
+config HSU_DMA_PCI
+ tristate "High Speed UART DMA PCI driver"
+ depends on PCI
+ select HSU_DMA
+ help
+ Support the High Speed UART DMA on the platforms that
+ enumerate it as a PCI device. For example, Intel Medfield
+ has integrated this HSU DMA controller.
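Since HSU_DMA_PCI selects HSU_DMA, enabling only the PCI driver pulls the core in automatically; an illustrative .config fragment (not part of the patch):

	CONFIG_HSU_DMA=y
	CONFIG_HSU_DMA_PCI=y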
diff --git a/drivers/dma/hsu/Makefile b/drivers/dma/hsu/Makefile
new file mode 100644
index 000000000000..b8f9af032ef1
--- /dev/null
+++ b/drivers/dma/hsu/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HSU_DMA) += hsu_dma.o
+hsu_dma-objs := hsu.o
+
+obj-$(CONFIG_HSU_DMA_PCI) += hsu_dma_pci.o
+hsu_dma_pci-objs := pci.o
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
new file mode 100644
index 000000000000..683ba9b62795
--- /dev/null
+++ b/drivers/dma/hsu/hsu.c
@@ -0,0 +1,504 @@
+/*
+ * Core driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * Partially based on the bits found in drivers/tty/serial/mfd.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * DMA channel allocation:
+ * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
+ * channels for DMA Write (UART RX).
+ * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
+ * 4/5 to port 2, and so on.
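+ * For example, UART port 1 owns channel 2 (TX) and channel 3 (RX).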
+ */
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "hsu.h"
+
+#define HSU_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
+
+static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
+{
+ hsu_chan_writel(hsuc, HSU_CH_CR, 0);
+}
+
+static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
+{
+ u32 cr = HSU_CH_CR_CHA;
+
+ if (hsuc->direction == DMA_MEM_TO_DEV)
+ cr &= ~HSU_CH_CR_CHD;
+ else if (hsuc->direction == DMA_DEV_TO_MEM)
+ cr |= HSU_CH_CR_CHD;
+
+ hsu_chan_writel(hsuc, HSU_CH_CR, cr);
+}
+
+static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
+{
+ struct dma_slave_config *config = &hsuc->config;
+ struct hsu_dma_desc *desc = hsuc->desc;
+ u32 bsr, mtsr;
+ u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
+ unsigned int i, count;
+
+ if (hsuc->direction == DMA_MEM_TO_DEV) {
+ bsr = config->dst_maxburst;
+ mtsr = config->dst_addr_width;
+ } else if (hsuc->direction == DMA_DEV_TO_MEM) {
+ bsr = config->src_maxburst;
+ mtsr = config->src_addr_width;
+ } else {
+ /* Unsupported direction */
+ return;
+ }
+
+ hsu_chan_disable(hsuc);
+
+ hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
+ hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
+ hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
+
+ /* Set up to HSU_DMA_CHAN_NR_DESC descriptors; a modulo here would
+ * wrongly program zero descriptors when an exact multiple remains */
+ count = desc->nents - desc->active;
+ if (count > HSU_DMA_CHAN_NR_DESC)
+ count = HSU_DMA_CHAN_NR_DESC;
+ for (i = 0; i < count; i++) {
+ hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
+ hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
+
+ /* Prepare value for DCR */
+ dcr |= HSU_CH_DCR_DESCA(i);
+ dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */
+
+ desc->active++;
+ }
+ /* Only for the last descriptor in the chain */
+ dcr |= HSU_CH_DCR_CHSOD(count - 1);
+ dcr |= HSU_CH_DCR_CHDI(count - 1);
+
+ hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);
+
+ hsu_chan_enable(hsuc);
+}
+
+static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_chan_disable(hsuc);
+ hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_dma_chan_start(hsuc);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
+{
+ struct virt_dma_desc *vdesc;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&hsuc->vchan);
+ if (!vdesc) {
+ hsuc->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ hsuc->desc = to_hsu_dma_desc(vdesc);
+
+ /* Start the channel with a new descriptor */
+ hsu_dma_start_channel(hsuc);
+}
+
+static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+ u32 sr;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+
+ return sr;
+}
+
+irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+{
+ struct hsu_dma_chan *hsuc;
+ struct hsu_dma_desc *desc;
+ unsigned long flags;
+ u32 sr;
+
+ /* Sanity check */
+ if (nr >= chip->pdata->nr_channels)
+ return IRQ_NONE;
+
+ hsuc = &chip->hsu->chan[nr];
+
+ /*
+ * The status register must be read in any case to clear the IRQ
+ * (read-to-clear); there is a hardware bug, see Errata 5, HSD 2900918
+ */
+ sr = hsu_dma_chan_get_sr(hsuc);
+ if (!sr)
+ return IRQ_NONE;
+
+ /* On a timeout IRQ the hardware needs a short delay, see Errata 2 */
+ if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
+ udelay(2);
+
+ sr &= ~HSU_CH_SR_DESCTO_ANY;
+ if (!sr)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ desc = hsuc->desc;
+ if (desc) {
+ if (sr & HSU_CH_SR_CHE) {
+ desc->status = DMA_ERROR;
+ } else if (desc->active < desc->nents) {
+ hsu_dma_start_channel(hsuc);
+ } else {
+ vchan_cookie_complete(&desc->vdesc);
+ desc->status = DMA_COMPLETE;
+ hsu_dma_start_transfer(hsuc);
+ }
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_irq);
+
+static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
+{
+ struct hsu_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
+ if (!desc)
+ return NULL;
+
+ desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_ATOMIC);
+ if (!desc->sg) {
+ kfree(desc);
+ return NULL;
+ }
+
+ return desc;
+}
+
+static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);
+
+ kfree(desc->sg);
+ kfree(desc);
+}
+
+static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ struct hsu_dma_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ desc = hsu_dma_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].addr = sg_dma_address(sg);
+ desc->sg[i].len = sg_dma_len(sg);
+ }
+
+ desc->nents = sg_len;
+ desc->direction = direction;
+ desc->active = 0;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
+}
+
+static void hsu_dma_issue_pending(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
+ hsu_dma_start_transfer(hsuc);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+}
+
+static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
+{
+ size_t bytes = 0;
+ unsigned int i;
+
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ return bytes;
+}
+
+static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
+{
+ struct hsu_dma_desc *desc = hsuc->desc;
+ size_t bytes = hsu_dma_desc_size(desc);
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
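+ /* Add what the hardware still reports for the in-flight descriptors */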
+ i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ do {
+ bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
+ } while (--i >= 0);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+
+ return bytes;
+}
+
+static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ size_t bytes;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, state);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ vdesc = vchan_find_desc(&hsuc->vchan, cookie);
+ if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
+ bytes = hsu_dma_active_desc_size(hsuc);
+ dma_set_residue(state, bytes);
+ status = hsuc->desc->status;
+ } else if (vdesc) {
+ bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+ dma_set_residue(state, bytes);
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return status;
+}
+
+static int hsu_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(config->direction))
+ return -EINVAL;
+
+ memcpy(&hsuc->config, config, sizeof(hsuc->config));
+
+ return 0;
+}
+
+static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_chan_disable(hsuc);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->lock, flags);
+ hsu_chan_enable(hsuc);
+ spin_unlock_irqrestore(&hsuc->lock, flags);
+}
+
+static int hsu_dma_pause(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
+ hsu_dma_chan_deactivate(hsuc);
+ hsuc->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return 0;
+}
+
+static int hsu_dma_resume(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
+ hsuc->desc->status = DMA_IN_PROGRESS;
+ hsu_dma_chan_activate(hsuc);
+ }
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ return 0;
+}
+
+static int hsu_dma_terminate_all(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+
+ hsu_dma_stop_channel(hsuc);
+ hsuc->desc = NULL;
+
+ vchan_get_all_descriptors(&hsuc->vchan, &head);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+ vchan_dma_desc_free_list(&hsuc->vchan, &head);
+
+ return 0;
+}
+
+static int hsu_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void hsu_dma_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+int hsu_dma_probe(struct hsu_dma_chip *chip)
+{
+ struct hsu_dma *hsu;
+ struct hsu_dma_platform_data *pdata = chip->pdata;
+ void __iomem *addr = chip->regs + chip->offset;
+ unsigned short i;
+ int ret;
+
+ hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
+ if (!hsu)
+ return -ENOMEM;
+
+ chip->hsu = hsu;
+
+ if (!pdata) {
+ pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ chip->pdata = pdata;
+
+ /* Guess nr_channels from the IO space length */
+ pdata->nr_channels = (chip->length - chip->offset) /
+ HSU_DMA_CHAN_LENGTH;
+ }
+
+ hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels,
+ sizeof(*hsu->chan), GFP_KERNEL);
+ if (!hsu->chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hsu->dma.channels);
+ for (i = 0; i < pdata->nr_channels; i++) {
+ struct hsu_dma_chan *hsuc = &hsu->chan[i];
+
+ hsuc->vchan.desc_free = hsu_dma_desc_free;
+ vchan_init(&hsuc->vchan, &hsu->dma);
+
+ hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
+
+ spin_lock_init(&hsuc->lock);
+ }
+
+ dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);
+
+ hsu->dma.device_alloc_chan_resources = hsu_dma_alloc_chan_resources;
+ hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;
+
+ hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;
+
+ hsu->dma.device_issue_pending = hsu_dma_issue_pending;
+ hsu->dma.device_tx_status = hsu_dma_tx_status;
+
+ hsu->dma.device_config = hsu_dma_slave_config;
+ hsu->dma.device_pause = hsu_dma_pause;
+ hsu->dma.device_resume = hsu_dma_resume;
+ hsu->dma.device_terminate_all = hsu_dma_terminate_all;
+
+ hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
+ hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
+ hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ hsu->dma.dev = chip->dev;
+
+ ret = dma_async_device_register(&hsu->dma);
+ if (ret)
+ return ret;
+
+ dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_probe);
+
+int hsu_dma_remove(struct hsu_dma_chip *chip)
+{
+ struct hsu_dma *hsu = chip->hsu;
+ unsigned short i;
+
+ dma_async_device_unregister(&hsu->dma);
+
+ for (i = 0; i < chip->pdata->nr_channels; i++) {
+ struct hsu_dma_chan *hsuc = &hsu->chan[i];
+
+ tasklet_kill(&hsuc->vchan.task);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("High Speed UART DMA core driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
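For context, a consumer such as a UART driver would feed this engine through the generic dmaengine slave API. A minimal sketch, assuming "chan" is an HSU channel already acquired via dma_request_channel(); rx_buf, rx_len and rx_complete() are hypothetical names:

	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,	/* checked by hsu_dma_slave_config() */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = 32,
	};
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;

	dmaengine_slave_config(chan, &cfg);

	sg_init_one(&sg, rx_buf, rx_len);
	dma_map_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE);

	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT);
	if (tx) {
		tx->callback = rx_complete;	/* hypothetical completion handler */
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}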
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
new file mode 100644
index 000000000000..0275233cf550
--- /dev/null
+++ b/drivers/dma/hsu/hsu.h
@@ -0,0 +1,118 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * Partially based on the bits found in drivers/tty/serial/mfd.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_HSU_H__
+#define __DMA_HSU_H__
+
+#include <linux/spinlock.h>
+#include <linux/dma/hsu.h>
+
+#include "../virt-dma.h"
+
+#define HSU_CH_SR 0x00 /* channel status */
+#define HSU_CH_CR 0x04 /* channel control */
+#define HSU_CH_DCR 0x08 /* descriptor control */
+#define HSU_CH_BSR 0x10 /* FIFO buffer size */
+#define HSU_CH_MTSR 0x14 /* minimum transfer size */
+#define HSU_CH_DxSAR(x) (0x20 + 8 * (x)) /* desc start addr */
+#define HSU_CH_DxTSR(x) (0x24 + 8 * (x)) /* desc transfer size */
+#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
+#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
+#define HSU_CH_D1SAR 0x28
+#define HSU_CH_D1TSR 0x2c
+#define HSU_CH_D2SAR 0x30
+#define HSU_CH_D2TSR 0x34
+#define HSU_CH_D3SAR 0x38
+#define HSU_CH_D3TSR 0x3c
+
+#define HSU_DMA_CHAN_NR_DESC 4
+#define HSU_DMA_CHAN_LENGTH 0x40
+
+/* Bits in HSU_CH_SR */
+#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
+#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define HSU_CH_SR_CHE BIT(15)
+
+/* Bits in HSU_CH_CR */
+#define HSU_CH_CR_CHA BIT(0)
+#define HSU_CH_CR_CHD BIT(1)
+
+/* Bits in HSU_CH_DCR */
+#define HSU_CH_DCR_DESCA(x) BIT(0 + (x))
+#define HSU_CH_DCR_CHSOD(x) BIT(8 + (x))
+#define HSU_CH_DCR_CHSOTO BIT(14)
+#define HSU_CH_DCR_CHSOE BIT(15)
+#define HSU_CH_DCR_CHDI(x) BIT(16 + (x))
+#define HSU_CH_DCR_CHEI BIT(23)
+#define HSU_CH_DCR_CHTOI(x) BIT(24 + (x))
+
+struct hsu_dma_sg {
+ dma_addr_t addr;
+ unsigned int len;
+};
+
+struct hsu_dma_desc {
+ struct virt_dma_desc vdesc;
+ enum dma_transfer_direction direction;
+ struct hsu_dma_sg *sg;
+ unsigned int nents;
+ unsigned int active;
+ enum dma_status status;
+};
+
+static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct hsu_dma_desc, vdesc);
+}
+
+struct hsu_dma_chan {
+ struct virt_dma_chan vchan;
+
+ void __iomem *reg;
+ spinlock_t lock;
+
+ /* hardware configuration */
+ enum dma_transfer_direction direction;
+ struct dma_slave_config config;
+
+ struct hsu_dma_desc *desc;
+};
+
+static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct hsu_dma_chan, vchan.chan);
+}
+
+static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset)
+{
+ return readl(hsuc->reg + offset);
+}
+
+static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset,
+ u32 value)
+{
+ writel(value, hsuc->reg + offset);
+}
+
+struct hsu_dma {
+ struct dma_device dma;
+
+ /* channels */
+ struct hsu_dma_chan *chan;
+};
+
+static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev)
+{
+ return container_of(ddev, struct hsu_dma, dma);
+}
+
+#endif /* __DMA_HSU_H__ */
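These constants encode the register layout: each channel occupies a 0x40-byte window (HSU_DMA_CHAN_LENGTH), and descriptor x's start-address/size pair sits at 0x20 + 8 * x within it. A hypothetical helper (illustrative only, not part of the patch) resolving the DxSAR offset of descriptor x on channel nr from the chip MMIO base:

	static unsigned int hsu_dxsar_offset(struct hsu_dma_chip *chip,
					     unsigned int nr, unsigned int x)
	{
		/* chip->offset is where the channel windows start */
		return chip->offset + nr * HSU_DMA_CHAN_LENGTH + HSU_CH_DxSAR(x);
	}

On the PCI part below (chip->offset = 0x100), descriptor 2 of channel 1 therefore lands at 0x100 + 0x40 + 0x30 = 0x170.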
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
new file mode 100644
index 000000000000..563b4685d766
--- /dev/null
+++ b/drivers/dma/hsu/pci.c
@@ -0,0 +1,123 @@
+/*
+ * PCI driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * Partially based on the bits found in drivers/tty/serial/mfd.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "hsu.h"
+
+#define HSU_PCI_DMASR 0x00
+#define HSU_PCI_DMAISR 0x04
+
+#define HSU_PCI_CHAN_OFFSET 0x100
+
+static irqreturn_t hsu_pci_irq(int irq, void *dev)
+{
+ struct hsu_dma_chip *chip = dev;
+ u32 dmaisr;
+ unsigned short i;
+ irqreturn_t ret = IRQ_NONE;
+
+ dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
+ for (i = 0; i < chip->pdata->nr_channels; i++) {
+ if (dmaisr & 0x1)
+ ret |= hsu_dma_irq(chip, i);
+ dmaisr >>= 1;
+ }
+
+ return ret;
+}
+
+static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hsu_dma_chip *chip;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regs = pcim_iomap_table(pdev)[0];
+ chip->length = pci_resource_len(pdev, 0);
+ chip->offset = HSU_PCI_CHAN_OFFSET;
+ chip->irq = pdev->irq;
+
+ pci_enable_msi(pdev);
+
+ ret = hsu_dma_probe(chip);
+ if (ret)
+ return ret;
+
+ ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
+ if (ret)
+ goto err_register_irq;
+
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+
+err_register_irq:
+ hsu_dma_remove(chip);
+ return ret;
+}
+
+static void hsu_pci_remove(struct pci_dev *pdev)
+{
+ struct hsu_dma_chip *chip = pci_get_drvdata(pdev);
+
+ free_irq(chip->irq, chip);
+ hsu_dma_remove(chip);
+}
+
+static const struct pci_device_id hsu_pci_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x081e), 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, hsu_pci_id_table);
+
+static struct pci_driver hsu_pci_driver = {
+ .name = "hsu_dma_pci",
+ .id_table = hsu_pci_id_table,
+ .probe = hsu_pci_probe,
+ .remove = hsu_pci_remove,
+};
+
+module_pci_driver(hsu_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("High Speed UART DMA PCI driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
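For completeness, a sketch of how a serial driver might claim the channel pair of UART port 1 (channels 2/3 under the scheme documented in hsu.c). The filter and its parameter convention are illustrative assumptions, not an interface defined by this patch:

	static bool hsu_filter(struct dma_chan *chan, void *param)
	{
		/* Match the channel number the caller asked for */
		return chan->chan_id == (unsigned long)param;
	}

	/* ... in the consumer's probe path ... */
	dma_cap_mask_t mask;
	struct dma_chan *txc, *rxc;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	txc = dma_request_channel(mask, hsu_filter, (void *)2UL);	/* UART1 TX */
	rxc = dma_request_channel(mask, hsu_filter, (void *)3UL);	/* UART1 RX */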