Diffstat (limited to 'arch/mips')
 -rw-r--r--  arch/mips/Kconfig                   |  4
 -rw-r--r--  arch/mips/bmips/dma.c               |  2
 -rw-r--r--  arch/mips/include/asm/dma-direct.h  |  8
 -rw-r--r--  arch/mips/jazz/jazzdma.c            | 17
 -rw-r--r--  arch/mips/mm/dma-noncoherent.c      | 18
 -rw-r--r--  arch/mips/pci/fixup-sb1250.c        | 16
 6 files changed, 25 insertions(+), 40 deletions(-)
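Most of the churn in bmips/dma.c, jazzdma.c and dma-noncoherent.c comes from arch_sync_dma_for_device(), arch_sync_dma_for_cpu() and arch_sync_dma_for_cpu_all() losing their unused struct device * argument. A minimal sketch of the updated prototypes with a hypothetical caller follows; the includes and the example_* helper are assumptions added for illustration and are not part of the patch:

#include <linux/types.h>
#include <linux/dma-direction.h>

/* New prototypes: no struct device * argument any more. */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir);
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir);
void arch_sync_dma_for_cpu_all(void);

/* Hypothetical caller: cache maintenance around streaming DMA. */
static void example_sync_for_transfer(phys_addr_t paddr, size_t size)
{
	/* Write back CPU caches before the device reads this buffer. */
	arch_sync_dma_for_device(paddr, size, DMA_TO_DEVICE);

	/* Invalidate CPU caches after the device has written into it. */
	arch_sync_dma_for_cpu(paddr, size, DMA_FROM_DEVICE);
}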
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 61b9269cdd3e..add388236f4e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1194,9 +1194,9 @@ config DMA_NONCOHERENT
select ARCH_HAS_DMA_WRITE_COMBINE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT
- select NEED_DMA_MAP_STATE
- select ARCH_HAS_DMA_COHERENT_TO_PFN
+ select DMA_NONCOHERENT_MMAP
select DMA_NONCOHERENT_CACHE_SYNC
+ select NEED_DMA_MAP_STATE
config SYS_HAS_EARLY_PRINTK
bool
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 3d13c77c125f..df56bf4179e3 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
-void arch_sync_dma_for_cpu_all(struct device *dev)
+void arch_sync_dma_for_cpu_all(void)
{
void __iomem *cbr = BMIPS_GET_CBR();
u32 cfg;
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
index b5c240806e1b..14e352651ce9 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -2,14 +2,6 @@
#ifndef _MIPS_DMA_DIRECT_H
#define _MIPS_DMA_DIRECT_H 1
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index a01e14955187..c64a297e82b3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return vdma_alloc(phys, size);
}
@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+ arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
vdma_free(dma_addr);
}
@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ arch_sync_dma_for_device(sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
if (sg->dma_address == DMA_MAPPING_ERROR)
@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
- dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
vdma_free(sg->dma_address);
}
}
@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
static void jazz_dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+ arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+ arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_sg_for_device(struct device *dev,
@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
const struct dma_map_ops jazz_dma_ops = {
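For context, the callbacks touched above are wired into the bus's dma_map_ops table. A shortened sketch of that wiring is below; the member selection is illustrative only, assuming the file's actual initializer also fills in further hooks such as the allocation callbacks:

const struct dma_map_ops example_jazz_like_ops = {
	.map_page		= jazz_dma_map_page,
	.unmap_page		= jazz_dma_unmap_page,
	.map_sg			= jazz_dma_map_sg,
	.unmap_sg		= jazz_dma_unmap_sg,
	.sync_single_for_device	= jazz_dma_sync_single_for_device,
	.sync_single_for_cpu	= jazz_dma_sync_single_for_cpu,
	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
};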
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 1d4d57dd9acf..dc42ffc83825 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -27,7 +27,7 @@
* R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
* SGI IP32 aka O2.
*/
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
{
switch (boot_cpu_type()) {
case CPU_R10000:
@@ -59,12 +59,6 @@ void *cached_kernel_address(void *addr)
return __va(addr) - UNCAC_BASE;
}
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
-}
-
static inline void dma_sync_virt(void *addr, size_t size,
enum dma_data_direction dir)
{
@@ -118,17 +112,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
} while (left);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
dma_sync_phys(paddr, size, dir);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush())
dma_sync_phys(paddr, size, dir);
}
#endif
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index 8a41b359cf90..40efc990cdce 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -21,22 +21,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
/*
* The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit
- * bus, so we set the bus's DMA mask accordingly. However the HT link
+ * bus, so we set the bus's DMA limit accordingly. However the HT link
* down the artificial PCI-HT bridge supports 40-bit addressing and the
* SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus
* width, so we record the PCI-HT bridge's secondary and subordinate bus
- * numbers and do not set the mask for devices present in the inclusive
+ * numbers and do not set the limit for devices present in the inclusive
* range of those.
*/
-struct sb1250_bus_dma_mask_exclude {
+struct sb1250_bus_dma_limit_exclude {
bool set;
unsigned char start;
unsigned char end;
};
-static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
+static int sb1250_bus_dma_limit(struct pci_dev *dev, void *data)
{
- struct sb1250_bus_dma_mask_exclude *exclude = data;
+ struct sb1250_bus_dma_limit_exclude *exclude = data;
bool exclude_this;
bool ht_bridge;
@@ -55,7 +55,7 @@ static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
exclude->start, exclude->end);
} else {
dev_dbg(&dev->dev, "disabling DAC for device");
- dev->dev.bus_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.bus_dma_limit = DMA_BIT_MASK(32);
}
return 0;
@@ -63,9 +63,9 @@ static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
static void quirk_sb1250_pci_dac(struct pci_dev *dev)
{
- struct sb1250_bus_dma_mask_exclude exclude = { .set = false };
+ struct sb1250_bus_dma_limit_exclude exclude = { .set = false };
- pci_walk_bus(dev->bus, sb1250_bus_dma_mask, &exclude);
+ pci_walk_bus(dev->bus, sb1250_bus_dma_limit, &exclude);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
quirk_sb1250_pci_dac);
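As the comment block in fixup-sb1250.c describes, devices sitting directly on the 32-bit host bridge get dev->dev.bus_dma_limit capped at DMA_BIT_MASK(32), while devices behind the HT bridge are left alone. Conceptually, the effective ceiling for a mapping then becomes the smaller of the device's dma_mask and this bus limit. The check below is a simplified, self-contained illustration of that idea; example_addr_fits() is a stand-in written for this note, not the kernel's generic helper:

#include <linux/device.h>
#include <linux/types.h>

static bool example_addr_fits(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit = dev->dma_mask ? *dev->dma_mask : 0;

	/* A non-zero bus_dma_limit lowers the ceiling set by dma_mask. */
	if (dev->bus_dma_limit && dev->bus_dma_limit < limit)
		limit = dev->bus_dma_limit;

	/* The whole buffer must end at or below the effective limit. */
	return limit && addr + size - 1 <= limit;
}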