Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 25 +------------------------
 1 file changed, 1 insertion(+), 24 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8c398fedbbb6..4b6613b5e042 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -466,12 +466,6 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
 void __init dma_contiguous_remap(void)
 {
 	int i;
-
-	if (!dma_mmu_remap_num)
-		return;
-
-	/* call flush_cache_all() since CMA area would be large enough */
-	flush_cache_all();
 	for (i = 0; i < dma_mmu_remap_num; i++) {
 		phys_addr_t start = dma_mmu_remap[i].base;
 		phys_addr_t end = start + dma_mmu_remap[i].size;
@@ -504,15 +498,7 @@ void __init dma_contiguous_remap(void)
 		flush_tlb_kernel_range(__phys_to_virt(start),
 				       __phys_to_virt(end));
 
-		/*
-		 * All the memory in CMA region will be on ZONE_MOVABLE.
-		 * If that zone is considered as highmem, the memory in CMA
-		 * region is also considered as highmem even if it's
-		 * physical address belong to lowmem. In this case,
-		 * re-mapping isn't required.
-		 */
-		if (!is_highmem_idx(ZONE_MOVABLE))
-			iotable_init(&map, 1);
+		iotable_init(&map, 1);
 	}
 }
@@ -1165,15 +1151,6 @@ int arm_dma_supported(struct device *dev, u64 mask)
 	return __dma_supported(dev, mask, false);
 }
 
-#define PREALLOC_DMA_DEBUG_ENTRIES 4096
-
-static int __init dma_debug_do_init(void)
-{
-	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
-	return 0;
-}
-core_initcall(dma_debug_do_init);
-
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)