path: root/drivers/iommu/dma-iommu.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-06 11:43:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-06 11:43:23 -0700
commit	1ee18de92927f37e6948d5a6fc73cbf89f806905 (patch)
tree	2521e9884c1349187b9169a5ebf1eead6b6fd66d /drivers/iommu/dma-iommu.c
parent	e542e0dc3ee3eafc46dd8e3073388079d69cace0 (diff)
parent	298f3db6ee690259927b105d5ad1079563361323 (diff)
Merge tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - enhance the dma pool to allow atomic allocation on x86 with AMD SEV
   (David Rientjes)

 - two small cleanups (Jason Yan and Peter Collingbourne)

* tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: fix comment for dma_release_from_contiguous
  dma-pool: scale the default DMA coherent pool size with memory capacity
  x86/mm: unencrypted non-blocking DMA allocations use coherent pools
  dma-pool: add pool sizes to debugfs
  dma-direct: atomic allocations must come from atomic coherent pools
  dma-pool: dynamically expanding atomic pools
  dma-pool: add additional coherent pools to map to gfp mask
  dma-remap: separate DMA atomic pools from direct remap code
  dma-debug: make __dma_entry_alloc_check_leak() static
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	5
1 file changed, 3 insertions, 2 deletions
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ba128d1cdaee..4959f5df21bd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	/* Non-coherent atomic allocation? Easy */
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(cpu_addr, alloc_size))
+	    dma_free_from_pool(dev, cpu_addr, alloc_size))
 		return;
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    !gfpflags_allow_blocking(gfp) && !coherent)
-		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
+					       gfp);
 	else
 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
 	if (!cpu_addr)
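
For context, a minimal sketch of the calling convention after this merge, mirroring the two call sites changed above. The wrapper functions, the coherent flag, and the fallback handling below are illustrative only and not part of this commit; dma_alloc_from_pool() and dma_free_from_pool() are internal DMA-core helpers (not a driver-facing API), and the sketch assumes the declarations already pulled in by the DMA headers used in drivers/iommu/dma-iommu.c.

/*
 * Illustrative sketch only -- not from this commit.  Assumes the DMA-core
 * declarations already included by drivers/iommu/dma-iommu.c.
 */
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_atomic_alloc(struct device *dev, size_t size, gfp_t gfp,
				  bool coherent, struct page **page)
{
	void *cpu_addr = NULL;

	/*
	 * Non-blocking, non-coherent requests are served from an atomic
	 * coherent pool; the pool is now chosen per device (hence the new
	 * dev argument), so it can honour the device's addressing limits.
	 */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), page,
					       gfp);

	return cpu_addr;	/* NULL means: fall back to a blocking path */
}

static void example_atomic_free(struct device *dev, void *cpu_addr,
				size_t size)
{
	/* The device is likewise needed to locate the right pool on free. */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	/* ...otherwise release through the regular free path. */
}

Passing the device to both helpers lets the free path consult the same per-device pool selection that the allocation used, which matters once there are separate DMA, DMA32 and kernel atomic pools as introduced by the dma-pool series pulled in here.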