| author | Christoph Hellwig <hch@lst.de> | 2017-05-23 14:57:57 +0200 |
| --- | --- | --- |
| committer | Christoph Hellwig <hch@lst.de> | 2017-06-28 06:54:53 -0700 |
| commit | 7eb8a7a9e854640508fefc5a6d0bb50de243610b (patch) | |
| tree | bf15ecdcd43459b1b336aaf0f601ac76180556bc /arch/powerpc/platforms/cell | |
| parent | 228a5e1a875006588f394345e96c64215f8b132e (diff) | |
powerpc/cell: use the dma_supported method for ops switching
Besides removing the last instance of the set_dma_mask method, this also reduces code duplication.
Signed-off-by: Christoph Hellwig <hch@lst.de>
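The pattern behind the change is that the decision previously made in a cell-specific set_dma_mask hook now lives in the dma_supported callback itself: the callback both answers whether the requested mask is usable and installs the matching dma_map_ops. The standalone C sketch below models that flow with mock types; mock_device, mock_dma_ops, supported_and_switch and mock_dma_set_mask are illustrative stand-ins rather than kernel API, and the 32-bit check merely stands in for dma_iommu_dma_supported().

```c
/*
 * Standalone mock of the "dma_supported switches the ops" pattern.
 * All names here are simplified stand-ins, not the real kernel API.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

struct mock_device;

struct mock_dma_ops {
	const char *name;
	int (*dma_supported)(struct mock_device *dev, uint64_t mask);
};

struct mock_device {
	const struct mock_dma_ops *ops;
	uint64_t dma_mask;
	int has_fixed_window;	/* stand-in for cell_iommu_get_fixed_address() != OF_BAD_ADDR */
};

static int supported_and_switch(struct mock_device *dev, uint64_t mask);

/* Two ops tables, mirroring dma_iommu_fixed_ops vs. the default PCI ops. */
static const struct mock_dma_ops fixed_ops = {
	.name = "fixed (64-bit bypass)",
	.dma_supported = supported_and_switch,
};

static const struct mock_dma_ops default_ops = {
	.name = "default (dynamic IOMMU)",
	.dma_supported = supported_and_switch,
};

/* Modeled on dma_suported_and_switch(): report support and install the ops. */
static int supported_and_switch(struct mock_device *dev, uint64_t mask)
{
	if (mask == DMA_BIT_MASK(64) && dev->has_fixed_window) {
		dev->ops = &fixed_ops;
		return 1;
	}
	if (mask >= DMA_BIT_MASK(32)) {	/* stand-in for dma_iommu_dma_supported() */
		dev->ops = &default_ops;
		return 1;
	}
	return 0;
}

/* Caller side: only store the mask once the callback reports support. */
static int mock_dma_set_mask(struct mock_device *dev, uint64_t mask)
{
	if (!dev->ops->dma_supported(dev, mask))
		return -1;
	dev->dma_mask = mask;
	return 0;
}

int main(void)
{
	struct mock_device dev = { .ops = &default_ops, .has_fixed_window = 1 };

	mock_dma_set_mask(&dev, DMA_BIT_MASK(64));
	printf("64-bit mask -> %s ops\n", dev.ops->name);	/* fixed */

	mock_dma_set_mask(&dev, DMA_BIT_MASK(32));
	printf("32-bit mask -> %s ops\n", dev.ops->name);	/* default */
	return 0;
}
```

Running the sketch prints which ops table ends up installed for a 64-bit and then a 32-bit mask, mirroring how the cell code selects the fixed bypass ops only for a full 64-bit mask on a device with a usable fixed window.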
Diffstat (limited to 'arch/powerpc/platforms/cell')
| -rw-r--r-- | arch/powerpc/platforms/cell/iommu.c | 25 |
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 497bfbdbd967..29d4f96ed33e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -644,20 +644,14 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
 			   direction, attrs);
 }
 
-static int dma_fixed_dma_supported(struct device *dev, u64 mask)
-{
-	return mask == DMA_BIT_MASK(64);
-}
-
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+static int dma_suported_and_switch(struct device *dev, u64 dma_mask);
 
 static const struct dma_map_ops dma_iommu_fixed_ops = {
 	.alloc          = dma_fixed_alloc_coherent,
 	.free           = dma_fixed_free_coherent,
 	.map_sg         = dma_fixed_map_sg,
 	.unmap_sg       = dma_fixed_unmap_sg,
-	.dma_supported  = dma_fixed_dma_supported,
-	.set_dma_mask   = dma_set_mask_and_switch,
+	.dma_supported  = dma_suported_and_switch,
 	.map_page       = dma_fixed_map_page,
 	.unmap_page     = dma_fixed_unmap_page,
 	.mapping_error	= dma_iommu_mapping_error,
@@ -952,11 +946,8 @@ out:
 	return dev_addr;
 }
 
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
+static int dma_suported_and_switch(struct device *dev, u64 dma_mask)
 {
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
 	if (dma_mask == DMA_BIT_MASK(64) &&
 	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
 		u64 addr = cell_iommu_get_fixed_address(dev) +
@@ -965,14 +956,16 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
 		dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
 		set_dma_ops(dev, &dma_iommu_fixed_ops);
 		set_dma_offset(dev, addr);
-	} else {
+		return 1;
+	}
+
+	if (dma_iommu_dma_supported(dev, dma_mask)) {
 		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
 		set_dma_ops(dev, get_pci_dma_ops());
 		cell_dma_dev_setup(dev);
+		return 1;
 	}
 
-	*dev->dma_mask = dma_mask;
-
 	return 0;
 }
 
@@ -1127,7 +1120,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
 		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
 	}
 
-	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
+	dma_iommu_ops.dma_supported = dma_suported_and_switch;
 	set_pci_dma_ops(&dma_iommu_ops);
 
 	return 0;
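The return values follow the usual dma_supported convention: nonzero means the mask can be used, zero means it cannot. With the .set_dma_mask method removed, the generic mask-setting path is what reaches this callback, and the `*dev->dma_mask = dma_mask` assignment dropped from the cell code is handled there once the callback reports support. The snippet below is an illustrative paraphrase of that dispatch, not a verbatim copy of the era's include/linux/dma-mapping.h; the helper is deliberately named dma_supported_sketch to mark it as an assumption.

```c
/*
 * Illustrative paraphrase of the generic dma_supported() dispatch this
 * commit relies on; the real helper of that era may differ in detail.
 */
static inline int dma_supported_sketch(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;	/* no hook: any mask is accepted */
	/* for cell's fixed-mapping setup this is dma_suported_and_switch() */
	return ops->dma_supported(dev, mask);
}
```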