| author | Stephen Warren <swarren@nvidia.com> | 2019-01-03 10:23:24 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-01-07 05:14:17 -0800 |
| commit | 01cd364a15f42575ef4aac8f82ff05516ea5da9a (patch) | |
| tree | 4717b9f118a5eadecc53414e2db3ff2a3b307f96 | |
| parent | f65e192af35058e5c82da9e90871b472d24912bc (diff) | |
net/mlx4: replace pci_{,un}map_sg with dma_{,un}map_sg
pci_{,un}map_sg are deprecated and replaced by dma_{,un}map_sg. This is
especially relevant since the rest of the driver uses the DMA API. Fix
the driver to use the replacement APIs.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/icm.c | 15 |
|---|---|---|

1 file changed, 7 insertions(+), 8 deletions(-)
```diff
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 76b84d08a058..d89a3da89e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,8 +57,8 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
 	int i;
 
 	if (chunk->nsg > 0)
-		pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
-			     PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
+			     DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
 		__free_pages(sg_page(&chunk->sg[i]),
@@ -204,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		if (coherent)
 			++chunk->nsg;
 		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
-						chunk->npages,
-						PCI_DMA_BIDIRECTIONAL);
+			chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
+						chunk->sg, chunk->npages,
+						DMA_BIDIRECTIONAL);
 
 			if (chunk->nsg <= 0)
 				goto fail;
@@ -219,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	}
 
 	if (!coherent && chunk) {
-		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
-					chunk->npages,
-					PCI_DMA_BIDIRECTIONAL);
+		chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
+					chunk->npages, DMA_BIDIRECTIONAL);
 
 		if (chunk->nsg <= 0)
 			goto fail;
```
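
The general conversion pattern the patch applies is sketched below: the deprecated PCI DMA wrappers take a `struct pci_dev *` and a `PCI_DMA_*` direction constant, while the generic DMA API takes the embedded `struct device` (`&pdev->dev`) and the corresponding `DMA_*` constant. This is a minimal, hedged illustration; the `example_map()`/`example_unmap()` wrappers are hypothetical and not part of the mlx4 driver.

```c
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative wrapper: map a scatterlist for bidirectional DMA. */
static int example_map(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	/*
	 * Deprecated form:
	 *   nsg = pci_map_sg(pdev, sg, nents, PCI_DMA_BIDIRECTIONAL);
	 *
	 * Generic DMA API replacement: pass the underlying struct device
	 * and the DMA_* direction constant instead.
	 */
	int nsg = dma_map_sg(&pdev->dev, sg, nents, DMA_BIDIRECTIONAL);

	if (nsg <= 0)
		return -ENOMEM;	/* mapping failed */

	return nsg;
}

/* Illustrative wrapper: undo the mapping above. */
static void example_unmap(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	/* Was: pci_unmap_sg(pdev, sg, nents, PCI_DMA_BIDIRECTIONAL); */
	dma_unmap_sg(&pdev->dev, sg, nents, DMA_BIDIRECTIONAL);
}
```

The direction constants map one-to-one (e.g. `PCI_DMA_BIDIRECTIONAL` becomes `DMA_BIDIRECTIONAL`); the only structural change is passing `&pdev->dev` rather than the `struct pci_dev *` itself, which is exactly what the three hunks above do for `dev->persist->pdev`.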