| author | David S. Miller <davem@davemloft.net> | 2018-06-04 22:23:35 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-06-04 22:23:35 -0400 |
| commit | 7d840a606515b04dfb4f13d1abb86dd59163799c (patch) | |
| tree | eb0f4379a452554d4ddc4fc4244b109db4e6f4f9 /drivers/net/ethernet/mellanox/mlx4 | |
| parent | d67b66b45a0593ebd68963c47a302ebfa4981040 (diff) | |
| parent | 885892fb378dc096693557ba4f2b875188619b36 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/icm.c | 18 |
1 file changed, 12 insertions(+), 6 deletions(-)
```diff
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 685337d58276..5342bd8a3d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,13 @@
 #include "fw.h"
 
 /*
- * We allocate in page size (default 4KB on many archs) chunks to avoid high
- * order memory allocations in fragmented/high usage memory situation.
+ * We allocate in as big chunks as we can, up to a maximum of 256 KB
+ * per chunk. Note that the chunks are not necessarily in contiguous
+ * physical memory.
  */
 enum {
-	MLX4_ICM_ALLOC_SIZE	= PAGE_SIZE,
-	MLX4_TABLE_CHUNK_SIZE	= PAGE_SIZE,
+	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
+	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
 };
 
 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	struct mlx4_icm *icm;
 	struct mlx4_icm_chunk *chunk = NULL;
 	int cur_order;
+	gfp_t mask;
 	int ret;
 
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
@@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		while (1 << cur_order > npages)
 			--cur_order;
 
+		mask = gfp_mask;
+		if (cur_order)
+			mask &= ~__GFP_DIRECT_RECLAIM;
+
 		if (coherent)
 			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
 						      &chunk->mem[chunk->npages],
-						      cur_order, gfp_mask);
+						      cur_order, mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask,
+						   cur_order, mask,
 						   dev->numa_node);
 		if (ret) {
```
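The substantive change is the `__GFP_DIRECT_RECLAIM` handling: every chunk request above order 0 (here up to 256 KB, i.e. order 6 with 4 KB pages) is made opportunistically, so on a fragmented system it fails immediately instead of stalling in reclaim/compaction, and the existing retry loop simply drops to a smaller order. The sketch below condenses that allocate-or-shrink loop into one hypothetical helper; `alloc_icm_fallback` is illustrative and not part of the driver:

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper (not in the mlx4 driver) condensing the
 * allocate-or-shrink pattern from the diff above: orders above 0 are
 * tried with direct reclaim masked off so they fail fast on a
 * fragmented system; each failure halves the request, and only the
 * final order-0 attempt is allowed to enter direct reclaim.
 */
static struct page *alloc_icm_fallback(gfp_t gfp_mask, int *cur_order)
{
	while (1) {
		gfp_t mask = gfp_mask;
		struct page *page;

		/* High-order attempts must not stall in reclaim/compaction. */
		if (*cur_order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		page = alloc_pages(mask, *cur_order);
		if (page)
			return page;	/* 2^cur_order contiguous pages */

		if (!*cur_order)
			return NULL;	/* even a single page failed */
		--(*cur_order);		/* retry with a smaller chunk */
	}
}
```

The payoff, as the updated comment suggests, is that 256 KB chunks need far fewer scatterlist entries and firmware mappings per ICM table than 4 KB chunks, while the cleared reclaim flag keeps those large requests from introducing allocation stalls.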