path: root/drivers/infiniband/hw/mlx5/mr.c
author    Leon Romanovsky <leonro@mellanox.com>    2019-01-08 16:07:26 +0200
committer Jason Gunthorpe <jgg@mellanox.com>       2019-01-08 16:41:38 -0700
commit    13859d5df418ea535926e2b57c29d5161c522b9d (patch)
tree      7dc6de94455552d1ee03c5861e499875c99699c2  /drivers/infiniband/hw/mlx5/mr.c
parent    8b4d5bc5cf3f813dc4df5d69c2fcde16c40d8abd (diff)
RDMA/mlx5: Embed into the code flow the ODP config option
Convert various places to more readable code, which embeds
CONFIG_INFINIBAND_ON_DEMAND_PAGING into the code flow.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
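For illustration only (not part of this commit): a minimal sketch of the two
styles the patch converts between, using the hypothetical names
CONFIG_EXAMPLE_FEATURE and example_feature_work().

#include <linux/kconfig.h>	/* IS_ENABLED() */

/* Hypothetical helper standing in for the ODP-specific work. */
static void example_feature_work(void);

static void old_style(void)
{
	/* Preprocessor guard: the body is invisible to the compiler
	 * when the option is off, so it gets no build coverage and
	 * breaks up the normal flow of the function.
	 */
#ifdef CONFIG_EXAMPLE_FEATURE
	example_feature_work();
#endif
}

static void new_style(void)
{
	/* IS_ENABLED() keeps the branch in the normal code flow; the
	 * compiler still parses and type-checks it, then discards it
	 * as dead code when the option is disabled.
	 */
	if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
		example_feature_work();
}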
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c389750f771e..494a90f4348c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -71,10 +71,9 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ /* Wait until all page fault handlers using the mr complete. */
+ synchronize_srcu(&dev->mr_srcu);
return err;
}
@@ -254,9 +253,8 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- synchronize_srcu(&dev->mr_srcu);
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ synchronize_srcu(&dev->mr_srcu);
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
@@ -1329,8 +1327,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (!start && length == U64_MAX) {
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
+ length == U64_MAX) {
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
@@ -1340,7 +1338,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(mr);
return &mr->ibmr;
}
-#endif
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
&page_shift, &ncont, &order);
@@ -1401,9 +1398,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- mr->live = 1;
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ mr->live = 1;
+
return &mr->ibmr;
error:
ib_umem_release(umem);
@@ -1518,9 +1515,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
mr->allocated_from_cache = 0;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- mr->live = 1;
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ mr->live = 1;
} else {
/*
* Send a UMR WQE