author    Ilya Lesokhin <ilyal@mellanox.com>  2017-09-24 21:46:34 +0300
committer Doug Ledford <dledford@redhat.com>  2017-09-25 11:47:24 -0400
commit    d67bc5d4e3e100d762c0f57ea67f28bc219698a6 (patch)
tree      97c09abe7880086ebb17cecb43359b58e75c9605 /drivers/infiniband/hw
parent    7c9d9662103ae1c11acc7bfc47d988466cff23cf (diff)
IB/mlx5: Simplify mlx5_ib_cont_pages
The patch simplifies mlx5_ib_cont_pages and fixes the following issues in the original implementation:

The first issue is related to the alignment of the PFNs. After the check base + p != pfn, the alignment of the PFN wasn't checked, so the PFN sequence 0, 1, 1, 2 would result in a page_shift of 13 even though the third PFN is not 8KB aligned. This wasn't actually a bug, because it was supported by all existing mlx5-compatible devices, but we don't want to require this support in all future devices.

The second issue is that the inner loop didn't advance the PFN, so the test "if (base + p != pfn)" always failed for an SGE with len > (1 << page_shift).

Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Reviewed-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
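For reference, the new per-SGE check can be exercised in isolation with the following userspace sketch. It is not the driver code: struct run, first_bit() and cont_order() are hypothetical stand-ins for the umem scatterlist, the kernel's find_first_bit()/IS_ALIGNED() helpers and the loop body of mlx5_ib_cont_pages, and the starting order is passed in directly instead of being derived from the mapping address.

#include <stdio.h>
#include <stdint.h>

struct run { uint64_t pfn; uint64_t len; };	/* one DMA run: start PFN and length in pages */

/* index of the lowest set bit, i.e. the alignment order of v (64 for v == 0) */
static unsigned int first_bit(uint64_t v)
{
	return v ? (unsigned int)__builtin_ctzll(v) : 64;
}

/* largest order m such that every run starts on a (1 << m)-page boundary
 * and every discontinuity falls on a (1 << m)-page offset
 */
static unsigned int cont_order(const struct run *runs, int n, unsigned int start_order)
{
	uint64_t base = ~0ULL, p = 0;
	unsigned int m = start_order;
	int i;

	for (i = 0; i < n; i++) {
		uint64_t pfn = runs[i].pfn;

		if (base + p != pfn) {
			/* new block: if either the new base or the offset
			 * reached so far is unaligned, shrink m
			 */
			uint64_t tmp = pfn | p;

			if (tmp & ((1ULL << m) - 1))
				m = first_bit(tmp);
			base = pfn;
			p = 0;
		}
		p += runs[i].len;	/* advance by the whole run, as the new code does */
	}
	return m;
}

int main(void)
{
	/* pages 0-3 followed by pages 6-7: the second run starts at PFN 6,
	 * which is only 2-page aligned, so the usable order drops to 1
	 */
	struct run runs[] = { { 0, 4 }, { 6, 2 } };

	printf("order %u\n", cont_order(runs, 2, 2));
	return 0;
}

Run as-is, the sketch prints "order 1": the discontinuity at PFN 6 is only 2-page aligned, so the largest usable page order drops from the starting value of 2 to 1.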
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 47
1 file changed, 17 insertions, 30 deletions
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 914f212e7ef6..f3dbd75a0a96 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 {
 	unsigned long tmp;
 	unsigned long m;
-	int i, k;
-	u64 base = 0;
-	int p = 0;
-	int skip;
-	int mask;
-	u64 len;
-	u64 pfn;
+	u64 base = ~0, p = 0;
+	u64 len, pfn;
+	int i = 0;
 	struct scatterlist *sg;
 	int entry;
 	unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	m = find_first_bit(&tmp, BITS_PER_LONG);
 	if (max_page_shift)
 		m = min_t(unsigned long, max_page_shift - page_shift, m);
-	skip = 1 << m;
-	mask = skip - 1;
-	i = 0;
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		len = sg_dma_len(sg) >> page_shift;
 		pfn = sg_dma_address(sg) >> page_shift;
-		for (k = 0; k < len; k++) {
-			if (!(i & mask)) {
-				tmp = (unsigned long)pfn;
-				m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
-				skip = 1 << m;
-				mask = skip - 1;
-				base = pfn;
-				p = 0;
-			} else {
-				if (base + p != pfn) {
-					tmp = (unsigned long)p;
-					m = find_first_bit(&tmp, BITS_PER_LONG);
-					skip = 1 << m;
-					mask = skip - 1;
-					base = pfn;
-					p = 0;
-				}
-			}
-			p++;
-			i++;
+		if (base + p != pfn) {
+			/* If either the offset or the new
+			 * base are unaligned update m
+			 */
+			tmp = (unsigned long)(pfn | p);
+			if (!IS_ALIGNED(tmp, 1 << m))
+				m = find_first_bit(&tmp, BITS_PER_LONG);
+
+			base = pfn;
+			p = 0;
 		}
+
+		p += len;
+		i += len;
 	}
 	if (i) {