-rw-r--r--  drivers/iommu/iova.c  39
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index c28003e1d2ee..471c48dd71e7 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -154,6 +154,43 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 		iovad->cached_node = rb_next(&free->node);
 }
 
+static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
+{
+	struct rb_node *node, *next;
+	/*
+	 * Ideally what we'd like to judge here is whether limit_pfn is close
+	 * enough to the highest-allocated IOVA that starting the allocation
+	 * walk from the anchor node will be quicker than this initial work to
+	 * find an exact starting point (especially if that ends up being the
+	 * anchor node anyway). This is an incredibly crude approximation which
+	 * only really helps the most likely case, but is at least trivially easy.
+	 */
+	if (limit_pfn > iovad->dma_32bit_pfn)
+		return &iovad->anchor.node;
+
+	node = iovad->rbroot.rb_node;
+	while (to_iova(node)->pfn_hi < limit_pfn)
+		node = node->rb_right;
+
+search_left:
+	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
+		node = node->rb_left;
+
+	if (!node->rb_left)
+		return node;
+
+	next = node->rb_left;
+	while (next->rb_right) {
+		next = next->rb_right;
+		if (to_iova(next)->pfn_lo >= limit_pfn) {
+			node = next;
+			goto search_left;
+		}
+	}
+
+	return node;
+}
+
 /* Insert the iova into domain rbtree by holding writer lock */
 static void
 iova_insert_rbtree(struct rb_root *root, struct iova *iova,
@@ -219,7 +256,7 @@ retry:
 	if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
 		high_pfn = limit_pfn;
 		low_pfn = retry_pfn;
-		curr = &iovad->anchor.node;
+		curr = iova_find_limit(iovad, limit_pfn);
 		curr_iova = to_iova(curr);
 		goto retry;
 	}
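
The new iova_find_limit() descends the rbtree to a starting point at (or just above) limit_pfn, so the retry walk in the second hunk can begin there instead of at the anchor node and skip every allocation above the limit. To see the search in isolation, below is a minimal, self-contained C model of the same descent over a plain binary search tree of disjoint allocated ranges. The range_node type, the find_limit() helper, and the example ranges are hypothetical stand-ins for illustration only; the kernel walks struct rb_node links embedded in struct iova, and its initial rightward loop is guaranteed to terminate by the ever-present anchor node at the top of the address space, which the sentinel below mimics.

#include <stdio.h>

/* Hypothetical stand-in for struct iova + its rb_node (illustration only). */
struct range_node {
	unsigned long pfn_lo, pfn_hi;		/* inclusive allocated range */
	struct range_node *left, *right;	/* BST ordered by pfn_lo */
};

/*
 * Mirror of the diff's search: find the node from which a right-to-left
 * allocation walk capped at @limit should start; with disjoint ranges
 * this lands on the lowest range beginning at or above the limit (or
 * one straddling it), so the walk skips everything higher.
 */
static struct range_node *find_limit(struct range_node *node, unsigned long limit)
{
	struct range_node *next;

	/* Skip subtrees wholly below the limit; terminates because the
	 * rightmost node is a sentinel at ~0UL (the kernel's anchor). */
	while (node->pfn_hi < limit)
		node = node->right;

search_left:
	/* While the left child still begins at/above the limit, it is a
	 * lower valid starting point, so keep descending. */
	while (node->left && node->left->pfn_lo >= limit)
		node = node->left;

	if (!node->left)
		return node;

	/* The left child begins below the limit, but its right spine may
	 * still hold a lower node beginning at/above it. */
	next = node->left;
	while (next->right) {
		next = next->right;
		if (next->pfn_lo >= limit) {
			node = next;
			goto search_left;
		}
	}

	return node;
}

int main(void)
{
	/* Sentinel plays the role of iovad->anchor (pfn ~0UL). */
	struct range_node anchor = { ~0UL, ~0UL, NULL, NULL };
	struct range_node a = { 10, 19, NULL, NULL };
	struct range_node b = { 50, 59, NULL, NULL };
	struct range_node c = { 30, 39, &a, &b };
	struct range_node root = { 70, 79, &c, &anchor };

	/* [50,59]: reached via the right-spine rescan and goto */
	struct range_node *n = find_limit(&root, 45);
	printf("limit 45 -> [%lu,%lu]\n", n->pfn_lo, n->pfn_hi);

	/* [30,39]: reached by the plain leftward descent */
	n = find_limit(&root, 25);
	printf("limit 25 -> [%lu,%lu]\n", n->pfn_lo, n->pfn_hi);
	return 0;
}

With the starting point found this way, the retry path changed in the second hunk no longer walks down from the anchor through every allocation above limit_pfn before reaching candidates that can actually satisfy the request.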