author	John Garry <john.garry@huawei.com>	2020-12-04 02:34:50 +0800
committer	Will Deacon <will@kernel.org>	2020-12-08 14:14:48 +0000
commit	2f24dfb71208eeb0174f08dd56ca6d3c17b279a5 (patch)
tree	82596f8a3c8414f8ce7366b4e6fa1d2b96ef7f1e /drivers/iommu
parent	3a651b3a27a1ee35879499ead3942dc854a20968 (diff)
iommu: Delete split_and_remove_iova()
Function split_and_remove_iova() has not been referenced since commit e70b081c6f37 ("iommu/vt-d: Remove IOVA handling code from the non-dma_ops path"), so delete it.

Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1607020492-189471-2-git-send-email-john.garry@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
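For readers unfamiliar with the removed helper, the sketch below restates its splitting semantics as a small standalone C program: carving a sub-range [lo, hi] out of an existing range leaves up to two leftover pieces on either side, and the carved middle piece is handed back to the caller. This is only an illustration of the behaviour visible in the diff further down; the names struct range and split_range() are hypothetical and do not exist in the kernel, and the sketch deliberately omits the rbtree bookkeeping and locking the real function performed.

#include <stdio.h>

/* Illustrative stand-in for an allocated pfn range. */
struct range { unsigned long lo, hi; };

/*
 * Carve [lo, hi] out of r.  Writes the optional leftover pieces to
 * *prev and *next, and the carved middle piece to *mid.  Returns a
 * bitmask: bit 1 set if *prev is valid, bit 0 set if *next is valid,
 * or -1 if [lo, hi] does not lie inside r.
 */
static int split_range(struct range r, unsigned long lo, unsigned long hi,
		       struct range *prev, struct range *mid, struct range *next)
{
	int have_prev = 0, have_next = 0;

	if (lo < r.lo || hi > r.hi)
		return -1;

	if (r.lo < lo) {			/* leftover below the carved piece */
		*prev = (struct range){ r.lo, lo - 1 };
		have_prev = 1;
	}
	if (r.hi > hi) {			/* leftover above the carved piece */
		*next = (struct range){ hi + 1, r.hi };
		have_next = 1;
	}
	*mid = (struct range){ lo, hi };	/* piece handed back to the caller */

	return (have_prev << 1) | have_next;
}

int main(void)
{
	struct range prev, mid, next;
	int flags = split_range((struct range){ 100, 200 }, 120, 150,
				&prev, &mid, &next);

	if (flags < 0)
		return 1;
	if (flags & 2)
		printf("prev: [%lu, %lu]\n", prev.lo, prev.hi);
	printf("mid:  [%lu, %lu]\n", mid.lo, mid.hi);
	if (flags & 1)
		printf("next: [%lu, %lu]\n", next.lo, next.hi);
	return 0;
}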
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/iova.c	41
1 file changed, 0 insertions, 41 deletions
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index ff59d8aa3041..3331c040b2b0 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -742,47 +742,6 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);
-struct iova *
-split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
-		      unsigned long pfn_lo, unsigned long pfn_hi)
-{
-	unsigned long flags;
-	struct iova *prev = NULL, *next = NULL;
-
-	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	if (iova->pfn_lo < pfn_lo) {
-		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
-		if (prev == NULL)
-			goto error;
-	}
-	if (iova->pfn_hi > pfn_hi) {
-		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
-		if (next == NULL)
-			goto error;
-	}
-
-	__cached_rbnode_delete_update(iovad, iova);
-	rb_erase(&iova->node, &iovad->rbroot);
-
-	if (prev) {
-		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
-		iova->pfn_lo = pfn_lo;
-	}
-	if (next) {
-		iova_insert_rbtree(&iovad->rbroot, next, NULL);
-		iova->pfn_hi = pfn_hi;
-	}
-	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-	return iova;
-
-error:
-	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-	if (prev)
-		free_iova_mem(prev);
-	return NULL;
-}
-
/*
* Magazine caches for IOVA ranges. For an introduction to magazines,
* see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab