author	Will Deacon	2020-12-08 15:01:27 +0000
committer	Will Deacon	2020-12-08 15:01:27 +0000
commit	1ab2bf5831586820a1dbe4425daf1c86a482129d (patch)
tree	158c3d900059118afada62ab2a07e72786a5c5c3 /drivers/iommu
parent	33f974dbaabf1cbe5cb6996bf3f6e395519c15fe (diff)
parent	176cfc187c24287a363e7612cd2385ba40c2042b (diff)
Merge branch 'for-next/iommu/iova' into for-next/iommu/core
IOVA allocator updates for 5.11, including removal of unused symbols and
functions as well as some optimisations to improve allocation behaviour
in the face of fragmentation.

* for-next/iommu/iova:
  iommu: Stop exporting free_iova_mem()
  iommu: Stop exporting alloc_iova_mem()
  iommu: Delete split_and_remove_iova()
  iommu: avoid taking iova_rbtree_lock twice
  iommu/iova: Free global iova rcache on iova alloc failure
  iommu/iova: Retry from last rb tree node if iova search fails
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/iova.c | 100
1 file changed, 47 insertions(+), 53 deletions(-)
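The sketch below is a self-contained toy model of the two-pass search
introduced by "iommu/iova: Retry from last rb tree node if iova search
fails", not the kernel code: a sorted array of reserved ranges stands in
for the iova rbtree, toy_alloc() and its parameters are invented names,
and alignment handling is omitted. Pass 1 scans downward from the cached
node exactly as before; if it finds nothing, pass 2 restarts once from
the top of the address space, clamped to the window [retry_pfn,
limit_pfn) that pass 1 never examined, so no range is scanned twice.

	#include <stdbool.h>
	#include <stdio.h>

	/* One reserved [lo, hi] block; the array is sorted by lo and
	 * non-overlapping, standing in for the iova rbtree. */
	struct range { unsigned long lo, hi; };

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	/*
	 * Find the highest free run of 'size' pfns in [start_pfn,
	 * limit_pfn), beginning the downward scan at index 'cached'
	 * (the analogue of the cached rbtree node).  If nothing below
	 * the cached node fits, retry once from the virtual top node
	 * (i == n, like the anchor), covering only [retry_pfn,
	 * limit_pfn) - the part the first pass never saw.
	 */
	static bool toy_alloc(const struct range *res, int n, int cached,
			      unsigned long start_pfn, unsigned long limit_pfn,
			      unsigned long size, unsigned long *out)
	{
		unsigned long high = limit_pfn, low = start_pfn;
		unsigned long retry_pfn = res[cached].hi + 1;
		int i = cached;

	retry:
		while (i >= 0) {
			/* Free gap below node i: ends under res[i].lo (or
			 * 'high' for the virtual top node), starts above
			 * res[i-1].hi. */
			unsigned long top = (i < n) ? min_ul(high, res[i].lo) : high;
			unsigned long bot = (i > 0) ? res[i - 1].hi + 1 : low;

			if (bot < low)
				bot = low;
			if (top >= size && top - size >= bot) {
				*out = top - size;	/* highest fit wins */
				return true;
			}
			i--;
		}
		if (low == start_pfn && retry_pfn < limit_pfn) {
			high = limit_pfn;	/* second pass: full-height window */
			low = retry_pfn;	/* ...but skip what pass 1 covered */
			i = n;			/* restart from the virtual top */
			goto retry;
		}
		return false;
	}

	int main(void)
	{
		/* Space [0, 100): the only gap big enough for 8 pfns is
		 * [90, 100), above the cached node, invisible to pass 1. */
		const struct range res[] = { { 0, 19 }, { 25, 89 } };
		unsigned long pfn;

		if (toy_alloc(res, 2, 1, 0, 100, 8, &pfn))
			printf("allocated [%lu, %lu]\n", pfn, pfn + 7); /* [92, 99] */
		else
			printf("allocation failed\n");
		return 0;
	}

The point of retry_pfn is that the cached node may start the search well
below limit_pfn, leaving usable space above it unexplored; the second
pass recovers that space without rescanning what the first pass already
rejected.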
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 30d969a4c5fd..4bb3293ae4d7 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -25,6 +25,7 @@ static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
+static void free_global_cached_iovas(struct iova_domain *iovad);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -184,8 +185,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
struct rb_node *curr, *prev;
struct iova *curr_iova;
unsigned long flags;
- unsigned long new_pfn;
+ unsigned long new_pfn, retry_pfn;
unsigned long align_mask = ~0UL;
+ unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

if (size_aligned)
align_mask <<= fls_long(size - 1);
@@ -198,15 +200,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,

curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
+ retry_pfn = curr_iova->pfn_hi + 1;
+
+retry:
do {
- limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
- new_pfn = (limit_pfn - size) & align_mask;
+ high_pfn = min(high_pfn, curr_iova->pfn_lo);
+ new_pfn = (high_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
curr_iova = rb_entry(curr, struct iova, node);
- } while (curr && new_pfn <= curr_iova->pfn_hi);
-
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
+
+ if (high_pfn < size || new_pfn < low_pfn) {
+ if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+ high_pfn = limit_pfn;
+ low_pfn = retry_pfn;
+ curr = &iovad->anchor.node;
+ curr_iova = rb_entry(curr, struct iova, node);
+ goto retry;
+ }
iovad->max32_alloc_size = size;
goto iova32_full;
}
@@ -231,18 +243,16 @@ static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

-struct iova *alloc_iova_mem(void)
+static struct iova *alloc_iova_mem(void)
{
return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
-EXPORT_SYMBOL(alloc_iova_mem);

-void free_iova_mem(struct iova *iova)
+static void free_iova_mem(struct iova *iova)
{
if (iova->pfn_lo != IOVA_ANCHOR)
kmem_cache_free(iova_cache, iova);
}
-EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
@@ -390,10 +400,14 @@ EXPORT_SYMBOL_GPL(__free_iova);
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
- struct iova *iova = find_iova(iovad, pfn);
+ unsigned long flags;
+ struct iova *iova;

+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ iova = private_find_iova(iovad, pfn);
if (iova)
- __free_iova(iovad, iova);
+ private_free_iova(iovad, iova);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(free_iova);

@@ -431,6 +445,7 @@ retry:
flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
+ free_global_cached_iovas(iovad);
goto retry;
}

@@ -725,47 +740,6 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

-struct iova *
-split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
- unsigned long pfn_lo, unsigned long pfn_hi)
-{
- unsigned long flags;
- struct iova *prev = NULL, *next = NULL;
-
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- if (iova->pfn_lo < pfn_lo) {
- prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
- if (prev == NULL)
- goto error;
- }
- if (iova->pfn_hi > pfn_hi) {
- next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
- if (next == NULL)
- goto error;
- }
-
- __cached_rbnode_delete_update(iovad, iova);
- rb_erase(&iova->node, &iovad->rbroot);
-
- if (prev) {
- iova_insert_rbtree(&iovad->rbroot, prev, NULL);
- iova->pfn_lo = pfn_lo;
- }
- if (next) {
- iova_insert_rbtree(&iovad->rbroot, next, NULL);
- iova->pfn_hi = pfn_hi;
- }
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
- return iova;
-
-error:
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- if (prev)
- free_iova_mem(prev);
- return NULL;
-}
-
/*
* Magazine caches for IOVA ranges. For an introduction to magazines,
* see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
@@ -1046,5 +1020,25 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
}
}

+/*
+ * free all the IOVA ranges of global cache
+ */
+static void free_global_cached_iovas(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_irqsave(&rcache->lock, flags);
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ }
+ rcache->depot_size = 0;
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
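For completeness, a toy of the other behavioural change, the fallback
path in alloc_iova_fast() shown in the @@ -431 hunk above: previously
only the per-CPU rcaches were flushed before the single retry; the
series also returns the global depot (free_global_cached_iovas() above)
so its hoarded IOVA space becomes allocatable again. Everything below
(alloc_fast, tree_alloc, the counters) is invented for illustration;
only the flush-then-retry-once shape follows the kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	#define NCPUS 4

	/* Toy caches: the real rcaches hold magazines of IOVA ranges;
	 * here a nonzero counter just means "space is hoarded". */
	static int percpu_cached[NCPUS];
	static int depot_size;

	/* Stand-in for the rbtree allocator: in this toy it can only
	 * succeed once the global depot has handed its space back. */
	static bool tree_alloc(unsigned long *pfn)
	{
		if (depot_size > 0)
			return false;
		*pfn = 42;		/* arbitrary pfn for the demo */
		return true;
	}

	static void free_cpu_cached(int cpu)
	{
		percpu_cached[cpu] = 0;	/* like free_cpu_cached_iovas() */
	}

	static void free_global_cached(void)
	{
		depot_size = 0;		/* like free_global_cached_iovas() */
	}

	static bool alloc_fast(unsigned long *pfn)
	{
		bool flushed = false;
		int cpu;

	retry:
		if (tree_alloc(pfn))
			return true;
		if (flushed)		/* already flushed once: give up */
			return false;
		for (cpu = 0; cpu < NCPUS; cpu++)
			free_cpu_cached(cpu);
		free_global_cached();	/* the step this series adds */
		flushed = true;
		goto retry;
	}

	int main(void)
	{
		unsigned long pfn = 0;

		depot_size = 3;		/* simulate a hoarding global depot */
		if (alloc_fast(&pfn))
			printf("got pfn %lu\n", pfn);
		else
			printf("allocation failed\n");
		return 0;
	}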