Diffstat (limited to 'drivers/infiniband/core/fmr_pool.c')
-rw-r--r--  drivers/infiniband/core/fmr_pool.c  38
1 file changed, 22 insertions, 16 deletions
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7f00347364f7..06d502c06a4d 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
int ret;
- struct ib_pool_fmr *fmr, *next;
+ struct ib_pool_fmr *fmr;
LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list);
@@ -158,20 +158,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
#endif
}
- /*
- * The free_list may hold FMRs that have been put there
- * because they haven't reached the max_remap count.
- * Invalidate their mapping as well.
- */
- list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
- if (fmr->remap_count == 0)
- continue;
- hlist_del_init(&fmr->cache_node);
- fmr->remap_count = 0;
- list_add_tail(&fmr->fmr->list, &fmr_list);
- list_move(&fmr->list, &unmap_list);
- }
-
list_splice(&pool->dirty_list, &unmap_list);
INIT_LIST_HEAD(&pool->dirty_list);
pool->dirty_len = 0;
@@ -384,6 +370,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
i = 0;
list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+ if (fmr->remap_count) {
+ INIT_LIST_HEAD(&fmr_list);
+ list_add_tail(&fmr->fmr->list, &fmr_list);
+ ib_unmap_fmr(&fmr_list);
+ }
ib_dealloc_fmr(fmr->fmr);
list_del(&fmr->list);
kfree(fmr);
@@ -407,8 +398,23 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
*/
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
- int serial = atomic_inc_return(&pool->req_ser);
+ int serial;
+ struct ib_pool_fmr *fmr, *next;
+
+ /*
+ * The free_list holds FMRs that may have been used
+ * but have not been remapped enough times to be dirty.
+ * Put them on the dirty list now so that the cleanup
+ * thread will reap them too.
+ */
+ spin_lock_irq(&pool->pool_lock);
+ list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+ if (fmr->remap_count > 0)
+ list_move(&fmr->list, &pool->dirty_list);
+ }
+ spin_unlock_irq(&pool->pool_lock);
+ serial = atomic_inc_return(&pool->req_ser);
wake_up_process(pool->thread);
if (wait_event_interruptible(pool->force_wait,
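
Below is a minimal, self-contained user-space sketch (not the kernel code itself) of the list handling this patch moves into ib_flush_fmr_pool(): entries on the free list that still carry a mapping (remap_count > 0) are moved onto the dirty list, so a later cleanup pass can batch-unmap them. The struct and helper names (struct node, list_move_tail(), and so on) are simplified stand-ins for the kernel's struct ib_pool_fmr and list.h helpers, and the walk that the kernel performs under pool->pool_lock is shown here without locking.

/*
 * Sketch of the free-list -> dirty-list handoff used by the patch.
 * Simplified stand-ins for the kernel's intrusive list helpers.
 */
#include <stdio.h>

struct node {
	struct node *prev, *next;
	int remap_count;	/* > 0 means the entry still has a live mapping */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_del_entry(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Rough equivalent of list_move(): unlink, then append to another list. */
static void list_move_tail(struct node *n, struct node *head)
{
	list_del_entry(n);
	list_add_tail(n, head);
}

int main(void)
{
	struct node free_list, dirty_list, fmrs[4];
	struct node *pos, *next;
	int i, dirty = 0;

	list_init(&free_list);
	list_init(&dirty_list);

	/* Two of the four "FMRs" still have live mappings. */
	for (i = 0; i < 4; i++) {
		fmrs[i].remap_count = i % 2;
		list_add_tail(&fmrs[i], &free_list);
	}

	/*
	 * The flush step: move every mapped entry from the free list to the
	 * dirty list.  The next pointer is captured before each move, the
	 * same reason the kernel uses list_for_each_entry_safe() here.
	 */
	for (pos = free_list.next, next = pos->next; pos != &free_list;
	     pos = next, next = pos->next) {
		if (pos->remap_count > 0)
			list_move_tail(pos, &dirty_list);
	}

	for (pos = dirty_list.next; pos != &dirty_list; pos = pos->next)
		dirty++;

	printf("%d FMRs queued for unmapping\n", dirty);
	return 0;
}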