author    Wenwei Tao <ww.tao0320@gmail.com>    2016-01-12 07:49:25 +0100
committer Jens Axboe <axboe@fb.com>            2016-01-12 08:21:16 -0700
commit    b262924be03d5d2ae735bc9a4b37eb2c613f61f8 (patch)
tree      d9128e64b6cd635084725a75d295c8aeb6ed6a35 /drivers
parent    d0ca798f960ad7d86f5186fe312c131d00563eb7 (diff)
lightnvm: fix locking and mempool in rrpc_lun_gc
This patch fixes two issues in rrpc_lun_gc:

1. prio_list is protected by the rrpc_lun's lock, not the nvm_lun's, so
acquire rlun's lock instead of lun's before operating on the list.

2. The block was deleted from prio_list before the gcb was allocated, but
gcb allocation may fail; in that case the block was never put back on the
list, so it could never be reclaimed. To fix this, delete the block only
after the gcb allocation succeeds (a minimal sketch of this ordering
follows below).
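To make the ordering concrete, here is a minimal userspace sketch of the same
pattern, not the driver code itself: malloc() stands in for mempool_alloc(), a
pthread mutex stands in for the per-rlun spinlock, and the block, list, and
function names are simplified stand-ins. The point is that the work item is
allocated before the block is unlinked from the priority list, so an allocation
failure leaves the block queued for a later GC pass instead of stranding it.

/* Sketch of the "allocate before unlink" ordering introduced by this patch.
 * All names are stand-ins: malloc() plays the role of mempool_alloc(), and a
 * pthread mutex plays the role of the rlun spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct block {
	int id;
	struct block *next;		/* link on the priority list */
};

struct gc_work {
	struct block *blk;		/* block scheduled for reclaim */
};

static struct block *prio_list;		/* blocks eligible for GC */
static pthread_mutex_t prio_lock = PTHREAD_MUTEX_INITIALIZER;

/* Schedule one block for GC. Returns 0 on success, -1 on failure; on
 * failure the block stays on prio_list and is retried by a later pass.
 */
static int schedule_one_gc(void)
{
	struct gc_work *gcb;
	struct block *blk;

	pthread_mutex_lock(&prio_lock);	/* the list is protected by this lock */
	blk = prio_list;
	if (!blk) {
		pthread_mutex_unlock(&prio_lock);
		return -1;
	}

	/* Allocate the work item first ... */
	gcb = malloc(sizeof(*gcb));
	if (!gcb) {
		/* ... so a failure leaves blk on the list, not leaked. */
		pthread_mutex_unlock(&prio_lock);
		return -1;
	}

	/* ... and only then unlink the block from the priority list. */
	prio_list = blk->next;
	blk->next = NULL;

	pthread_mutex_unlock(&prio_lock);

	gcb->blk = blk;
	printf("scheduled block %d for GC\n", gcb->blk->id);
	free(gcb);			/* a real driver would queue the work here */
	return 0;
}

int main(void)
{
	struct block b = { .id = 42, .next = NULL };

	prio_list = &b;
	return schedule_one_gc() ? 1 : 0;
}

The actual patch below applies exactly this reordering to rrpc_lun_gc, and
additionally takes rlun->lock rather than lun->lock around the list walk.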
Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers')
 drivers/lightnvm/rrpc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index bee2352fcce1..745acd9db523 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -445,7 +445,7 @@ static void rrpc_lun_gc(struct work_struct *work)
 	if (nr_blocks_need < rrpc->nr_luns)
 		nr_blocks_need = rrpc->nr_luns;
 
-	spin_lock(&lun->lock);
+	spin_lock(&rlun->lock);
 	while (nr_blocks_need > lun->nr_free_blocks &&
 					!list_empty(&rlun->prio_list)) {
 		struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -454,16 +454,16 @@ static void rrpc_lun_gc(struct work_struct *work)
 		if (!rblock->nr_invalid_pages)
 			break;
 
+		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+		if (!gcb)
+			break;
+
 		list_del_init(&rblock->prio);
 
 		BUG_ON(!block_is_full(rrpc, rblock));
 
 		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
 
-		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
-		if (!gcb)
-			break;
-
 		gcb->rrpc = rrpc;
 		gcb->rblk = rblock;
 		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -472,7 +472,7 @@ static void rrpc_lun_gc(struct work_struct *work)
 
 		nr_blocks_need--;
 	}
-	spin_unlock(&lun->lock);
+	spin_unlock(&rlun->lock);
 
 	/* TODO: Hint that request queue can be started again */
 }