author     Ming Lin <ming.l@ssi.samsung.com>    2016-04-25 14:20:18 -0700
committer  Jens Axboe <axboe@fb.com>            2016-05-02 09:16:11 -0600
commit     0bf77e9dbb5247ae159342db6f8fdb48aba24b56
tree       82a18a51d2bd642265a0bc9a01490f7dab31a3a9
parent     a5b714ad395803a6aa91793b9e52a81b176b8ba9
nvme: switch to RCU freeing the namespace
Switch to RCU freeing of the namespace structure so that
nvme_start_queues, nvme_stop_queues and nvme_kill_queues can get away
with only an RCU read-side critical section.

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
 drivers/nvme/host/core.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
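As context for the diff below, here is a minimal, self-contained sketch of
the pattern the patch adopts: writers still serialize through a mutex but
use the _rcu list helpers, readers walk the list under rcu_read_lock() only,
and removal waits out a grace period with synchronize_rcu() before dropping
the list's reference. All names here (demo_ns, demo_add, demo_remove,
demo_for_each) are hypothetical and not part of the NVMe driver; note also
that the sketch uses list_del_rcu(), the canonical RCU removal helper,
whereas the patch itself keeps list_del_init().

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_ns {
	struct list_head list;
	struct kref kref;
};

static LIST_HEAD(demo_namespaces);
static DEFINE_MUTEX(demo_mutex);

static void demo_free_ns(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ns, kref));
}

/* Writer: publish a new entry; updaters still serialize on the mutex. */
static void demo_add(struct demo_ns *ns)
{
	kref_init(&ns->kref);
	mutex_lock(&demo_mutex);
	list_add_tail_rcu(&ns->list, &demo_namespaces);
	mutex_unlock(&demo_mutex);
}

/*
 * Writer: unlink, then wait until every in-flight RCU reader has left
 * its read-side critical section before dropping the list's reference,
 * so no reader can ever dereference a freed entry.
 */
static void demo_remove(struct demo_ns *ns)
{
	mutex_lock(&demo_mutex);
	list_del_rcu(&ns->list);
	mutex_unlock(&demo_mutex);
	synchronize_rcu();
	kref_put(&ns->kref, demo_free_ns);
}

/*
 * Reader: no mutex needed.  kref_get_unless_zero() skips entries whose
 * last reference is already gone (the same guard nvme_kill_queues()
 * uses).  fn() must not sleep, since it runs under rcu_read_lock().
 */
static void demo_for_each(void (*fn)(struct demo_ns *))
{
	struct demo_ns *ns;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &demo_namespaces, list) {
		if (!kref_get_unless_zero(&ns->kref))
			continue;
		fn(ns);
		kref_put(&ns->kref, demo_free_ns);
	}
	rcu_read_unlock();
}

The upshot in the patch is that nvme_start_queues, nvme_stop_queues and
nvme_kill_queues no longer contend on namespaces_mutex: they only bracket
the traversal with rcu_read_lock()/rcu_read_unlock(), while namespace
add/remove keeps the mutex and pays the grace-period wait in
synchronize_rcu().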
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3cf366ab66e9..3428c0232403 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1429,7 +1429,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
- list_add_tail(&ns->list, &ctrl->namespaces);
+ list_add_tail_rcu(&ns->list, &ctrl->namespaces);
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
@@ -1467,6 +1467,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
mutex_unlock(&ns->ctrl->namespaces_mutex);
+ synchronize_rcu();
nvme_put_ns(ns);
}
@@ -1751,8 +1752,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (!kref_get_unless_zero(&ns->kref))
continue;
@@ -1769,7 +1770,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
nvme_put_ns(ns);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -1777,8 +1778,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
spin_lock_irq(ns->queue->queue_lock);
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
spin_unlock_irq(ns->queue->queue_lock);
@@ -1786,7 +1787,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
blk_mq_cancel_requeue_work(ns->queue);
blk_mq_stop_hw_queues(ns->queue);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -1794,13 +1795,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nvme_start_queues);