author    | Keith Busch <keith.busch@intel.com> | 2015-01-07 18:55:50 -0700
committer | Jens Axboe <axboe@fb.com>           | 2015-01-08 09:02:08 -0700
commit    | 0fb59cbc5f133207535b25ec7d16fba24d549ee2 (patch)
tree      | c47b5e191b5c7289732ffd203b0b7e782ffa3cd6 /drivers
parent    | ea191d2f36b0f577ce5377c3e72aedc34282969d (diff)
NVMe: Admin queue removal handling
This protects admin queue access on shutdown. When the controller is
disabled, the admin queue is frozen to prevent new submissions and is
unfrozen on resume. The cq_vector signedness is also fixed so that a
queue is not suspended twice.
Since unfreezing the queue makes it available for commands, the queue
must already be initialized, so the unfreeze is moved to after queue
initialization.
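The freeze/unfreeze pair is the stock blk-mq mechanism rather than anything
NVMe-specific. A minimal sketch of the pattern, assuming only a
struct request_queue * such as the driver's dev->admin_q; the helper names
are illustrative, and only the two blk_mq_* calls come from the patch below:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Illustrative sketch, not driver code: on controller disable, stop new
 * requests from entering the admin queue. blk_mq_freeze_queue_start()
 * only begins the freeze; it does not wait for in-flight requests.
 */
static void admin_queue_quiesce(struct request_queue *admin_q)
{
	if (admin_q)
		blk_mq_freeze_queue_start(admin_q);
}

/*
 * Illustrative sketch: on resume, the queue must already be initialized,
 * because unfreezing immediately makes it available for new commands.
 */
static void admin_queue_resume(struct request_queue *admin_q)
{
	if (admin_q)
		blk_mq_unfreeze_queue(admin_q);
}

In the patch itself the freeze is started from nvme_disable_queue(dev, 0)
and the unfreeze happens in nvme_alloc_admin_tags() once the admin queue is
known to be set up.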
Special handling is done when the device is unresponsive during
shutdown. This could be optimized so that subsequent commands do not need
to time out, but that fix is saved for later.
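The unresponsive-device path amounts to a bounded wait that gives up once
deletion commands stop completing. A rough, self-contained sketch of that
shape, assuming placeholder state (outstanding, timeout, worker) rather than
the driver's nvme_delq_ctx:

#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/*
 * Sketch only, not the driver code: wait for outstanding queue-deletion
 * commands. If the wait times out or a fatal signal arrives, assume the
 * controller is dead: disable it, keep the admin queue alive until the
 * worker has flushed every pending deletion request (each of which then
 * fails by timing out, the optimization deferred above), and only then
 * tear the admin queue down.
 */
static void wait_for_deletions(atomic_t *outstanding, long timeout,
			       struct kthread_worker *worker)
{
	while (1) {
		set_current_state(TASK_KILLABLE);
		if (!atomic_read(outstanding))
			break;
		if (!schedule_timeout(timeout) ||
		    fatal_signal_pending(current)) {
			set_current_state(TASK_RUNNING);
			/* disable the controller here; admin queue stays usable */
			flush_kthread_worker(worker);
			/* now the admin queue can be disabled as well */
			return;
		}
	}
	set_current_state(TASK_RUNNING);
}

In the patch this is nvme_wait_dq(): nvme_disable_ctrl() runs before the
flush and nvme_disable_queue(dev, 0) runs after it.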
This patch also removes the kill signals in this path; they were left-over
artifacts from the blk-mq conversion and are no longer necessary.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/block/nvme-core.c | 34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index beb8d48f8560..5fcb993fc6c9 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1183,6 +1183,8 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 		adapter_delete_sq(dev, qid);
 		adapter_delete_cq(dev, qid);
 	}
+	if (!qid && dev->admin_q)
+		blk_mq_freeze_queue_start(dev->admin_q);
 	nvme_clear_queue(nvmeq);
 }
 
@@ -1400,7 +1402,8 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 			nvme_dev_remove_admin(dev);
 			return -ENODEV;
 		}
-	}
+	} else
+		blk_mq_unfreeze_queue(dev->admin_q);
 
 	return 0;
 }
@@ -1459,19 +1462,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	if (result)
 		goto free_nvmeq;
 
-	result = nvme_alloc_admin_tags(dev);
-	if (result)
-		goto free_nvmeq;
-
 	nvmeq->cq_vector = 0;
 	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
 	if (result)
-		goto free_tags;
+		goto free_nvmeq;
 
 	return result;
 
- free_tags:
-	nvme_dev_remove_admin(dev);
 free_nvmeq:
 	nvme_free_queues(dev, 0);
 	return result;
@@ -2256,13 +2253,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
 			break;
 		if (!schedule_timeout(ADMIN_TIMEOUT) ||
 					fatal_signal_pending(current)) {
+			/*
+			 * Disable the controller first since we can't trust it
+			 * at this point, but leave the admin queue enabled
+			 * until all queue deletion requests are flushed.
+			 * FIXME: This may take a while if there are more h/w
+			 * queues than admin tags.
+			 */
 			set_current_state(TASK_RUNNING);
-
 			nvme_disable_ctrl(dev, readq(&dev->bar->cap));
-			nvme_disable_queue(dev, 0);
-
-			send_sig(SIGKILL, dq->worker->task, 1);
+			nvme_clear_queue(dev->queues[0]);
 			flush_kthread_worker(dq->worker);
+			nvme_disable_queue(dev, 0);
 			return;
 		}
 	}
@@ -2339,7 +2341,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
 {
 	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
 							cmdinfo.work);
-	allow_signal(SIGKILL);
 	if (nvme_delete_sq(nvmeq))
 		nvme_del_queue_end(nvmeq);
 }
@@ -2607,15 +2608,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
 	}
 
 	nvme_init_queue(dev->queues[0], 0);
+	result = nvme_alloc_admin_tags(dev);
+	if (result)
+		goto disable;
 
 	result = nvme_setup_io_queues(dev);
 	if (result)
-		goto disable;
+		goto free_tags;
 
 	nvme_set_irq_hints(dev);
 
 	return result;
 
+ free_tags:
+	nvme_dev_remove_admin(dev);
  disable:
 	nvme_disable_queue(dev, 0);
 	nvme_dev_list_remove(dev);