author		Keith Busch	2016-01-12 14:41:18 -0700
committer	Jens Axboe	2016-01-12 14:47:59 -0700
commit		a5cdb68c2c10f0865122656833cd07636a4143ee (patch)
tree		50c5b0762821995b884990770a3364e22082de16 /drivers/nvme
parent		db3cbfff5bcc0b9a82d8c71f00b9d60fad215871 (diff)
NVMe: Shutdown controller only for power-off
We don't need to shutdown a controller for a reset. A controller in a
shutdown state may take longer to become ready than one that was simply
disabled. This patch has the driver shut down a controller only if the
device is about to be powered off or is being removed. When taking the
controller down for a reset, the controller will be disabled instead.
Function names have been updated in this patch to reflect their changed
semantics.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
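For context, the distinction between the two teardown paths is visible at the register level defined by the NVMe specification: a plain disable clears CC.EN and waits for CSTS.RDY to drop, while a shutdown sets CC.SHN and waits for CSTS.SHST to report completion, which a controller may take noticeably longer to recover from. The following is only an illustrative sketch of those register semantics, not the driver code itself; reg_read32()/reg_write32() are hypothetical stand-ins for the driver's MMIO accessors, and the unbounded polling loops are simplified (the real driver bounds the waits).

#include <stdint.h>

#define NVME_REG_CC		0x14	/* Controller Configuration */
#define NVME_REG_CSTS		0x1c	/* Controller Status */

#define NVME_CC_ENABLE		(1u << 0)	/* CC.EN */
#define NVME_CC_SHN_NORMAL	(1u << 14)	/* CC.SHN = 01b (normal shutdown) */
#define NVME_CSTS_RDY		(1u << 0)	/* CSTS.RDY */
#define NVME_CSTS_SHST_MASK	(3u << 2)	/* CSTS.SHST */
#define NVME_CSTS_SHST_CMPLT	(2u << 2)	/* shutdown processing complete */

/* Hypothetical MMIO accessors standing in for readl()/writel() on dev->bar. */
extern uint32_t reg_read32(uint32_t off);
extern void reg_write32(uint32_t off, uint32_t val);

/* Reset path: clear CC.EN and wait for the controller to report not-ready. */
static void ctrl_disable(void)
{
	reg_write32(NVME_REG_CC, reg_read32(NVME_REG_CC) & ~NVME_CC_ENABLE);
	while (reg_read32(NVME_REG_CSTS) & NVME_CSTS_RDY)
		;	/* real code bounds this wait using CAP.TO */
}

/* Power-off/removal path: request a normal shutdown, wait for SHST=complete. */
static void ctrl_shutdown(void)
{
	reg_write32(NVME_REG_CC, reg_read32(NVME_REG_CC) | NVME_CC_SHN_NORMAL);
	while ((reg_read32(NVME_REG_CSTS) & NVME_CSTS_SHST_MASK) !=
			NVME_CSTS_SHST_CMPLT)
		;	/* a shutdown can take much longer than a plain disable */
}

This is the distinction that the new shutdown parameter to nvme_dev_disable() selects between in the patch below.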
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/pci.c	40
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72f284ff42b6..8ff6ac5cafbe 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -87,7 +87,7 @@ struct nvme_queue;
 static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
-static void nvme_dev_shutdown(struct nvme_dev *dev);
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
@@ -932,7 +932,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		dev_warn(dev->dev,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
-		nvme_dev_shutdown(dev);
+		nvme_dev_disable(dev, false);
 		req->errors = NVME_SC_CANCELLED;
 		return BLK_EH_HANDLED;
 	}
@@ -946,7 +946,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		dev_warn(dev->dev,
 			 "I/O %d QID %d timeout, reset controller\n",
 			 req->tag, nvmeq->qid);
-		nvme_dev_shutdown(dev);
+		nvme_dev_disable(dev, false);
 		queue_work(nvme_workq, &dev->reset_work);
 
 		/*
@@ -1065,21 +1065,20 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
 	spin_unlock_irq(&nvmeq->q_lock);
 }
 
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = dev->queues[qid];
+	struct nvme_queue *nvmeq = dev->queues[0];
 
 	if (!nvmeq)
 		return;
 	if (nvme_suspend_queue(nvmeq))
 		return;
 
-	/* Don't tell the adapter to delete the admin queue.
-	 * Don't tell a removed adapter to delete IO queues. */
-	if (qid && readl(dev->bar + NVME_REG_CSTS) != -1) {
-		adapter_delete_sq(dev, qid);
-		adapter_delete_cq(dev, qid);
-	}
+	if (shutdown)
+		nvme_shutdown_ctrl(&dev->ctrl);
+	else
+		nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
+						dev->bar + NVME_REG_CAP));
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_process_cq(nvmeq);
@@ -1818,7 +1817,7 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
 		kthread_stop(tmp);
 }
 
-static void nvme_dev_shutdown(struct nvme_dev *dev)
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
 	int i;
 	u32 csts = -1;
@@ -1837,8 +1836,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 		}
 	} else {
 		nvme_disable_io_queues(dev);
-		nvme_shutdown_ctrl(&dev->ctrl);
-		nvme_disable_queue(dev, 0);
+		nvme_disable_admin_queue(dev, shutdown);
 	}
 	nvme_dev_unmap(dev);
 
@@ -1897,7 +1895,7 @@ static void nvme_reset_work(struct work_struct *work)
 	 * moving on.
 	 */
 	if (dev->bar)
-		nvme_dev_shutdown(dev);
+		nvme_dev_disable(dev, false);
 
 	set_bit(NVME_CTRL_RESETTING, &dev->flags);
 
@@ -1951,7 +1949,7 @@ static void nvme_reset_work(struct work_struct *work)
 	dev->ctrl.admin_q = NULL;
 	dev->queues[0]->tags = NULL;
  disable:
-	nvme_disable_queue(dev, 0);
+	nvme_disable_admin_queue(dev, false);
  unmap:
 	nvme_dev_unmap(dev);
  out:
@@ -2086,7 +2084,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
 	if (prepare)
-		nvme_dev_shutdown(dev);
+		nvme_dev_disable(dev, false);
 	else
 		queue_work(nvme_workq, &dev->reset_work);
 }
@@ -2094,7 +2092,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 static void nvme_shutdown(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
-	nvme_dev_shutdown(dev);
+	nvme_dev_disable(dev, true);
 }
 
 static void nvme_remove(struct pci_dev *pdev)
@@ -2110,7 +2108,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	flush_work(&dev->scan_work);
 	nvme_remove_namespaces(&dev->ctrl);
 	nvme_uninit_ctrl(&dev->ctrl);
-	nvme_dev_shutdown(dev);
+	nvme_dev_disable(dev, true);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
 	nvme_release_cmb(dev);
@@ -2124,7 +2122,7 @@ static int nvme_suspend(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	nvme_dev_shutdown(ndev);
+	nvme_dev_disable(ndev, true);
 	return 0;
 }
 
@@ -2155,7 +2153,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
-		nvme_dev_shutdown(dev);
+		nvme_dev_disable(dev, false);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		return PCI_ERS_RESULT_DISCONNECT;
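One detail worth noting in the new nvme_disable_admin_queue(): the raw CAP value read with lo_hi_readq(dev->bar + NVME_REG_CAP) is passed to nvme_disable_ctrl() so the wait for CSTS.RDY to clear can be bounded. Per the NVMe specification, CAP.TO (bits 31:24) encodes the worst-case ready-transition time in 500 ms units. Below is a minimal sketch of that derivation; the helper name and the millisecond result are illustrative only, not the kernel's exact jiffies arithmetic.

#include <stdint.h>

/* CAP.TO: bits 31:24 of the 64-bit Controller Capabilities register,
 * expressed in units of 500 ms. */
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)

/* Illustrative helper: worst-case time, in milliseconds, to wait for
 * CSTS.RDY to change after toggling CC.EN. */
static inline unsigned int nvme_ready_timeout_ms(uint64_t cap)
{
	return ((unsigned int)NVME_CAP_TIMEOUT(cap) + 1) * 500;
}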