| | |
|---|---|
| author | Christoph Hellwig, 2019-03-03 08:15:19 -0700 |
| committer | Christoph Hellwig, 2019-04-05 08:07:57 +0200 |
| commit | 7fe07d14f71fabef642a478626248a9121e95b7b |
| tree | 6fb84a6aabae9d416daa65e252a289260843553b |
| parent | 915f04c93db4e3a7388c8ad8ddfc28830e4cbce3 |
nvme-pci: merge nvme_free_iod into nvme_unmap_data
This means we now have a single function that undoes everything nvme_map_data
does, and we can simplify the error handling a bit (a standalone sketch of this
pattern follows the diffstat below).
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
-rw-r--r-- | drivers/nvme/host/pci.c | 44
1 file changed, 17 insertions(+), 27 deletions(-)
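
The error-handling point in the commit message deserves a concrete illustration.
Below is a minimal, self-contained userspace sketch of the pattern this commit
moves to; it is not the kernel code, and every name in it (`ctx`, `setup`,
`teardown`) is hypothetical. The idea: when the teardown function inspects what
was actually set up and undoes only that, every failure path in the setup
function can share a single `out` label instead of needing one label per
partially-completed state.

```c
#include <stdlib.h>

/* Hypothetical illustration only, not kernel code.
 * Callers must start from a zeroed struct: struct ctx c = {0}; */
struct ctx {
	void *data_map;	/* NULL until data is "mapped" */
	void *meta_map;	/* NULL until metadata is "mapped" */
};

/* Undoes everything setup() did, however far it got. */
static void teardown(struct ctx *c)
{
	free(c->meta_map);	/* free(NULL) is a no-op */
	free(c->data_map);
	c->data_map = c->meta_map = NULL;
}

static int setup(struct ctx *c)
{
	c->data_map = malloc(64);
	if (!c->data_map)
		goto out;
	c->meta_map = malloc(16);
	if (!c->meta_map)
		goto out;	/* same label: teardown() inspects the state */
	return 0;
out:
	teardown(c);
	return -1;
}
```

This mirrors the diff below: the four `goto out_unmap` sites become `goto out`,
the separate `out_unmap:` label disappears, and nvme_unmap_data guards each
unmap with state (`iod->nents`, `blk_integrity_rq(req)`) recorded during
mapping.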
```diff
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2af6cfbd77ec..de199aff8d05 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -583,14 +583,24 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
         return true;
 }
 
-static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+        enum dma_data_direction dma_dir = rq_data_dir(req) ?
+                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
         const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
         dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
-
         int i;
 
+        if (iod->nents) {
+                /* P2PDMA requests do not need to be unmapped */
+                if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+                        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+
+                if (blk_integrity_rq(req))
+                        dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
+        }
+
         if (iod->npages == 0)
                 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
                         dma_addr);
@@ -847,50 +857,30 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
 
         if (ret != BLK_STS_OK)
-                goto out_unmap;
+                goto out;
 
         ret = BLK_STS_IOERR;
         if (blk_integrity_rq(req)) {
                 if (blk_rq_count_integrity_sg(q, req->bio) != 1)
-                        goto out_unmap;
+                        goto out;
 
                 sg_init_table(&iod->meta_sg, 1);
                 if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
-                        goto out_unmap;
+                        goto out;
 
                 if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
-                        goto out_unmap;
+                        goto out;
 
                 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
         }
 
         return BLK_STS_OK;
 
-out_unmap:
-        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
 out:
-        nvme_free_iod(dev, req);
+        nvme_unmap_data(dev, req);
         return ret;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
-{
-        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-        enum dma_data_direction dma_dir = rq_data_dir(req) ?
-                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
-        if (iod->nents) {
-                /* P2PDMA requests do not need to be unmapped */
-                if (!is_pci_p2pdma_page(sg_page(iod->sg)))
-                        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-
-                if (blk_integrity_rq(req))
-                        dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
-        }
-
-        nvme_free_iod(dev, req);
-}
-
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
```
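
A note on why the merged function is safe to call from any failure point in
nvme_map_data: each teardown step is keyed off state that the mapping path
recorded, so nothing is unmapped unless it was mapped. Paraphrasing the guard
structure of the merged nvme_unmap_data above, with explanatory comments added:

```c
	if (iod->nents) {	/* data scatterlist was mapped */
		/* P2PDMA requests do not need to be unmapped */
		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);

		if (blk_integrity_rq(req))	/* request carries metadata */
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
	}
```

After these guarded unmaps, the PRP/SGL descriptor pages are returned to the
DMA pools (the `prp_small_pool` single-page case is visible in the first hunk's
tail), which is the work the old nvme_free_iod did on its own.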