diff options
author    Keith Busch <2020-05-20 19:22:53 -0700>
committer Christoph Hellwig <2020-05-27 07:12:40 +0200>
commit    3b2a1ebceba3e03b17ef0970bb7757a3a64cdc8b (patch)
tree      9e63c3314af9b9cbc2e0128c171de8b933c60569 /drivers/nvme
parent    1cdf9f7670a7d74e27177d5c390c2f8b3b9ba338 (diff)
nvme: set dma alignment to qword
The default dma alignment mask is 511, which is much larger than any nvme
controller requires. NVMe controllers accept qword aligned DMA addresses,
so set the request_queue constraints to that. This can help avoid bounce
buffers on user passthrough commands.
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c | 1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a3a4dbc59af1..569671e264b5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2322,6 +2322,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+	blk_queue_dma_alignment(q, 7);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
 	blk_queue_write_cache(q, vwc, vwc);