author     Christoph Hellwig   2020-11-06 19:19:33 +0100
committer  Jason Gunthorpe     2020-11-12 13:33:43 -0400
commit     b116c702791a9834e6485f67ca6267d9fdf59b87 (patch)
tree       05b7ce8cc48a6e565e824563826d07129e1ae264
parent     fbb7dc5db6dee553b5a07c27e86364a5223e244c (diff)
RDMA/umem: Use ib_dma_max_seg_size instead of dma_get_max_seg_size
RDMA ULPs must not call DMA mapping APIs directly but instead use the
ib_dma_* wrappers.

Fixes: 0c16d9635e3a ("RDMA/umem: Move to allocate SG table from pages")
Link: https://lore.kernel.org/r/20201106181941.1878556-3-hch@lst.de
Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
 drivers/infiniband/core/umem.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index f1fc7e39c782..7ca4112e3e8f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -229,10 +229,10 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 		cur_base += ret * PAGE_SIZE;
 		npages -= ret;
-		sg = __sg_alloc_table_from_pages(
-			&umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
-			dma_get_max_seg_size(device->dma_device), sg, npages,
-			GFP_KERNEL);
+		sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
+						 0, ret << PAGE_SHIFT,
+						 ib_dma_max_seg_size(device), sg, npages,
+						 GFP_KERNEL);
 		umem->sg_nents = umem->sg_head.nents;
 		if (IS_ERR(sg)) {
 			unpin_user_pages_dirty_lock(page_list, ret, 0);