author     Alexander Lobakin <alobakin@marvell.com>   2020-07-23 01:10:39 +0300
committer  David S. Miller <davem@davemloft.net>      2020-07-22 18:19:03 -0700
commit     155065866bc36f20061c55fd2ca287a466911b16 (patch)
tree       96fc377365da74e239e44e26875680d5f6878b85
parent     b6db3f71c976ea92361dbc7ebfb65db666ac9f02 (diff)
qed: add support for different page sizes for chains
Extend the current infrastructure to store the chain page size in
struct qed_chain and use it in all functions instead of the fixed
QED_CHAIN_PAGE_SIZE. Its value defaults to QED_CHAIN_PAGE_SIZE, but
can now be overridden in qed_chain_init_params before chain allocation
(see the usage sketch after the sign-offs).
Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
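
For illustration, a minimal sketch of the new knob in use, assuming a caller shaped like the existing qed consumers. The 8 KiB page size, the element count, and the eth_rx_bd element type are hypothetical, picked only to show a non-default page size:

    /* Hypothetical caller: allocate a PBL-mode chain backed by 8 KiB pages.
     * Leaving .page_size at zero keeps the old behaviour, since
     * qed_chain_alloc() now falls back to QED_CHAIN_PAGE_SIZE (4 KiB).
     */
    struct qed_chain_init_params params = {
            .mode         = QED_CHAIN_MODE_PBL,
            .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
            .cnt_type     = QED_CHAIN_CNT_TYPE_U16,
            .page_size    = SZ_8K,                     /* hypothetical non-default size */
            .num_elems    = 4096,                      /* hypothetical element count */
            .elem_size    = sizeof(struct eth_rx_bd),  /* hypothetical element type */
    };
    struct qed_chain chain;
    int rc;

    rc = qed_chain_alloc(cdev, &chain, &params);
    if (rc)
            return rc;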
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c           |  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_chain.c  | 28
-rw-r--r--  include/linux/qed/qed_chain.h                | 21
3 files changed, 33 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6737895a0d68..49b8a43e3fa2 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1960,9 +1960,11 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
 						     QEDR_SQE_ELEMENT_SIZE,
+						     QED_CHAIN_PAGE_SIZE,
 						     QED_CHAIN_MODE_PBL);
 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
 						     QEDR_RQE_ELEMENT_SIZE,
+						     QED_CHAIN_PAGE_SIZE,
 						     QED_CHAIN_MODE_PBL);
 
 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index a68ee4b3dbbc..f8efd36d66e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -18,8 +18,10 @@ static void qed_chain_init(struct qed_chain *chain,
 	chain->mode = params->mode;
 	chain->cnt_type = params->cnt_type;
 
-	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size);
+	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
+					      params->page_size);
 	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
+						       params->page_size,
 						       params->mode);
 	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
 						       params->mode);
@@ -28,6 +30,7 @@ static void qed_chain_init(struct qed_chain *chain,
 	chain->next_page_mask = chain->usable_per_page &
 				chain->elem_per_page_mask;
 
+	chain->page_size = params->page_size;
 	chain->page_cnt = page_cnt;
 	chain->capacity = chain->usable_per_page * page_cnt;
 	chain->size = chain->elem_per_page * page_cnt;
@@ -82,7 +85,7 @@ static void qed_chain_free_next_ptr(struct qed_dev *cdev,
 		virt_next = next->next_virt;
 		phys_next = HILO_DMA_REGPAIR(next->next_phys);
 
-		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);
+		dma_free_coherent(dev, chain->page_size, virt, phys);
 
 		virt = virt_next;
 		phys = phys_next;
@@ -95,7 +98,7 @@ static void qed_chain_free_single(struct qed_dev *cdev,
 	if (!chain->p_virt_addr)
 		return;
 
-	dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
+	dma_free_coherent(&cdev->pdev->dev, chain->page_size,
 			  chain->p_virt_addr, chain->p_phys_addr);
 }
 
@@ -113,7 +116,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
 		if (!entry->virt_addr)
 			break;
 
-		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
+		dma_free_coherent(dev, chain->page_size, entry->virt_addr,
 				  entry->dma_map);
 	}
 
@@ -158,7 +161,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
 {
 	u64 chain_size;
 
-	chain_size = ELEMS_PER_PAGE(params->elem_size);
+	chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
 	chain_size *= page_cnt;
 
 	if (!chain_size)
@@ -201,7 +204,7 @@ static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
 	u32 i;
 
 	for (i = 0; i < chain->page_cnt; i++) {
-		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
+		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
 					  GFP_KERNEL);
 		if (!virt)
 			return -ENOMEM;
@@ -232,7 +235,7 @@ static int qed_chain_alloc_single(struct qed_dev *cdev,
 	dma_addr_t phys;
 	void *virt;
 
-	virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
+	virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
 				  &phys, GFP_KERNEL);
 	if (!virt)
 		return -ENOMEM;
@@ -282,7 +285,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
 
 alloc_pages:
 	for (i = 0; i < page_cnt; i++) {
-		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
+		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
 					  GFP_KERNEL);
 		if (!virt)
 			return -ENOMEM;
@@ -318,11 +321,15 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
 	u32 page_cnt;
 	int rc;
 
+	if (!params->page_size)
+		params->page_size = QED_CHAIN_PAGE_SIZE;
+
 	if (params->mode == QED_CHAIN_MODE_SINGLE)
 		page_cnt = 1;
 	else
 		page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
 					      params->elem_size,
+					      params->page_size,
 					      params->mode);
 
 	rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
@@ -330,9 +337,10 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
 		DP_NOTICE(cdev,
 			  "Cannot allocate a chain with the given arguments:\n");
 		DP_NOTICE(cdev,
-			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
 			  params->intended_use, params->mode, params->cnt_type,
-			  params->num_elems, params->elem_size);
+			  params->num_elems, params->elem_size,
+			  params->page_size);
 
 		return rc;
 	}
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index f5cfee0934e5..8a96c361cc19 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -11,6 +11,7 @@
 #include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 
 #include <linux/qed/common_hsi.h>
@@ -120,6 +121,8 @@ struct qed_chain {
 	 * but isn't involved in regular functionality.
 	 */
 
+	u32 page_size;
+
 	/* Base address of a pre-allocated buffer for pbl */
 	struct {
 		__le64 *table_virt;
@@ -147,6 +150,7 @@ struct qed_chain_init_params {
 	enum qed_chain_use_mode intended_use;
 	enum qed_chain_cnt_type cnt_type;
 
+	u32 page_size;
 	u32 num_elems;
 	size_t elem_size;
 
@@ -154,22 +158,23 @@ struct qed_chain_init_params {
 	dma_addr_t ext_pbl_phys;
 };
 
-#define QED_CHAIN_PAGE_SIZE		0x1000
+#define QED_CHAIN_PAGE_SIZE		SZ_4K
 
-#define ELEMS_PER_PAGE(elem_size) \
-	(QED_CHAIN_PAGE_SIZE / (elem_size))
+#define ELEMS_PER_PAGE(elem_size, page_size) \
+	((page_size) / (elem_size))
 
 #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
 	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \
	 0)
 
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
-	((u32)(ELEMS_PER_PAGE(elem_size) - \
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \
+	((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \
 	       UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
 
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
-	DIV_ROUND_UP((elem_cnt), USABLE_ELEMS_PER_PAGE((elem_size), (mode)))
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \
+	DIV_ROUND_UP((elem_cnt), \
+		     USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
 
 #define is_chain_u16(p) \
 	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
@@ -604,7 +609,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
 
 	for (i = 0; i < page_cnt; i++)
 		memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
-		       QED_CHAIN_PAGE_SIZE);
+		       p_chain->page_size);
 }
 
 #endif