author		Selvin Xavier				2023-12-11 16:51:15 -0800
committer	Jakub Kicinski				2023-12-12 16:05:58 -0800
commit		297e625bf89e78c5c1c74c571c5f4315bebc67e6
tree		1760e1f350a6761a11116508b63ad08411612a2f /drivers
parent		6dea3ebe0d226e021970f3552ed37fdcedca8773
bnxt_en: Allocate extra QP backing store memory when RoCE FW reports it
The Fast QP modify destroy RoCE feature requires additional QP entries
in QP context backing store. FW reports the extra count to be
allocated during backing store query. Use this value and allocate extra
memory. Note that this works for both the V1 and V2 backing store
FW APIs.
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20231212005122.2401-7-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
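For context, the allocation flow described in the commit message can be sketched in isolation. The following is a simplified, illustrative C sketch (not driver code) of how the extra fast-QPMD entry count reported by FW during the backing store query is folded into the QP context sizing. The struct qp_ctx_caps, the compute_qp_ctx_entries() helper, and the MAX_EXTRA_QPS macro are hypothetical names invented for this sketch; the field semantics mirror the FW response fields touched in the diff below.

```c
/* Simplified sketch (not driver code): fold the fast-QPMD entry count
 * reported by FW into the number of QP context entries to back.
 * Struct, helper, and macro names here are hypothetical.
 */
#include <stdint.h>

#define MAX_EXTRA_QPS	65536u	/* cap used in the diff for extra RoCE QPs */

struct qp_ctx_caps {
	uint32_t max_entries;		/* qp_max_entries from FW */
	uint16_t l2_entries;		/* qp_max_l2_entries from FW */
	uint16_t qp1_entries;		/* qp_min_qp1_entries from FW */
	uint16_t fast_qpmd_entries;	/* fast_qpmd_qp_num_entries from FW */
};

/* Returns the total QP context entries to allocate; sets *want_fast_qpmd
 * when the backing store config request should enable the fast-QPMD count.
 */
static uint32_t compute_qp_ctx_entries(const struct qp_ctx_caps *caps,
				       int roce_capable, int *want_fast_qpmd)
{
	uint32_t extra_qps = 0;

	*want_fast_qpmd = 0;
	if (roce_capable) {
		uint32_t room = caps->max_entries - caps->l2_entries -
				caps->qp1_entries;

		extra_qps = room < MAX_EXTRA_QPS ? room : MAX_EXTRA_QPS;
		/* FW asked for extra QPs for fast QP modify/destroy */
		extra_qps += caps->fast_qpmd_entries;
		if (caps->fast_qpmd_entries)
			*want_fast_qpmd = 1;
	}
	return caps->l2_entries + caps->qp1_entries + extra_qps;
}
```

The same idea appears in bnxt_alloc_ctx_mem() in the diff: extra_qps is bumped by fast_qpmd_qps, and the QP_FAST_QPMD enable bit is set only when FW reported a non-zero count.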
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72e2bd4611de..42a52ee8c1bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7535,6 +7535,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
 					  (init_mask & (1 << init_idx++)) != 0);
@@ -7672,6 +7673,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->qpc_pg_size_qpc_lvl,
 				      &req->qpc_page_dir);
+
+		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
+			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
@@ -8004,6 +8008,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	u32 num_mr, num_ah;
 	u32 extra_srqs = 0;
 	u32 extra_qps = 0;
+	u32 fast_qpmd_qps;
 	u8 pg_lvl = 1;
 	int i, rc;
 
@@ -8020,14 +8025,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
 	l2_qps = ctxm->qp_l2_entries;
 	qp1_qps = ctxm->qp_qp1_entries;
+	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
 	max_qps = ctxm->max_entries;
 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
 	srqs = ctxm->srq_l2_entries;
 	max_srqs = ctxm->max_entries;
+	ena = 0;
 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
 		pg_lvl = 2;
 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+		/* allocate extra qps if fw supports RoCE fast qp destroy feature */
+		extra_qps += fast_qpmd_qps;
 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+		if (fast_qpmd_qps)
+			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
 	}
 
 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
@@ -8057,7 +8068,6 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	ena = 0;
 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
 		goto skip_rdma;
 
@@ -8074,7 +8084,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
 	if (rc)
 		return rc;
-	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
 
 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
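A note on the ena handling above: the patch moves the `ena = 0` initialization ahead of the RoCE branch so the QP_FAST_QPMD enable bit can be accumulated there, and correspondingly changes the later MRAV assignment to `ena |= ...` so that earlier bit is not clobbered before the enables word reaches the backing store config call. The pattern, sketched here with hypothetical flag values rather than the real FUNC_BACKING_STORE_CFG_REQ_ENABLES_* constants, is simply:

```c
/* Illustrative only: accumulate enable bits instead of assigning, so bits
 * set in earlier branches survive.  Flag values are hypothetical.
 */
#include <stdint.h>

#define ENA_QP_FAST_QPMD	(1u << 0)	/* hypothetical bit values */
#define ENA_MRAV		(1u << 1)

static uint32_t build_enables(int fast_qpmd, int roce)
{
	uint32_t ena = 0;			/* initialize once, up front */

	if (fast_qpmd)
		ena |= ENA_QP_FAST_QPMD;	/* set early in one branch... */
	if (roce)
		ena |= ENA_MRAV;		/* ...and OR, not assign, later */
	return ena;
}
```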