author    Maxim Mikityanskiy <maximmi@nvidia.com>  2022-09-30 09:28:53 -0700
committer Jakub Kicinski <kuba@kernel.org>         2022-10-01 13:30:19 -0700
commit    3f5fe0b2e606ab71d3425c138e311bce60b09543 (patch)
tree      af31b5c1b92be52ddab2ee9d75cebb8b18200f81 /drivers
parent    42847fed55523bebb712bfd7e2c4616db00c3aef (diff)
net/mlx5e: xsk: Use partial batches in legacy RQ with XSK
The previous commit allowed allocating WQE batches in legacy RQ
partially; however, XSK still checks whether there are enough frames in
the fill ring. Remove this check so that batches can also be allocated
partially with XSK.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
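
To make the pattern concrete, below is a minimal user-space sketch of
partial batch allocation, assuming a toy pool in place of the XSK fill
ring; all names here (fake_pool, pool_alloc_frame, alloc_rx_batch) are
hypothetical stand-ins, not the mlx5 driver code.

/* Toy model: allocate up to wqe_bulk frames one by one and report how
 * many succeeded, instead of pre-checking availability and failing the
 * whole batch with -ENOMEM.
 */
#include <stdio.h>

struct fake_pool {
	int frames_left;	/* stands in for the fill ring occupancy */
};

/* Allocate a single frame; fails when the pool is empty. */
static int pool_alloc_frame(struct fake_pool *p)
{
	if (p->frames_left == 0)
		return -1;
	p->frames_left--;
	return 0;
}

/* Returns the number of frames actually allocated; a partial count,
 * including 0, is a valid outcome, so the caller can post what it got
 * and retry the remainder later.
 */
static int alloc_rx_batch(struct fake_pool *p, int wqe_bulk)
{
	int i;

	for (i = 0; i < wqe_bulk; i++) {
		if (pool_alloc_frame(p) < 0)
			break;	/* keep the i frames already allocated */
	}
	return i;
}

int main(void)
{
	struct fake_pool pool = { .frames_left = 5 };
	int count = alloc_rx_batch(&pool, 8);

	/* Posts 5 of the requested 8 instead of failing outright. */
	printf("allocated %d of 8 requested frames\n", count);
	return 0;
}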
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 14 +-------------
1 file changed, 1 insertion(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ffca217b7d7e..80f2b5960782 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -429,17 +429,6 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 	int i;
 
-	if (rq->xsk_pool) {
-		int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
-
-		/* Check in advance that we have enough frames, instead of
-		 * allocating one-by-one, failing and moving frames to the
-		 * Reuse Ring.
-		 */
-		if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
-			return -ENOMEM;
-	}
-
 	for (i = 0; i < wqe_bulk; i++) {
 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
 		struct mlx5e_rx_wqe_cyc *wqe;
@@ -841,8 +830,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 		bulk = wqe_bulk - ((head + wqe_bulk) & rq->wqe.info.wqe_index_mask);
 
 		count = mlx5e_alloc_rx_wqes(rq, head, bulk);
-		if (likely(count > 0))
-			mlx5_wq_cyc_push_n(wq, count);
+		mlx5_wq_cyc_push_n(wq, count);
 		if (unlikely(count != bulk)) {
 			rq->stats->buff_alloc_err++;
 			busy = true;
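
On the caller side (the second hunk), the likely(count > 0) guard can
go away once partial results are the expected case: pushing zero
entries is effectively a no-op, since mlx5_wq_cyc_push_n() only
advances the producer counter by the given count, and a short count
still takes the existing count != bulk path, which bumps
buff_alloc_err and leaves the RQ marked busy so the allocation is
retried later.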