author    Chuck Lever    2024-02-04 18:16:37 -0500
committer Chuck Lever    2024-03-01 09:12:25 -0500
commit    e67792cc96ef8e98364f509a25a6392a7c962f90 (patch)
tree      ae4a07c353f2a9609f18bab5cf9d67a64c8f1b49 /net
parent    c8004c1ca442d99a7afdcc4238c7e6eeb30c214b (diff)
svcrdma: Reserve an extra WQE for ib_drain_rq()
Do as other ULPs already do: ensure there is an extra Receive WQE
reserved for the tear-down drain WR. I haven't heard reports of
problems, but it can't hurt.

Note that rq_depth is used to compute the Send Queue depth as well,
so this fix should affect both the SQ and RQ.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
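For background, ib_drain_rq() drains a QP's Receive Queue by posting one
final Receive WR and blocking until its flush completion arrives; that
drain WR is what the extra WQE reserved here accounts for. Below is a
minimal sketch of the convention, not this patch's code: the helper and
parameter names (my_rq_depth, my_create_qp, my_teardown) are
hypothetical, while struct ib_qp_init_attr, ib_create_qp(), and
ib_drain_rq() are the in-kernel verbs API.

#include <rdma/ib_verbs.h>

/* Hypothetical helper: compute the Receive Queue depth with one
 * spare WQE so the drain WR can never overflow the RQ.
 */
static u32 my_rq_depth(u32 max_requests, u32 max_bc_requests,
		       u32 recv_batch)
{
	return max_requests + max_bc_requests + recv_batch + 1; /* drain */
}

/* Hypothetical setup: carry that depth into the QP's capabilities.
 * As the patch notes, the server derives its Send Queue depth from
 * rq_depth as well, so the reservation benefits both queues.
 */
static struct ib_qp *my_create_qp(struct ib_pd *pd, struct ib_cq *cq,
				  u32 rq_depth)
{
	struct ib_qp_init_attr qp_attr = {
		.qp_type	= IB_QPT_RC,
		.send_cq	= cq,
		.recv_cq	= cq,
		.cap = {
			.max_send_wr	= rq_depth,	/* SQ sized from rq_depth */
			.max_recv_wr	= rq_depth,	/* includes the drain WQE */
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &qp_attr);
}

/* Hypothetical tear-down path: ib_drain_rq() posts the one extra
 * Receive WR that the reserved WQE exists for.
 */
static void my_teardown(struct ib_qp *qp)
{
	ib_drain_rq(qp);
}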
Diffstat (limited to 'net')
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4f27325ace4a..4a038c7e86f9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -415,7 +415,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +
- newxprt->sc_recv_batch;
+ newxprt->sc_recv_batch + 1 /* drain */;
if (rq_depth > dev->attrs.max_qp_wr) {
rq_depth = dev->attrs.max_qp_wr;
newxprt->sc_recv_batch = 1;