diff options
author    Bob Pearson  2021-04-01 19:10:17 -0500
committer Jason Gunthorpe  2021-04-08 15:59:28 -0300
commit    ea492251894073cbb366c9fcd227f62cbaa7ef4b (patch)
tree      0f5df91a09fd4a09f1e57fad77c81ce20e6fcad7 /drivers/infiniband/sw
parent    7d8f346504ebde71d92905e3055d40ea8f34416e (diff)
RDMA/rxe: Fix missing acks from responder
All responder errors caused by request packets that do not consume a receive
WQE fail to generate acks for RC QPs. This patch corrects that behavior by
making the flow follow the same path as request packets that do consume a
WQE, so the ack is sent after the completion is posted.
Link: https://lore.kernel.org/r/20210402001016.3210-1-rpearson@hpe.com
Link: https://lore.kernel.org/linux-rdma/1a7286ac-bcea-40fb-2267-480134dd301b@gmail.com/
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c |  1 -
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 18 ++++++++----------
2 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index a612b335baa0..2af26737d32d 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -676,7 +676,6 @@ int rxe_completer(void *arg)
 
 			/* there is nothing to retry in this case */
 			if (!wqe || (wqe->state == wqe_state_posted)) {
-				pr_warn("Retry attempted without a valid wqe\n");
 				ret = -EAGAIN;
 				goto done;
 			}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 8e237b623b31..2b220659bddb 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -816,8 +816,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	struct rxe_recv_wqe *wqe = qp->resp.wqe;
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
-	if (unlikely(!wqe))
-		return RESPST_CLEANUP;
+	if (!wqe)
+		goto finish;
 
 	memset(&cqe, 0, sizeof(cqe));
 
@@ -917,12 +917,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
 		return RESPST_ERR_CQ_OVERFLOW;
 
-	if (qp->resp.state == QP_STATE_ERROR)
+finish:
+	if (unlikely(qp->resp.state == QP_STATE_ERROR))
 		return RESPST_CHK_RESOURCE;
-
-	if (!pkt)
+	if (unlikely(!pkt))
 		return RESPST_DONE;
-	else if (qp_type(qp) == IB_QPT_RC)
+	if (qp_type(qp) == IB_QPT_RC)
 		return RESPST_ACKNOWLEDGE;
 	else
 		return RESPST_CLEANUP;
@@ -1056,10 +1056,8 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 	if (pkt->mask & RXE_SEND_MASK ||
 	    pkt->mask & RXE_WRITE_MASK) {
 		/* SEND. Ack again and cleanup. C9-105. */
-		if (bth_ack(pkt))
-			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
-		rc = RESPST_CLEANUP;
-		goto out;
+		send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+		return RESPST_CLEANUP;
 	} else if (pkt->mask & RXE_READ_MASK) {
 		struct resp_res *res;