author     Chuck Lever                               2018-05-07 15:27:32 -0400
committer  J. Bruce Fields                           2018-05-11 15:48:57 -0400
commit     1e5f4160745690a0476929d128a336cae95c1df9 (patch)
tree       d8a7e8ad1aab3a1e343ca0f1de79a3806b057dd4 /net/sunrpc
parent     2c577bfea85e421bfa91df16ccf5156361aa8d4b (diff)
svcrdma: Simplify svc_rdma_recv_ctxt_put
Currently svc_rdma_recv_ctxt_put's callers have to know whether they
want to free the ctxt's pages or not, which means developers have to
know when and why to set the free_pages argument at each call site.
Instead, the ctxt should carry that information with it so that
svc_rdma_recv_ctxt_put does the right thing no matter who is
calling.
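
In other words, the call sites collapse like this (a sketch distilled
from the hunks below, not additional patch content):

	/* Before: each caller decides whether the pages are freed */
	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);	/* release rc_pages */
	svc_rdma_recv_ctxt_put(rdma, ctxt, 0);	/* pages owned elsewhere */

	/* After: the ctxt records how many pages it still owns */
	svc_rdma_recv_ctxt_put(rdma, ctxt);	/* frees rc_page_count pages */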
We want to keep track of the number of pages in the Receive buffer
separately from the number of pages pulled over by RDMA Read, so that
the correct number of pages is freed in each case and that number is
well-documented.
So now, rc_hdr_count is the number of pages consumed by head[0]
(i.e., the page index where the Read chunk should start), and
rc_page_count is always the number of pages that need to be released
when the ctxt is put.
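
In code terms, paraphrasing the svc_rdma_build_arg_xdr hunk below:

	ctxt->rc_hdr_count = sge_no;	/* pages consumed by head[0] */
	...
	/* Remaining pages were released or moved to rqstp->rq_pages,
	 * so a later svc_rdma_recv_ctxt_put frees nothing.
	 */
	ctxt->rc_page_count = 0;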
The @free_pages argument is no longer needed.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc')

 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 41
 net/sunrpc/xprtrdma/svc_rdma_rw.c       |  4
 2 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index b7d9c55ee896..ecfe7c90a268 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -175,18 +175,15 @@ static void svc_rdma_recv_ctxt_unmap(struct svcxprt_rdma *rdma,
  * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
  * @rdma: controlling svcxprt_rdma
  * @ctxt: object to return to the free list
- * @free_pages: Non-zero if rc_pages should be freed
  *
  */
 void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
-			    struct svc_rdma_recv_ctxt *ctxt,
-			    int free_pages)
+			    struct svc_rdma_recv_ctxt *ctxt)
 {
 	unsigned int i;
 
-	if (free_pages)
-		for (i = 0; i < ctxt->rc_page_count; i++)
-			put_page(ctxt->rc_pages[i]);
+	for (i = 0; i < ctxt->rc_page_count; i++)
+		put_page(ctxt->rc_pages[i]);
 	spin_lock(&rdma->sc_recv_lock);
 	list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
 	spin_unlock(&rdma->sc_recv_lock);
@@ -243,11 +240,11 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
 
 err_put_ctxt:
 	svc_rdma_recv_ctxt_unmap(rdma, ctxt);
-	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+	svc_rdma_recv_ctxt_put(rdma, ctxt);
 	return -ENOMEM;
 err_post:
 	svc_rdma_recv_ctxt_unmap(rdma, ctxt);
-	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+	svc_rdma_recv_ctxt_put(rdma, ctxt);
 	svc_xprt_put(&rdma->sc_xprt);
 	return ret;
 }
@@ -316,7 +313,7 @@ flushed:
 		       ib_wc_status_msg(wc->status),
 		       wc->status, wc->vendor_err);
 post_err:
-	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+	svc_rdma_recv_ctxt_put(rdma, ctxt);
 	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 	svc_xprt_enqueue(&rdma->sc_xprt);
 out:
@@ -334,11 +331,11 @@ void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
 
 	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
 		list_del(&ctxt->rc_list);
-		svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+		svc_rdma_recv_ctxt_put(rdma, ctxt);
 	}
 	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
 		list_del(&ctxt->rc_list);
-		svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
+		svc_rdma_recv_ctxt_put(rdma, ctxt);
 	}
 }
 
@@ -383,16 +380,19 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
 		len -= min_t(u32, len, ctxt->rc_sges[sge_no].length);
 		sge_no++;
 	}
+	ctxt->rc_hdr_count = sge_no;
 	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
 	/* If not all pages were used from the SGL, free the remaining ones */
-	len = sge_no;
 	while (sge_no < ctxt->rc_recv_wr.num_sge) {
 		page = ctxt->rc_pages[sge_no++];
 		put_page(page);
 	}
-	ctxt->rc_page_count = len;
+
+	/* @ctxt's pages have all been released or moved to @rqstp->rq_pages.
+	 */
+	ctxt->rc_page_count = 0;
 
 	/* Set up tail */
 	rqstp->rq_arg.tail[0].iov_base = NULL;
@@ -602,11 +602,14 @@ static void rdma_read_complete(struct svc_rqst *rqstp,
 {
 	int page_no;
 
-	/* Copy RPC pages */
+	/* Move Read chunk pages to rqstp so that they will be released
+	 * when svc_process is done with them.
+	 */
 	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
 		put_page(rqstp->rq_pages[page_no]);
 		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
 	}
+	head->rc_page_count = 0;
 
 	/* Point rq_arg.pages past header */
 	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
@@ -777,7 +780,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	if (svc_rdma_is_backchannel_reply(xprt, p)) {
 		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
 					       &rqstp->rq_arg);
-		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
+		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
 		return ret;
 	}
 
@@ -786,7 +789,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		goto out_readchunk;
 
 complete:
-	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
+	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
 	rqstp->rq_prot = IPPROTO_MAX;
 	svc_xprt_copy_addrs(rqstp, xprt);
 	return rqstp->rq_arg.len;
@@ -799,16 +802,16 @@ out_readchunk:
 
 out_err:
 	svc_rdma_send_error(rdma_xprt, p, ret);
-	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
+	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
 	return 0;
 
 out_postfail:
 	if (ret == -EINVAL)
 		svc_rdma_send_error(rdma_xprt, p, ret);
-	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);
+	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
 	return ret;
 
 out_drop:
-	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);
+	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
 	return 0;
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index c080ce20ff40..8242aa318ac1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -282,7 +282,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
 		pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
 		       ib_wc_status_msg(wc->status),
 		       wc->status, wc->vendor_err);
-		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt, 1);
+		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
 	} else {
 		spin_lock(&rdma->sc_rq_dto_lock);
 		list_add_tail(&info->ri_readctxt->rc_list,
@@ -834,7 +834,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 	 * head->rc_arg. Pages involved with RDMA Read I/O are
 	 * transferred there.
 	 */
-	head->rc_hdr_count = head->rc_page_count;
+	head->rc_page_count = head->rc_hdr_count;
 	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
 	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
 	head->rc_arg.pages = head->rc_pages;
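
As a reading aid, the page-ownership hand-offs established by the hunks
above can be summarized like this (commentary only, not part of the
patch):

	/* Inline path (svc_rdma_build_arg_xdr):
	 *	pages move to rqstp->rq_pages; rc_page_count = 0, so the
	 *	_put at "complete:" releases nothing.
	 *
	 * Read chunk path (svc_rdma_recv_read_chunk):
	 *	rc_page_count = rc_hdr_count, so a queue flush or a failed
	 *	Read (svc_rdma_wc_read_done) can still release the pages.
	 *
	 * Read completion (rdma_read_complete):
	 *	pages move to rqstp->rq_pages; rc_page_count = 0, and
	 *	svc_process releases them later.
	 */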