author     Matthew Wilcox (Oracle)  2023-06-21 17:45:53 +0100
committer  Andrew Morton            2023-06-23 16:59:30 -0700
commit     76fa88429075667fe76d4905f2f471e0ac3d543c
tree       64d65033cb185dc69e09a6c581ed415b1162b6a4
parent     f8a101ff09a70ec708b66b3f5bd4e7405283d14a
net: convert sunrpc from pagevec to folio_batch
Remove the last usage of pagevecs.  There is a slight change here; we now
free the folio_batch as soon as it fills up instead of freeing the
folio_batch when we try to add a page to a full batch.  This should have
no effect in practice.

Link: https://lkml.kernel.org/r/20230621164557.3510324-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
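The behavioural difference described above comes from the return value of folio_batch_add(): it returns the number of slots still free after the add, so a return of 0 means the batch has just filled up and can be released on the spot, whereas the old pagevec code drained the batch only when the next add found it already full. Below is a minimal sketch of the new idiom outside of sunrpc; the stash_tail_page() helper is hypothetical and only illustrates the pattern the patch adopts.

/*
 * Illustrative sketch only; stash_tail_page() is a made-up helper,
 * not part of the sunrpc code touched by this patch.
 */
#include <linux/mm.h>		/* page_folio() */
#include <linux/pagevec.h>	/* struct folio_batch, folio_batch_add(), __folio_batch_release() */

static void stash_tail_page(struct folio_batch *fbatch, struct page *page)
{
	/*
	 * Old pagevec idiom (removed by this patch): check pagevec_space()
	 * first, call __pagevec_release() if the batch is already full,
	 * then add the page.
	 *
	 * New idiom: add first.  folio_batch_add() returns the number of
	 * slots left, so 0 means the batch filled up on this add and is
	 * released immediately rather than on the next call.
	 */
	if (!folio_batch_add(fbatch, page_folio(page)))
		__folio_batch_release(fbatch);
}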
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/svc.c   10
1 file changed, 5 insertions, 5 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 79967b6925bd..8b9011bbece7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -649,7 +649,7 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
 	if (!rqstp)
 		return rqstp;
 
-	pagevec_init(&rqstp->rq_pvec);
+	folio_batch_init(&rqstp->rq_fbatch);
 
 	__set_bit(RQ_BUSY, &rqstp->rq_flags);
 	rqstp->rq_server = serv;
@@ -860,9 +860,9 @@ bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
 	}
 
 	if (*rqstp->rq_next_page) {
-		if (!pagevec_space(&rqstp->rq_pvec))
-			__pagevec_release(&rqstp->rq_pvec);
-		pagevec_add(&rqstp->rq_pvec, *rqstp->rq_next_page);
+		if (!folio_batch_add(&rqstp->rq_fbatch,
+				page_folio(*rqstp->rq_next_page)))
+			__folio_batch_release(&rqstp->rq_fbatch);
 	}
 
 	get_page(page);
@@ -896,7 +896,7 @@ void svc_rqst_release_pages(struct svc_rqst *rqstp)
 void
 svc_rqst_free(struct svc_rqst *rqstp)
 {
-	pagevec_release(&rqstp->rq_pvec);
+	folio_batch_release(&rqstp->rq_fbatch);
 	svc_release_buffer(rqstp);
 	if (rqstp->rq_scratch_page)
 		put_page(rqstp->rq_scratch_page);