Diffstat (limited to 'fs/netfs')
-rw-r--r--  fs/netfs/internal.h      18
-rw-r--r--  fs/netfs/objects.c       12
-rw-r--r--  fs/netfs/read_helper.c  100
-rw-r--r--  fs/netfs/stats.c          1
4 files changed, 67 insertions, 64 deletions
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 89837e904fa7..54c761bcc8e6 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -6,6 +6,7 @@
*/
#include <linux/netfs.h>
+#include <linux/fscache.h>
#include <trace/events/netfs.h>
#ifdef pr_fmt
@@ -19,8 +20,6 @@
*/
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct file *file,
- const struct netfs_request_ops *ops,
- void *netfs_priv,
loff_t start, size_t len,
enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
@@ -81,6 +80,21 @@ static inline void netfs_stat_d(atomic_t *stat)
#define netfs_stat_d(x) do {} while(0)
#endif
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cookie = ctx->cache;
+
+ return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+ fscache_cookie_enabled(cookie);
+#else
+ return false;
+#endif
+}
+
/*****************************************************************************/
/*
* debug tracing
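The helper above pulls the fscache cookie out of the netfs context that, per the doc comments later in this patch, the filesystem must place contiguous to its VFS inode; netfs_i_context(inode) then locates it by address arithmetic. A minimal sketch of the layout this assumes (the myfs_* names and the netfs_i_context_init() call are illustrative assumptions, not part of this diff):

/* Sketch: a network filesystem's inode under the netfs context API.
 * The context must directly follow the struct inode so that
 * netfs_i_context(inode) can find it.  All myfs_* names are
 * hypothetical.
 */
struct myfs_inode {
	struct inode		vfs_inode;	/* VFS inode, must come first */
	struct netfs_i_context	netfs_ctx;	/* Context, directly after */
	/* ...filesystem-private state... */
};

static void myfs_set_up_inode(struct inode *inode)
{
	/* Install the ops table once at inode set-up time; the read
	 * helpers no longer take an ops argument per call.
	 */
	netfs_i_context_init(inode, &myfs_req_ops);
}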
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index ae18827e156b..657b19e60118 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -13,12 +13,12 @@
*/
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct file *file,
- const struct netfs_request_ops *ops,
- void *netfs_priv,
loff_t start, size_t len,
enum netfs_io_origin origin)
{
static atomic_t debug_ids;
+ struct inode *inode = file ? file_inode(file) : mapping->host;
+ struct netfs_i_context *ctx = netfs_i_context(inode);
struct netfs_io_request *rreq;
int ret;
@@ -29,11 +29,10 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
rreq->start = start;
rreq->len = len;
rreq->origin = origin;
- rreq->netfs_ops = ops;
- rreq->netfs_priv = netfs_priv;
+ rreq->netfs_ops = ctx->ops;
rreq->mapping = mapping;
- rreq->inode = file_inode(file);
- rreq->i_size = i_size_read(rreq->inode);
+ rreq->inode = inode;
+ rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
INIT_LIST_HEAD(&rreq->subrequests);
INIT_WORK(&rreq->work, netfs_rreq_work);
@@ -76,6 +75,7 @@ static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
+
netfs_clear_subrequests(rreq, false);
if (rreq->netfs_priv)
rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index b5176f4320f4..c048cd328ce5 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -14,7 +14,6 @@
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/netfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>
@@ -735,8 +734,6 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
*
* Fulfil a readahead request by drawing data from the cache if possible, or
* the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
@@ -744,35 +741,32 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
* readahead window can be expanded in either direction to a more convenient
* alignment for RPC efficiency or to make storage in the cache feasible.
*
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory. It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
-void netfs_readahead(struct readahead_control *ractl,
- const struct netfs_request_ops *ops,
- void *netfs_priv)
+void netfs_readahead(struct readahead_control *ractl)
{
struct netfs_io_request *rreq;
+ struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
unsigned int debug_index = 0;
int ret;
_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
if (readahead_count(ractl) == 0)
- goto cleanup;
+ return;
rreq = netfs_alloc_request(ractl->mapping, ractl->file,
- ops, netfs_priv,
readahead_pos(ractl),
readahead_length(ractl),
NETFS_READAHEAD);
if (IS_ERR(rreq))
- goto cleanup;
+ return;
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
}
@@ -804,42 +798,35 @@ void netfs_readahead(struct readahead_control *ractl,
cleanup_free:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
return;
-cleanup:
- if (netfs_priv)
- ops->cleanup(ractl->mapping, netfs_priv);
- return;
}
EXPORT_SYMBOL(netfs_readahead);
/**
* netfs_readpage - Helper to manage a readpage request
* @file: The file to read from
- * @folio: The folio to read
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
+ * @subpage: A subpage of the folio to read
*
* Fulfil a readpage request by drawing data from the cache if possible, or the
* netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests
* from different sources will get munged together.
*
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory. It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
-int netfs_readpage(struct file *file,
- struct folio *folio,
- const struct netfs_request_ops *ops,
- void *netfs_priv)
+int netfs_readpage(struct file *file, struct page *subpage)
{
+ struct folio *folio = page_folio(subpage);
+ struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq;
+ struct netfs_i_context *ctx = netfs_i_context(mapping->host);
unsigned int debug_index = 0;
int ret;
_enter("%lx", folio_index(folio));
- rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
+ rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
NETFS_READPAGE);
if (IS_ERR(rreq)) {
@@ -847,8 +834,8 @@ int netfs_readpage(struct file *file,
goto alloc_error;
}
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
folio_unlock(folio);
goto out;
@@ -886,8 +873,6 @@ out:
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
return ret;
alloc_error:
- if (netfs_priv)
- ops->cleanup(folio_file_mapping(folio), netfs_priv);
folio_unlock(folio);
return ret;
}
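With the extra arguments gone, both read helpers now line up with their address_space_operations prototypes: netfs_readahead() takes just the readahead_control, and netfs_readpage() takes (file, page) and resolves the page back to its containing folio itself. A hedged sketch of direct wiring (myfs_aops is a hypothetical name):

/* Sketch: the new signatures match ->readahead() and ->readpage()
 * as they existed at this point, so no per-call wrappers are needed.
 */
static const struct address_space_operations myfs_aops = {
	.readpage	= netfs_readpage,
	.readahead	= netfs_readahead,
	/* ...write paths etc... */
};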
@@ -898,6 +883,7 @@ EXPORT_SYMBOL(netfs_readpage);
* @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
*
* In some cases, write_begin doesn't need to read at all:
* - full folio write
@@ -907,17 +893,27 @@ EXPORT_SYMBOL(netfs_readpage);
* If any of these criteria are met, then zero out the unwritten parts
* of the folio and return true. Otherwise, return false.
*/
-static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+ bool always_fill)
{
struct inode *inode = folio_inode(folio);
loff_t i_size = i_size_read(inode);
size_t offset = offset_in_folio(folio, pos);
+ size_t plen = folio_size(folio);
+
+ if (unlikely(always_fill)) {
+ if (pos - offset + len <= i_size)
+ return false; /* Page entirely before EOF */
+ zero_user_segment(&folio->page, 0, plen);
+ folio_mark_uptodate(folio);
+ return true;
+ }
/* Full folio write */
- if (offset == 0 && len >= folio_size(folio))
+ if (offset == 0 && len >= plen)
return true;
- /* pos beyond last folio in the file */
+ /* Page entirely beyond the end of the file */
if (pos - offset >= i_size)
goto zero_out;
@@ -927,7 +923,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
return false;
zero_out:
- zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
+ zero_user_segments(&folio->page, 0, offset, offset + len, plen);
return true;
}
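A worked example of the skip logic with always_fill false, using illustrative numbers:

/* 4096-byte folio covering file offsets 4096..8191, i_size = 100,
 * write at pos = 4100, len = 8:
 *
 *   offset = offset_in_folio(folio, 4100) = 4
 *   full-folio write?  offset != 0                -> no
 *   folio beyond EOF?  pos - offset = 4096 >= 100 -> yes, zero_out
 *
 * zero_user_segments() clears [0, 4) and [12, 4096), the function
 * returns true, and no read is issued; write_begin's caller will
 * fill bytes [4, 12) itself.
 */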
@@ -940,8 +936,6 @@ zero_out:
* @aop_flags: AOP_* flags
* @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
*
* Pre-read data for a write-begin request by drawing data from the cache if
* possible, or the netfs if not. Space beyond the EOF is zero-filled.
@@ -960,17 +954,18 @@ zero_out:
* should go ahead; unlock the folio and return -EAGAIN to cause the folio to
* be regot; or return an error.
*
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int aop_flags,
- struct folio **_folio, void **_fsdata,
- const struct netfs_request_ops *ops,
- void *netfs_priv)
+ struct folio **_folio, void **_fsdata)
{
struct netfs_io_request *rreq;
+ struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
struct folio *folio;
- struct inode *inode = file_inode(file);
unsigned int debug_index = 0, fgp_flags;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
@@ -986,9 +981,9 @@ retry:
if (!folio)
return -ENOMEM;
- if (ops->check_write_begin) {
+ if (ctx->ops->check_write_begin) {
/* Allow the netfs (eg. ceph) to flush conflicts. */
- ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
+ ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
if (ret < 0) {
trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
if (ret == -EAGAIN)
@@ -1004,13 +999,13 @@ retry:
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
- if (!ops->is_cache_enabled(inode) &&
- netfs_skip_folio_read(folio, pos, len)) {
+ if (!netfs_is_cache_enabled(ctx) &&
+ netfs_skip_folio_read(folio, pos, len, false)) {
netfs_stat(&netfs_n_rh_write_zskip);
goto have_folio_no_wait;
}
- rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
+ rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
NETFS_READ_FOR_WRITE);
if (IS_ERR(rreq)) {
@@ -1019,10 +1014,9 @@ retry:
}
rreq->no_unlock_folio = folio_index(folio);
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
- netfs_priv = NULL;
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto error_put;
}
@@ -1076,8 +1070,6 @@ have_folio:
if (ret < 0)
goto error;
have_folio_no_wait:
- if (netfs_priv)
- ops->cleanup(mapping, netfs_priv);
*_folio = folio;
_leave(" = 0");
return 0;
@@ -1087,8 +1079,6 @@ error_put:
error:
folio_unlock(folio);
folio_put(folio);
- if (netfs_priv)
- ops->cleanup(mapping, netfs_priv);
_leave(" = %d", ret);
return ret;
}
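Under the new API a netfs's ->write_begin() can shrink to a thin wrapper, since the ops table and private state now travel with the inode. A sketch against the write_begin prototype of this era (myfs_write_begin is hypothetical, and the folio-to-page conversion is one plausible way to satisfy the pre-folio aop):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int aop_flags,
			    struct page **pagep, void **fsdata)
{
	struct folio *folio;
	int ret;

	/* All per-request state now comes from the inode's netfs
	 * context, so only the VFS arguments are passed through.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, aop_flags,
				&folio, fsdata);
	if (ret == 0)
		*pagep = folio_file_page(folio, pos >> PAGE_SHIFT);
	return ret;
}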
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 9ae538c85378..5510a7a14a40 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/seq_file.h>
-#include <linux/netfs.h>
#include "internal.h"
atomic_t netfs_n_rh_readahead;