-rw-r--r--  Documentation/filesystems/locking.rst  10
-rw-r--r--  Documentation/filesystems/vfs.rst       8
-rw-r--r--  include/linux/fs.h                       5
-rw-r--r--  mm/truncate.c                            8
4 files changed, 17 insertions, 14 deletions
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 8e9cbc0fb70f..dee512efb458 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -257,7 +257,7 @@ prototypes::
bool (*isolate_page) (struct page *, isolate_mode_t);
int (*migratepage)(struct address_space *, struct page *, struct page *);
void (*putback_page) (struct page *);
- int (*launder_page)(struct page *);
+ int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
int (*error_remove_page)(struct address_space *, struct page *);
int (*swap_activate)(struct file *);
@@ -285,7 +285,7 @@ direct_IO:
isolate_page: yes
migratepage: yes (both)
putback_page: yes
-launder_page: yes
+launder_folio: yes
is_partially_uptodate: yes
error_remove_page: yes
swap_activate: no
@@ -385,9 +385,9 @@ the kernel assumes that the fs has no private interest in the buffers.
->freepage() is called when the kernel is done dropping the page
from the page cache.
-->launder_page() may be called prior to releasing a page if
-it is still found to be dirty. It returns zero if the page was successfully
-cleaned, or an error value if not. Note that in order to prevent the page
+->launder_folio() may be called prior to releasing a folio if
+it is still found to be dirty. It returns zero if the folio was successfully
+cleaned, or an error value if not. Note that in order to prevent the folio
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.
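[Editorial illustration, not part of the patch: a minimal sketch of a filesystem-side implementation of the new hook. myfs_write_folio_back() is an assumed name standing in for the filesystem's own writeback routine, returning 0 on success or a negative errno.]::

    /* Hypothetical example; the myfs_* names are not real kernel symbols. */
    static int myfs_launder_folio(struct folio *folio)
    {
            /*
             * The caller holds the folio lock across the whole operation,
             * so the folio cannot be mapped back in and redirtied while
             * it is being written out.
             */
            if (!folio_test_dirty(folio))
                    return 0;

            return myfs_write_folio_back(folio);
    }

    static const struct address_space_operations myfs_aops = {
            /* ... other operations ... */
            .launder_folio  = myfs_launder_folio,
    };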
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 28704831652c..c54ca4d88ed6 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -745,7 +745,7 @@ cache in your filesystem. The following members are defined:
int (*migratepage) (struct page *, struct page *);
/* put migration-failed page back to right list */
void (*putback_page) (struct page *);
- int (*launder_page) (struct page *);
+ int (*launder_folio) (struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
@@ -930,9 +930,9 @@ cache in your filesystem. The following members are defined:
``putback_page``
Called by the VM when isolated page's migration fails.
-``launder_page``
- Called before freeing a page - it writes back the dirty page.
- To prevent redirtying the page, it is kept locked during the
+``launder_folio``
+ Called before freeing a folio - it writes back the dirty folio.
+ To prevent redirtying the folio, it is kept locked during the
whole operation.
``is_partially_uptodate``
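[Purely illustrative, editorial: a filesystem whose old ->launder_page() delegated to a page-based writeback helper could adopt the folio hook before that helper is folio-aware, passing the head page down with &folio->page. myfs_clean_page() is an assumed name.]::

    static int myfs_launder_folio(struct folio *folio)
    {
            /* Reuse the existing page-based writeback path for now. */
            return myfs_clean_page(&folio->page);
    }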
diff --git a/include/linux/fs.h b/include/linux/fs.h
index af9ae091bd82..0af3075cdff2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -399,7 +399,10 @@ struct address_space_operations {
struct page *, struct page *, enum migrate_mode);
bool (*isolate_page)(struct page *, isolate_mode_t);
void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
+ union {
+ int (*launder_page) (struct page *);
+ int (*launder_folio) (struct folio *);
+ };
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
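[Editorial note on the union above, not from the commit message: the two members share a single slot, so an unconverted filesystem can still initialize .launder_page while a converted one sets .launder_folio, and the caller presumably only has to test one pointer, as mm/truncate.c does below. A folio's address is also the address of its head page, so an old launder_page implementation reached through the launder_folio member still receives a pointer it can treat as the page. Sketch with hypothetical names::]

    static const struct address_space_operations myfs_old_aops = {
            .launder_page   = myfs_launder_page,    /* not yet converted */
    };

    static const struct address_space_operations myfs_new_aops = {
            .launder_folio  = myfs_launder_folio,   /* converted */
    };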
diff --git a/mm/truncate.c b/mm/truncate.c
index 8010461a59bd..6ad44b546dff 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -614,13 +614,13 @@ failed:
return 0;
}
-static int do_launder_folio(struct address_space *mapping, struct folio *folio)
+static int folio_launder(struct address_space *mapping, struct folio *folio)
{
if (!folio_test_dirty(folio))
return 0;
- if (folio->mapping != mapping || mapping->a_ops->launder_page == NULL)
+ if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
return 0;
- return mapping->a_ops->launder_page(&folio->page);
+ return mapping->a_ops->launder_folio(folio);
}
/**
@@ -686,7 +686,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
unmap_mapping_folio(folio);
BUG_ON(folio_mapped(folio));
- ret2 = do_launder_folio(mapping, folio);
+ ret2 = folio_launder(mapping, folio);
if (ret2 == 0) {
if (!invalidate_complete_folio2(mapping, folio))
ret2 = -EBUSY;