author    Matthew Wilcox (Oracle)    2023-01-16 19:28:27 +0000
committer Andrew Morton              2023-02-02 22:33:20 -0800
commit    e0650a41f7d024b72669a2a2db846ef70281abd8 (patch)
tree      05a70d4702acffd853add01168d96f8a2dd07a49 /mm/swap.c
parent    672aa27d0bd241759376e62b78abb8aae1792479 (diff)
mm: clean up mlock_page / munlock_page references in comments
Change documentation and comments that refer to now-renamed functions.

Link: https://lkml.kernel.org/r/20230116192827.2146732-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  |  4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 5e4f92700c16..2a51faa34e64 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -201,7 +201,7 @@ static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
* Is an smp_mb__after_atomic() still required here, before
* folio_evictable() tests the mlocked flag, to rule out the possibility
* of stranding an evictable folio on an unevictable LRU? I think
- * not, because __munlock_page() only clears the mlocked flag
+ * not, because __munlock_folio() only clears the mlocked flag
* while the LRU lock is held.
*
* (That is not true of __page_cache_release(), and not necessarily
@@ -216,7 +216,7 @@ static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
folio_set_unevictable(folio);
/*
* folio->mlock_count = !!folio_test_mlocked(folio)?
- * But that leaves __mlock_page() in doubt whether another
+ * But that leaves __mlock_folio() in doubt whether another
* actor has already counted the mlock or not. Err on the
* safe side, underestimate, let page reclaim fix it, rather
* than leaving a page on the unevictable LRU indefinitely.
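
For context, the comment touched by the second hunk sits in the branch of lru_add_fn() that parks a non-evictable folio on the unevictable LRU. A minimal sketch of that branch is shown below; it is an approximation of the surrounding mm/swap.c code around this kernel version (names such as was_unevictable and nr_pages come from the enclosing function), not part of this patch.

	} else {
		/* Folio is not evictable: keep it off the normal LRUs. */
		folio_clear_active(folio);
		folio_set_unevictable(folio);
		/*
		 * Start mlock_count at 0 rather than guessing from the
		 * mlocked flag: underestimating lets __mlock_folio() or
		 * page reclaim correct the count later, as the comment
		 * in the hunk above explains.
		 */
		folio->mlock_count = 0;
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}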