author     Matthew Wilcox (Oracle)                   2021-02-25 17:16:03 -0800
committer  Linus Torvalds                            2021-02-26 09:40:59 -0800
commit     31d270fd98d196578223e5b568a0bd3bc6028b09 (patch)
tree       cd7eb19245298cf2ac5794bdbbefff77afdce0fe /mm
parent     ca122fe40eb463c8c11c3bfc1914f0048ca5c268 (diff)
mm: add an 'end' parameter to pagevec_lookup_entries
Simplifies the callers and uses the existing functionality in
find_get_entries(). We can also drop the final argument of
truncate_exceptional_pvec_entries() and simplify the logic in that
function.
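For illustration only (not part of the patch): a minimal userspace C model of the new calling convention, in which the lookup itself clamps results to an inclusive 'end' so callers no longer shrink the batch size with min() or re-check each returned index against 'end'. The struct, helper name, and values below are invented for this sketch; only the inclusive-'end' idea is taken from the patch.

#include <stdio.h>

#define PVEC_SIZE 15

/* Toy stand-in for the page-cache structures; illustration only. */
struct toy_mapping {
        unsigned long indices[64];
        unsigned nr;
};

/*
 * Model of the new convention: the lookup clamps to 'end' (inclusive),
 * the way find_get_entries() now does for pagevec_lookup_entries().
 */
static unsigned lookup_entries(const struct toy_mapping *m, unsigned long start,
                               unsigned long end, unsigned max,
                               unsigned long *out)
{
        unsigned found = 0;
        unsigned i;

        for (i = 0; i < m->nr && found < max; i++) {
                unsigned long idx = m->indices[i];

                if (idx < start || idx > end)
                        continue;
                out[found++] = idx;
        }
        return found;
}

int main(void)
{
        struct toy_mapping m = { .nr = 0 };
        unsigned long out[PVEC_SIZE];
        unsigned long i;
        unsigned n;

        for (i = 0; i < 40; i++)
                m.indices[m.nr++] = i;

        /*
         * An old-style caller had no 'end' argument, so it shrank the batch
         * with min(end - index, PAGEVEC_SIZE) and re-checked every returned
         * index against 'end'.  A new-style caller just passes the inclusive
         * bound and a full batch.
         */
        n = lookup_entries(&m, 10, 19, PVEC_SIZE, out);

        for (i = 0; i < n; i++)
                printf("%lu ", out[i]);
        printf("(%u entries)\n", n);
        return 0;
}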
Link: https://lkml.kernel.org/r/20201112212641.27837-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/swap.c     |  8
-rw-r--r--  mm/truncate.c | 41
2 files changed, 14 insertions, 35 deletions
diff --git a/mm/swap.c b/mm/swap.c
index c5773a84feab..db8c354264a5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1022,6 +1022,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
  * @pvec:       Where the resulting entries are placed
  * @mapping:    The address_space to search
  * @start:      The starting entry index
+ * @end:        The highest index to return (inclusive).
  * @nr_entries: The maximum number of pages
  * @indices:    The cache indices corresponding to the entries in @pvec
  *
@@ -1042,11 +1043,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
  *      found.
  */
 unsigned pagevec_lookup_entries(struct pagevec *pvec,
-                struct address_space *mapping,
-                pgoff_t start, unsigned nr_entries,
-                pgoff_t *indices)
+                struct address_space *mapping, pgoff_t start, pgoff_t end,
+                unsigned nr_entries, pgoff_t *indices)
 {
-        pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
+        pvec->nr = find_get_entries(mapping, start, end, nr_entries,
                                     pvec->pages, indices);
         return pagevec_count(pvec);
 }
diff --git a/mm/truncate.c b/mm/truncate.c
index de7f4f47f780..60df23890c2d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -57,11 +57,10 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
  * exceptional entries similar to what pagevec_remove_exceptionals does.
  */
 static void truncate_exceptional_pvec_entries(struct address_space *mapping,
-                                struct pagevec *pvec, pgoff_t *indices,
-                                pgoff_t end)
+                                struct pagevec *pvec, pgoff_t *indices)
 {
         int i, j;
-        bool dax, lock;
+        bool dax;
 
         /* Handled by shmem itself */
         if (shmem_mapping(mapping))
@@ -75,8 +74,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                 return;
 
         dax = dax_mapping(mapping);
-        lock = !dax && indices[j] < end;
-        if (lock)
+        if (!dax)
                 xa_lock_irq(&mapping->i_pages);
 
         for (i = j; i < pagevec_count(pvec); i++) {
@@ -88,9 +86,6 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                         continue;
                 }
 
-                if (index >= end)
-                        continue;
-
                 if (unlikely(dax)) {
                         dax_delete_mapping_entry(mapping, index);
                         continue;
@@ -99,7 +94,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                 __clear_shadow_entry(mapping, index, page);
         }
 
-        if (lock)
+        if (!dax)
                 xa_unlock_irq(&mapping->i_pages);
         pvec->nr = j;
 }
@@ -329,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         while (index < end && find_lock_entries(mapping, index, end - 1,
                         &pvec, indices)) {
                 index = indices[pagevec_count(&pvec) - 1] + 1;
-                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
+                truncate_exceptional_pvec_entries(mapping, &pvec, indices);
                 for (i = 0; i < pagevec_count(&pvec); i++)
                         truncate_cleanup_page(mapping, pvec.pages[i]);
                 delete_from_page_cache_batch(mapping, &pvec);
@@ -381,8 +376,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
         index = start;
         for ( ; ; ) {
                 cond_resched();
-                if (!pagevec_lookup_entries(&pvec, mapping, index,
-                        min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+                if (!pagevec_lookup_entries(&pvec, mapping, index, end - 1,
+                                PAGEVEC_SIZE, indices)) {
                         /* If all gone from start onwards, we're done */
                         if (index == start)
                                 break;
@@ -390,23 +385,12 @@ void truncate_inode_pages_range(struct address_space *mapping,
                         index = start;
                         continue;
                 }
-                if (index == start && indices[0] >= end) {
-                        /* All gone out of hole to be punched, we're done */
-                        pagevec_remove_exceptionals(&pvec);
-                        pagevec_release(&pvec);
-                        break;
-                }
 
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
                         /* We rely upon deletion not changing page->index */
                         index = indices[i];
-                        if (index >= end) {
-                                /* Restart punch to make sure all gone */
-                                index = start - 1;
-                                break;
-                        }
 
                         if (xa_is_value(page))
                                 continue;
@@ -417,7 +401,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                         truncate_inode_page(mapping, page);
                         unlock_page(page);
                 }
-                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
+                truncate_exceptional_pvec_entries(mapping, &pvec, indices);
                 pagevec_release(&pvec);
                 index++;
         }
@@ -513,8 +497,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 
                         /* We rely upon deletion not changing page->index */
                         index = indices[i];
-                        if (index > end)
-                                break;
 
                         if (xa_is_value(page)) {
                                 invalidate_exceptional_entry(mapping, index,
@@ -656,16 +638,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
         pagevec_init(&pvec);
         index = start;
-        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
-                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
-                        indices)) {
+        while (pagevec_lookup_entries(&pvec, mapping, index, end,
+                        PAGEVEC_SIZE, indices)) {
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
 
                         /* We rely upon deletion not changing page->index */
                         index = indices[i];
-                        if (index > end)
-                                break;
 
                         if (xa_is_value(page)) {
                                 if (!invalidate_exceptional_entry2(mapping,