author	Pavel Tatashin	2021-05-04 18:38:46 -0700
committer	Linus Torvalds	2021-05-05 11:27:26 -0700
commit	f0f4463837da17a89d965dcbe4e411629dbcf308 (patch)
tree	03e6eb3f8d8ac8a3eef6cb91cf8e5f8646229321 /mm
parent	83c02c23d0747a7bdcd71f99a538aacec94b146c (diff)
mm/gup: return an error on migration failure
When migration failure occurs, we still pin pages, which means that we
may pin CMA movable pages which should never be the case.

Instead return an error without pinning pages when migration failure
happens.

No need to retry migrating, because migrate_pages() already retries
10 times.

Link: https://lkml.kernel.org/r/20210215161349.246722-4-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
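For illustration, here is a minimal, userspace-compilable sketch of the
return-value mapping this patch introduces (the helper name
map_migrate_result() is invented; this is not kernel code):
migrate_pages() returns 0 on success, a positive count of pages it could
not migrate, or a negative errno, and the patch folds the positive case,
which carries no errno, into -ENOMEM.

#include <errno.h>

/*
 * Hypothetical helper mirroring the error mapping the patch adds to
 * check_and_migrate_cma_pages(); for illustration only.
 */
static long map_migrate_result(long migrate_ret)
{
	if (migrate_ret == 0)
		return 0;	/* everything migrated; the pin can be retried */

	/*
	 * Positive means "N pages were not migrated" and carries no errno,
	 * so report -ENOMEM; negative is already an errno, pass it through.
	 */
	return migrate_ret > 0 ? -ENOMEM : migrate_ret;
}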
Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c	17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 16015718c0df..da08c582c216 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1610,7 +1610,6 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 {
 	unsigned long i;
 	bool drain_allow = true;
-	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
 	struct page *prev_head, *head;
@@ -1661,17 +1660,15 @@ check_again:
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
-		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
-			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
-			/*
-			 * some of the pages failed migration. Do get_user_pages
-			 * without migration.
-			 */
-			migrate_allow = false;
-
+		ret = migrate_pages(&cma_page_list, alloc_migration_target,
+				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
+				    MR_CONTIG_RANGE);
+		if (ret) {
 			if (!list_empty(&cma_page_list))
 				putback_movable_pages(&cma_page_list);
+			return ret > 0 ? -ENOMEM : ret;
 		}
+
 		/*
 		 * We did migrate all the pages, Try to get the page references
 		 * again migrating any new CMA pages which we failed to isolate
@@ -1681,7 +1678,7 @@ check_again:
 					      pages, vmas, NULL,
 					      gup_flags);
 
-		if ((ret > 0) && migrate_allow) {
+		if (ret > 0) {
 			nr_pages = ret;
 			drain_allow = true;
 			goto check_again;
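The caller-visible effect, sketched below under stated assumptions:
pin_user_pages() and unpin_user_pages() are the real GUP APIs of this
kernel era (the caller must hold mmap_read_lock()), while the function
name and its partial-pin policy are hypothetical. After this patch, a
FOLL_LONGTERM pin that hits a CMA migration failure returns a negative
error with no pages left pinned, so that path needs no unpinning.

#include <linux/mm.h>

/*
 * Hypothetical caller (name and partial-pin policy invented).
 * Assumes the caller already holds mmap_read_lock().
 */
static long example_longterm_pin(unsigned long start, unsigned long nr_pages,
				 struct page **pages)
{
	long pinned;

	pinned = pin_user_pages(start, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	if (pinned < 0)
		return pinned;	/* e.g. -ENOMEM: CMA migration failed, nothing pinned */

	if (pinned != nr_pages) {
		/* partial pin: drop the pages we did get, then fail */
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}

	return pinned;
}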