aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Hildenbrand2020-07-22 11:45:56 +0200
committerHeiko Carstens2020-07-27 10:34:03 +0200
commitf2057b4266a6be469ea0630971cf3cd933e42cce (patch)
tree586d04fcf9fb067b5103c4755f19b30dc5705db5
parentb9ff81003cf1a0b12b8d60b6ef33a34e84dfe7ac (diff)
s390/vmemmap: fallback to PTEs if mapping large PMD fails
Let's fall back to single pages if short on huge pages. No need to stop memory hotplug. Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com> Signed-off-by: David Hildenbrand <david@redhat.com> Message-Id: <20200722094558.9828-8-david@redhat.com> Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
-rw-r--r--arch/s390/mm/vmem.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b831f9f9130a..e82a63de19db 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -180,10 +180,10 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
*/
new_page = vmemmap_alloc_block(PMD_SIZE,
NUMA_NO_NODE);
- if (!new_page)
- goto out;
- pmd_val(*pmd) = __pa(new_page) | prot;
- continue;
+ if (new_page) {
+ pmd_val(*pmd) = __pa(new_page) | prot;
+ continue;
+ }
}
pte = vmem_pte_alloc();
if (!pte)