author		Hari Bathini <hbathini@linux.ibm.com>	2019-09-11 20:24:28 +0530
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-09-14 00:04:44 +1000
commit		579ca1a27675485a99da50cd7fedc14232f817c3 (patch)
tree		be16c5deaab9899b677ef3fcfa8e838591368468
parent		fbcafdaea2e234d3c6d79e7f5605a2e8373f6678 (diff)
powerpc/fadump: make use of memblock's bottom up allocation mode
Earlier, memblock_find_in_range() could not be used to find the memory to
be reserved for FADump because memblock did not support bottom-up
allocation. Since commit 79442ed189acb8b ("mm/memblock.c: introduce
bottom-up allocation mode"), memblock supports bottom-up allocation, so
use it to find the memory to be reserved for FADump.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/156821364211.5656.14336025460336135194.stgit@hbathini.in.ibm.com
-rw-r--r--	arch/powerpc/kernel/fadump.c	26
1 file changed, 12 insertions, 14 deletions
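As background (not part of this patch), the memblock idiom the change relies
on looks roughly like the sketch below. The helper name find_region_bottom_up()
and its start/end parameters are illustrative only; the memblock calls
themselves are the real kernel API used by the patch.

/* Illustrative sketch only; not part of this patch. */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>		/* PAGE_SIZE, phys_addr_t */

static phys_addr_t __init find_region_bottom_up(phys_addr_t start,
						phys_addr_t end,
						phys_addr_t size,
						phys_addr_t align)
{
	/* Remember the current allocation direction so it can be restored. */
	bool was_bottom_up = memblock_bottom_up();
	phys_addr_t base;

	/* Search from lower addresses up, then put the mode back. */
	memblock_set_bottom_up(true);
	base = memblock_find_in_range(start, end, size, align);
	memblock_set_bottom_up(was_bottom_up);

	return base;	/* 0 means no suitable range was found */
}

memblock_find_in_range() returns 0 when no suitable range exists, which is
why the patch can replace the explicit bounds comparison with a simple
!base check.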
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 852ac4761e90..da751402c649 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -342,7 +342,8 @@ static void __init fadump_reserve_crash_area(unsigned long base,
 
 int __init fadump_reserve_mem(void)
 {
-	u64 base, size, mem_boundary;
+	bool is_memblock_bottom_up = memblock_bottom_up();
+	u64 base, size, mem_boundary, align = PAGE_SIZE;
 	int ret = 1;
 
 	if (!fw_dump.fadump_enabled)
@@ -362,10 +363,11 @@ int __init fadump_reserve_mem(void)
 		fw_dump.boot_memory_size =
 			PAGE_ALIGN(fadump_calculate_reserve_size());
 #ifdef CONFIG_CMA
-		if (!fw_dump.nocma)
+		if (!fw_dump.nocma) {
+			align = FADUMP_CMA_ALIGNMENT;
 			fw_dump.boot_memory_size =
-				ALIGN(fw_dump.boot_memory_size,
-				      FADUMP_CMA_ALIGNMENT);
+				ALIGN(fw_dump.boot_memory_size, align);
+		}
 #endif
 	}
 
@@ -419,19 +421,15 @@ int __init fadump_reserve_mem(void)
 	} else {
 		/*
 		 * Reserve memory at an offset closer to bottom of the RAM to
-		 * minimize the impact of memory hot-remove operation. We can't
-		 * use memblock_find_in_range() here since it doesn't allocate
-		 * from bottom to top.
+		 * minimize the impact of memory hot-remove operation.
 		 */
-		while (base <= (mem_boundary - size)) {
-			if (memblock_is_region_memory(base, size) &&
-			    !memblock_is_region_reserved(base, size))
-				break;
+		memblock_set_bottom_up(true);
+		base = memblock_find_in_range(base, mem_boundary, size, align);
 
-			base += size;
-		}
+		/* Restore the previous allocation mode */
+		memblock_set_bottom_up(is_memblock_bottom_up);
 
-		if (base > (mem_boundary - size)) {
+		if (!base) {
 			pr_err("Failed to find memory chunk for reservation!\n");
 			goto error_out;
 		}