author:    Aneesh Kumar K.V    2023-08-08 14:45:01 +0530
committer: Andrew Morton       2023-08-21 13:37:49 -0700
commit:    1a8c64e110435e44e71bcd50a75663174b575f22
tree:      374c0671df09f80c8f19eb362d36916ca35dba88
parent:    603fd64dfa45d1e9df996911e4010f2b00731387
mm/memory_hotplug: embed vmem_altmap details in memory block
With memmap on memory, some architectures need more details w.r.t. the altmap, such as base_pfn, end_pfn, etc., to unmap vmemmap memory. Instead of computing them again when we remove a memory block, embed the vmem_altmap details in struct memory_block if we are using the memmap on memory block feature.

[yangyingliang@huawei.com: fix error return code in add_memory_resource()]
Link: https://lkml.kernel.org/r/20230809081552.1351184-1-yangyingliang@huawei.com
Link: https://lkml.kernel.org/r/20230808091501.287660-7-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
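For context, a minimal sketch of the struct memory_block change this patch builds on (the include/linux/memory.h side of the series, which falls outside this drivers-only diffstat); the surrounding fields are abridged and only illustrative:

/* include/linux/memory.h (sketch, abridged) */
struct vmem_altmap;

struct memory_block {
	unsigned long start_section_nr;
	unsigned long state;		/* MEM_ONLINE, MEM_OFFLINE, ... */
	int nid;
	/*
	 * Before this patch: unsigned long nr_vmemmap_pages;
	 * After this patch the block keeps the whole altmap, so an
	 * architecture can recover base_pfn/end_pfn/free when it
	 * unmaps the vmemmap at remove time instead of recomputing.
	 */
	struct vmem_altmap *altmap;
	/* ... device, zone, and group linkage elided ... */
};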
Diffstat (limited to 'drivers')
 drivers/base/memory.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b456ac213610..8191709c9ad2 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -105,7 +105,8 @@ EXPORT_SYMBOL(unregister_memory_notifier);
static void memory_block_release(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
-
+ /* Verify that the altmap is freed */
+ WARN_ON(mem->altmap);
kfree(mem);
}
@@ -183,7 +184,7 @@ static int memory_block_online(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+ unsigned long nr_vmemmap_pages = 0;
struct zone *zone;
int ret;
@@ -200,6 +201,9 @@ static int memory_block_online(struct memory_block *mem)
* stage helps to keep accounting easier to follow - e.g vmemmaps
* belong to the same zone as the memory they backed.
*/
+ if (mem->altmap)
+ nr_vmemmap_pages = mem->altmap->free;
+
if (nr_vmemmap_pages) {
ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
@@ -230,7 +234,7 @@ static int memory_block_offline(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+ unsigned long nr_vmemmap_pages = 0;
int ret;
if (!mem->zone)
@@ -240,6 +244,9 @@ static int memory_block_offline(struct memory_block *mem)
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
+ if (mem->altmap)
+ nr_vmemmap_pages = mem->altmap->free;
+
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
-nr_vmemmap_pages);
@@ -726,7 +733,7 @@ void memory_block_add_nid(struct memory_block *mem, int nid,
#endif
static int add_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages,
+ struct vmem_altmap *altmap,
struct memory_group *group)
{
struct memory_block *mem;
@@ -744,7 +751,7 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
mem->nid = NUMA_NO_NODE;
- mem->nr_vmemmap_pages = nr_vmemmap_pages;
+ mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
#ifndef CONFIG_NUMA
@@ -783,14 +790,14 @@ static int __init add_boot_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
return add_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0, NULL);
+ MEM_ONLINE, NULL, NULL);
}
static int add_hotplug_memory_block(unsigned long block_id,
- unsigned long nr_vmemmap_pages,
+ struct vmem_altmap *altmap,
struct memory_group *group)
{
- return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
+ return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
}
static void remove_memory_block(struct memory_block *memory)
@@ -818,7 +825,7 @@ static void remove_memory_block(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages,
+ struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
@@ -832,7 +839,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
+ ret = add_hotplug_memory_block(block_id, altmap, group);
if (ret)
break;
}
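For reference, a hedged sketch of struct vmem_altmap roughly as it stood around this series (include/linux/memremap.h; the exact field set can vary by kernel version). With memmap_on_memory, the pages backing a block's own memmap are carved out of the block itself, so mem->altmap->free in the hunks above plays the role of the old nr_vmemmap_pages:

/* include/linux/memremap.h (sketch, comments paraphrase the upstream docs) */
struct vmem_altmap {
	unsigned long base_pfn;		/* base of the mapped range */
	const unsigned long end_pfn;	/* end of the range usable for the altmap */
	const unsigned long reserve;	/* pages mapped but reserved for driver use */
	unsigned long free;		/* pages set aside for memmap storage */
	unsigned long align;		/* pages spent meeting allocation alignment */
	unsigned long alloc;		/* pages consumed by vmemmap_populate() */
};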