author     Linus Torvalds  2019-07-19 09:45:58 -0700
committer  Linus Torvalds  2019-07-19 09:45:58 -0700
commit     249be8511b269495bc95cb8bdfdd5840b2ba73c0 (patch)
tree       6920bde053faa0284b52b2a9c9695f5516520377 /arch
parent     3bfe1fc46794631366faa3ef075e1b0ff7ba120a (diff)
parent     eec4844fae7c033a0c1fc1eb3b8517aeb8b6cc49 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "The rest of MM and a kernel-wide procfs cleanup.

  Summary of the more significant patches:

   - Patch series "mm/memory_hotplug: Factor out memory block device
     handling", v3. David Hildenbrand.

     Some spring-cleaning of the memory hotplug code, notably in
     drivers/base/memory.c

   - "mm: thp: fix false negative of shmem vma's THP eligibility". Yang Shi.

     Fix /proc/pid/smaps output for THP pages used in shmem.

   - "resource: fix locking in find_next_iomem_res()" + 1. Nadav Amit.

     Bugfix and speedup for kernel/resource.c

   - Patch series "mm: Further memory block device cleanups", David
     Hildenbrand.

     More spring-cleaning of the memory hotplug code.

   - Patch series "mm: Sub-section memory hotplug support". Dan Williams.

     Generalise the memory hotplug code so that pmem can use it more
     completely. Then remove the hacks from the libnvdimm code which were
     there to work around the memory-hotplug code's constraints.

   - "proc/sysctl: add shared variables for range check", Matteo Croce.

     We have about 250 instances of

          int zero;
          ...
                  .extra1 = &zero,

     in the tree. This is a tree-wide sweep to make all those private
     "zero"s and "one"s use global variables.

     Alas, it isn't practical to make those two global integers const"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  proc/sysctl: add shared variables for range check
  mm: migrate: remove unused mode argument
  mm/sparsemem: cleanup 'section number' data types
  libnvdimm/pfn: stop padding pmem namespaces to section alignment
  libnvdimm/pfn: fix fsdax-mode namespace info-block zero-fields
  mm/devm_memremap_pages: enable sub-section remap
  mm: document ZONE_DEVICE memory-model implications
  mm/sparsemem: support sub-section hotplug
  mm/sparsemem: prepare for sub-section ranges
  mm: kill is_dev_zone() helper
  mm/hotplug: kill is_dev_zone() usage in __remove_pages()
  mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
  mm/hotplug: prepare shrink_{zone, pgdat}_span for sub-section removal
  mm/sparsemem: add helpers track active portions of a section at boot
  mm/sparsemem: introduce a SECTION_IS_EARLY flag
  mm/sparsemem: introduce struct mem_section_usage
  drivers/base/memory.c: get rid of find_memory_block_hinted()
  mm/memory_hotplug: move and simplify walk_memory_blocks()
  mm/memory_hotplug: rename walk_memory_range() and pass start+size instead of pfns
  mm: make register_mem_sect_under_node() static
  ...
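Several of the arch hunks below belong to the "proc/sysctl: add shared variables for range check" sweep described above: per-file zero/one locals are dropped in favour of the shared SYSCTL_ZERO and SYSCTL_ONE pointers from <linux/sysctl.h>. A minimal sketch of the resulting pattern, with a made-up knob name and variable purely for illustration:

    #include <linux/sysctl.h>

    static int example_enabled;             /* hypothetical 0/1 tunable */

    static struct ctl_table example_table[] = {
            {
                    .procname       = "example_enabled",
                    .data           = &example_enabled,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    /* shared constants replace per-file "static int zero, one = 1;" */
                    .extra1         = SYSCTL_ZERO,
                    .extra2         = SYSCTL_ONE,
            },
            { }
    };

The vdso32-setup.c and itmt.c hunks below are instances of exactly this conversion.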
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/mm/mmu.c                         17
-rw-r--r--  arch/ia64/mm/init.c                          2
-rw-r--r--  arch/powerpc/mm/mem.c                        2
-rw-r--r--  arch/powerpc/platforms/powernv/memtrace.c   23
-rw-r--r--  arch/s390/appldata/appldata_base.c          15
-rw-r--r--  arch/s390/kernel/topology.c                  6
-rw-r--r--  arch/s390/mm/init.c                         18
-rw-r--r--  arch/sh/mm/init.c                            2
-rw-r--r--  arch/x86/entry/vdso/vdso32-setup.c           7
-rw-r--r--  arch/x86/kernel/itmt.c                       6
-rw-r--r--  arch/x86/mm/init_32.c                        2
-rw-r--r--  arch/x86/mm/init_64.c                        6
12 files changed, 52 insertions, 54 deletions
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e661469cabdd..750a69dde39b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1074,4 +1074,21 @@ int arch_add_memory(int nid, u64 start, u64 size,
return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
restrictions);
}
+void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct zone *zone;
+
+ /*
+ * FIXME: Cleanup page tables (also in arch_add_memory() in case
+ * adding fails). Until then, this function should only be used
+ * during memory hotplug (adding memory), not for memory
+ * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
+ * unlocked yet.
+ */
+ zone = page_zone(pfn_to_page(start_pfn));
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
+}
#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d28e29103bdb..aae75fd7b810 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -681,7 +681,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
@@ -693,4 +692,3 @@ void arch_remove_memory(int nid, u64 start, u64 size,
__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
-#endif
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 26a8da3723bb..9259337d7374 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -125,7 +125,6 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
void __ref arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
@@ -151,7 +150,6 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
pr_warn("Hash collision while resizing HPT\n");
}
#endif
-#endif /* CONFIG_MEMORY_HOTPLUG */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 5e53c1392d3b..eb2e75dac369 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -70,23 +70,23 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
- u64 end_pfn = start_pfn + nr_pages - 1;
+ const unsigned long start = PFN_PHYS(start_pfn);
+ const unsigned long size = PFN_PHYS(nr_pages);
- if (walk_memory_range(start_pfn, end_pfn, NULL,
- check_memblock_online))
+ if (walk_memory_blocks(start, size, NULL, check_memblock_online))
return false;
- walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE,
- change_memblock_state);
+ walk_memory_blocks(start, size, (void *)MEM_GOING_OFFLINE,
+ change_memblock_state);
if (offline_pages(start_pfn, nr_pages)) {
- walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE,
- change_memblock_state);
+ walk_memory_blocks(start, size, (void *)MEM_ONLINE,
+ change_memblock_state);
return false;
}
- walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
- change_memblock_state);
+ walk_memory_blocks(start, size, (void *)MEM_OFFLINE,
+ change_memblock_state);
return true;
@@ -242,9 +242,8 @@ static int memtrace_online(void)
*/
if (!memhp_auto_online) {
lock_device_hotplug();
- walk_memory_range(PFN_DOWN(ent->start),
- PFN_UP(ent->start + ent->size - 1),
- NULL, online_mem_block);
+ walk_memory_blocks(ent->start, ent->size, NULL,
+ online_mem_block);
unlock_device_hotplug();
}
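The memtrace hunks above illustrate the walk_memory_range() to walk_memory_blocks() rename called out in the merge message: the callback interface is unchanged, but the range is now passed as a physical start address and size in bytes rather than as an inclusive pfn range. A rough before/after sketch, with generically named arguments:

    /* Before: inclusive pfn range. */
    walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
                      arg, callback);

    /* After: physical start address and size in bytes. */
    walk_memory_blocks(start, size, arg, callback);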
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index e4b58240ec53..aa738cad1338 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -220,15 +220,13 @@ appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int timer_active = appldata_timer_active;
- int zero = 0;
- int one = 1;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &timer_active,
.maxlen = sizeof(int),
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
};
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
@@ -255,13 +253,12 @@ appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int interval = appldata_interval;
- int one = 1;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &interval,
.maxlen = sizeof(int),
- .extra1 = &one,
+ .extra1 = SYSCTL_ONE,
};
rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
@@ -289,13 +286,11 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
struct list_head *lh;
int rc, found;
int active;
- int zero = 0;
- int one = 1;
struct ctl_table ctl_entry = {
.data = &active,
.maxlen = sizeof(int),
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
};
found = 0;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8964a3f60aad..2db6fb405a9a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -587,15 +587,13 @@ static int topology_ctl_handler(struct ctl_table *ctl, int write,
{
int enabled = topology_is_enabled();
int new_mode;
- int zero = 0;
- int one = 1;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &enabled,
.maxlen = sizeof(int),
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
};
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f0bee6af3960..4e5bbe328594 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -273,6 +273,9 @@ int arch_add_memory(int nid, u64 start, u64 size,
unsigned long size_pages = PFN_DOWN(size);
int rc;
+ if (WARN_ON_ONCE(restrictions->altmap))
+ return -EINVAL;
+
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
@@ -283,16 +286,15 @@ int arch_add_memory(int nid, u64 start, u64 size,
return rc;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
- /*
- * There is no hardware or firmware interface which could trigger a
- * hot memory remove on s390. So there is nothing that needs to be
- * implemented.
- */
- BUG();
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct zone *zone;
+
+ zone = page_zone(pfn_to_page(start_pfn));
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
+ vmem_remove_mapping(start, size);
}
-#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 13c6a6bb5fd9..dfdbaa50946e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -429,7 +429,6 @@ int memory_add_physaddr_to_nid(u64 addr)
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
-#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
@@ -440,5 +439,4 @@ void arch_remove_memory(int nid, u64 start, u64 size,
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
}
-#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 42d4c89f990e..240626e7f55a 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -65,9 +65,6 @@ subsys_initcall(sysenter_setup);
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
-static const int zero;
-static const int one = 1;
-
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
@@ -75,8 +72,8 @@ static struct ctl_table abi_table2[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = (int *)&zero,
- .extra2 = (int *)&one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{}
};
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index 838cf8a32c49..1cb3ca9bba49 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -65,8 +65,6 @@ static int sched_itmt_update_handler(struct ctl_table *table, int write,
return ret;
}
-static unsigned int zero;
-static unsigned int one = 1;
static struct ctl_table itmt_kern_table[] = {
{
.procname = "sched_itmt_enabled",
@@ -74,8 +72,8 @@ static struct ctl_table itmt_kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_itmt_update_handler,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{}
};
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f265a4316179..4068abb9427f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -860,7 +860,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap)
{
@@ -872,7 +871,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
-#endif
int kernel_set_to_readonly __read_mostly;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 08bbf648827b..a6b5c653727b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1198,7 +1198,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
remove_pagetable(start, end, false, altmap);
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
@@ -1219,7 +1218,6 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
__remove_pages(zone, start_pfn, nr_pages, altmap);
kernel_physical_mapping_remove(start, start + size);
}
-#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_vsyscall;
@@ -1520,7 +1518,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
{
int err;
- if (boot_cpu_has(X86_FEATURE_PSE))
+ if (end - start < PAGES_PER_SECTION * sizeof(struct page))
+ err = vmemmap_populate_basepages(start, end, node);
+ else if (boot_cpu_has(X86_FEATURE_PSE))
err = vmemmap_populate_hugepages(start, end, node, altmap);
else if (altmap) {
pr_err_once("%s: no cpu support for altmap allocations\n",