Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/amdtopology_64.c |  8
-rw-r--r--  arch/x86/mm/init.c           |  8
-rw-r--r--  arch/x86/mm/init_64.c        | 54
-rw-r--r--  arch/x86/mm/numa_64.c        |  6
-rw-r--r--  arch/x86/mm/srat_64.c        |  2
5 files changed, 66 insertions, 12 deletions
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 51fae9cfdecb..ae6ad691a14a 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -221,12 +221,14 @@ int __init amd_scan_nodes(void)
 		apicid_base = boot_cpu_physical_apicid;
 	}
 
-	for_each_node_mask(i, node_possible_map) {
-		int j;
-
+	for_each_node_mask(i, node_possible_map)
 		memblock_x86_register_active_regions(i,
 				nodes[i].start >> PAGE_SHIFT,
 				nodes[i].end >> PAGE_SHIFT);
+	init_memory_mapping_high();
+	for_each_node_mask(i, node_possible_map) {
+		int j;
+
 		for (j = apicid_base; j < cores + apicid_base; j++)
 			apicid_to_node[(i << bits) + j] = i;
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
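
The hunk above splits the single loop in amd_scan_nodes() into two passes: all node ranges are registered first, init_memory_mapping_high() then maps memory above 4 GiB, and only afterwards is per-node bootmem set up, so those later allocations can come from node-local, already-mapped memory. A minimal sketch of that ordering in plain C, with hypothetical register_region()/map_high()/setup_node() helpers standing in for the memblock and bootmem calls (the numa_64.c hunks further down apply the same split):

/* Sketch only, not kernel code. */
struct node_range { unsigned long start, end; };

static void register_region(int nid, const struct node_range *r) { (void)nid; (void)r; }
static void map_high(void) { }
static void setup_node(int nid, const struct node_range *r) { (void)nid; (void)r; }

static void scan_nodes(const struct node_range *nodes, int nr_nodes)
{
	int i;

	/* Pass 1: make every node's memory range known before any mapping. */
	for (i = 0; i < nr_nodes; i++)
		register_region(i, &nodes[i]);

	/* Map high memory once, now that all regions are visible, so page
	 * tables can be placed inside the range they describe. */
	map_high();

	/* Pass 2: per-node setup may now allocate node-local, mapped memory. */
	for (i = 0; i < nr_nodes; i++)
		setup_node(i, &nodes[i]);
}

int main(void)
{
	struct node_range nodes[2] = { { 0, 1UL << 32 }, { 1UL << 32, 3UL << 32 } };

	scan_nodes(nodes, 2);
	return 0;
}
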
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 5863950ebe0c..fa6fe756d912 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -65,16 +65,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 	/* for fixmap */
 	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
 
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-#ifdef CONFIG_X86_32
 	good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
+
 	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
 	if (base == MEMBLOCK_ERROR)
 		panic("Cannot find space for the kernel page tables");
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 024847dc81ab..194f2732ab77 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -607,9 +607,63 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 				int acpi, int k8)
 {
 	memblock_x86_register_active_regions(0, start_pfn, end_pfn);
+	init_memory_mapping_high();
 }
 #endif
 
+struct mapping_work_data {
+	unsigned long start;
+	unsigned long end;
+	unsigned long pfn_mapped;
+};
+
+static int __init_refok
+mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax)
+{
+	struct mapping_work_data *data = datax;
+	unsigned long pfn_mapped;
+	unsigned long final_start, final_end;
+
+	final_start = max_t(unsigned long, start_pfn<<PAGE_SHIFT, data->start);
+	final_end = min_t(unsigned long, end_pfn<<PAGE_SHIFT, data->end);
+
+	if (final_end <= final_start)
+		return 0;
+
+	pfn_mapped = init_memory_mapping(final_start, final_end);
+
+	if (pfn_mapped > data->pfn_mapped)
+		data->pfn_mapped = pfn_mapped;
+
+	return 0;
+}
+
+static unsigned long __init_refok
+init_memory_mapping_active_regions(unsigned long start, unsigned long end)
+{
+	struct mapping_work_data data;
+
+	data.start = start;
+	data.end = end;
+	data.pfn_mapped = 0;
+
+	work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data);
+
+	return data.pfn_mapped;
+}
+
+void __init_refok init_memory_mapping_high(void)
+{
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32,
+						 max_pfn<<PAGE_SHIFT);
+		/* can we preserve max_low_pfn ? */
+		max_low_pfn = max_pfn;
+
+		memblock.current_limit = get_max_mapped();
+	}
+}
+
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
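
The core of the new code is mapping_work_fn(), which clamps each active pfn range to the requested byte window and maps only non-empty intersections; init_memory_mapping_high() drives it over everything above 4 GiB. A standalone sketch of just the clamping step, assuming a 64-bit unsigned long and a hypothetical PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages, as on x86 */

/* Sketch only, not kernel code: clamp the pfn range [start_pfn, end_pfn)
 * to the byte window [win_start, win_end); return 1 if anything is left. */
static int clamp_to_window(unsigned long start_pfn, unsigned long end_pfn,
			   unsigned long win_start, unsigned long win_end,
			   unsigned long *out_start, unsigned long *out_end)
{
	unsigned long s = start_pfn << PAGE_SHIFT;
	unsigned long e = end_pfn << PAGE_SHIFT;

	if (s < win_start)
		s = win_start;
	if (e > win_end)
		e = win_end;
	if (e <= s)
		return 0;	/* region lies entirely outside the window */

	*out_start = s;
	*out_end = e;
	return 1;
}

int main(void)
{
	unsigned long s, e;

	/* An active region spanning 2 GiB..6 GiB, clamped to >4 GiB. */
	if (clamp_to_window(0x80000000UL >> PAGE_SHIFT,
			    0x180000000UL >> PAGE_SHIFT,
			    1UL << 32, ~0UL, &s, &e))
		printf("map %#lx-%#lx\n", s, e);	/* 0x100000000-0x180000000 */
	return 0;
}

In the hunk above, init_memory_mapping_high() then keeps the highest pfn returned for any region and raises memblock's allocation limit via get_max_mapped().
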
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 02d36ff85ebd..7cc26ae0a15d 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -590,11 +590,12 @@ static int __init numa_emulation(unsigned long start_pfn,
 	 * the e820 memory map.
 	 */
 	remove_all_active_ranges();
-	for_each_node_mask(i, node_possible_map) {
+	for_each_node_mask(i, node_possible_map)
 		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
 						nodes[i].end >> PAGE_SHIFT);
+	init_memory_mapping_high();
+	for_each_node_mask(i, node_possible_map)
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-	}
 	acpi_fake_nodes(nodes, num_nodes);
 	numa_init_array();
 	return 0;
@@ -645,6 +646,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
 	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
 	memblock_x86_register_active_regions(0, start_pfn, last_pfn);
+	init_memory_mapping_high();
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
 }
 
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index a35cb9d8b060..0b961c8bffb4 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -433,6 +433,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		return -1;
 	}
 
+	init_memory_mapping_high();
+
 	/* Account for nodes with cpus and no memory */
 	nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);