path: root/arch/x86/realmode
author	H. Peter Anvin	2012-05-16 13:49:10 -0700
committer	H. Peter Anvin	2012-05-16 13:49:10 -0700
commit	137127018812ec7fcccb9843156cfc0b5cfa31d5 (patch)
tree	12718f2524a26c5bd26a53518068229d84c41c08 /arch/x86/realmode
parent	51edbe6a2f47c78c6c6e529999ee0a044fe59a89 (diff)
x86, realmode: Move kernel/realmode.c to realmode/init.c
Keep all the realmode code together, including initialization (only the
rm/ subdirectory is actually built as real-mode code, anyway.)

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Cc: Jarkko Sakkinen <jarkko.sakkinen@intel.com>
Diffstat (limited to 'arch/x86/realmode')
-rw-r--r--	arch/x86/realmode/Makefile	1
-rw-r--r--	arch/x86/realmode/init.c	116
2 files changed, 117 insertions, 0 deletions
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index a05b3aca64ad..94f7fbe97b08 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -9,6 +9,7 @@
subdir- := rm
+obj-y += init.o
obj-y += rmpiggy.o
$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644
index 000000000000..099277984b80
--- /dev/null
+++ b/arch/x86/realmode/init.c
@@ -0,0 +1,116 @@
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/realmode.h>
+
+struct real_mode_header *real_mode_header;
+u32 *trampoline_cr4_features;
+
+void __init setup_real_mode(void)
+{
+	phys_addr_t mem;
+	u16 real_mode_seg;
+	u32 *rel;
+	u32 count;
+	u32 *ptr;
+	u16 *seg;
+	int i;
+	unsigned char *base;
+	struct trampoline_header *trampoline_header;
+	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+	u64 *trampoline_pgd;
+	u32 efer_low, efer_high;
+#endif
+
+	/* Has to be in very low memory so we can execute real-mode AP code. */
+	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+	if (!mem)
+		panic("Cannot allocate trampoline\n");
+
+	base = __va(mem);
+	memblock_reserve(mem, size);
+	real_mode_header = (struct real_mode_header *) base;
+	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+	       base, (unsigned long long)mem, size);
+
+	memcpy(base, real_mode_blob, size);
+
+	real_mode_seg = __pa(base) >> 4;
+	rel = (u32 *) real_mode_relocs;
+
+	/* 16-bit segment relocations. */
+	count = rel[0];
+	rel = &rel[1];
+	for (i = 0; i < count; i++) {
+		seg = (u16 *) (base + rel[i]);
+		*seg = real_mode_seg;
+	}
+
+	/* 32-bit linear relocations. */
+	count = rel[i];
+	rel = &rel[i + 1];
+	for (i = 0; i < count; i++) {
+		ptr = (u32 *) (base + rel[i]);
+		*ptr += __pa(base);
+	}
+
+	/* Must be performed *after* relocation. */
+	trampoline_header = (struct trampoline_header *)
+		__va(real_mode_header->trampoline_header);
+
+#ifdef CONFIG_X86_32
+	trampoline_header->start = __pa(startup_32_smp);
+	trampoline_header->gdt_limit = __BOOT_DS + 7;
+	trampoline_header->gdt_base = __pa(boot_gdt);
+#else
+	/*
+	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+	 * so we need to mask it out.
+	 */
+	rdmsr(MSR_EFER, efer_low, efer_high);
+	trampoline_header->efer_low = efer_low & ~EFER_LMA;
+	trampoline_header->efer_high = efer_high;
+
+	trampoline_header->start = (u64) secondary_startup_64;
+	trampoline_cr4_features = &trampoline_header->cr4;
+	*trampoline_cr4_features = read_cr4();
+
+	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+	trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
+	trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+#endif
+}
+
+/*
+ * setup_real_mode() gets called very early, to guarantee the
+ * availability of low memory. This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions in that
+ * function. Thus, we use an arch_initcall instead.
+ */
+static int __init set_real_mode_permissions(void)
+{
+	unsigned char *base = (unsigned char *) real_mode_header;
+	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+
+	size_t ro_size =
+		PAGE_ALIGN(real_mode_header->ro_end) -
+		__pa(base);
+
+	size_t text_size =
+		PAGE_ALIGN(real_mode_header->ro_end) -
+		real_mode_header->text_start;
+
+	unsigned long text_start =
+		(unsigned long) __va(real_mode_header->text_start);
+
+	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+
+	return 0;
+}
+
+arch_initcall(set_real_mode_permissions);
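
For reference, the relocation table that setup_real_mode() walks above is laid
out as a u32 count of 16-bit segment relocations followed by that many byte
offsets, then a u32 count of 32-bit linear relocations and their offsets. The
standalone user-space sketch below (not part of the patch; the function name
apply_realmode_relocs() and all sample values are made up for illustration)
mirrors those two loops:

/* reloc_walk.c - illustrative sketch of the relocation walk, hypothetical. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void apply_realmode_relocs(uint8_t *base, uint32_t phys_base,
				  const uint32_t *rel)
{
	uint16_t real_mode_seg = phys_base >> 4;	/* paragraph number */
	uint32_t count, i;

	/* 16-bit segment relocations: store the real-mode segment. */
	count = *rel++;
	for (i = 0; i < count; i++) {
		uint16_t *seg = (uint16_t *)(base + rel[i]);
		*seg = real_mode_seg;
	}
	rel += count;

	/* 32-bit linear relocations: rebase linear addresses onto phys_base. */
	count = *rel++;
	for (i = 0; i < count; i++) {
		uint32_t *ptr = (uint32_t *)(base + rel[i]);
		*ptr += phys_base;
	}
}

int main(void)
{
	uint8_t blob[32] = { 0 };	/* toy stand-in for the real-mode blob */
	const uint32_t relocs[] = {
		1, 0x04,	/* one 16-bit segment relocation at offset 4 */
		1, 0x08,	/* one 32-bit linear relocation at offset 8 */
	};
	uint32_t linear = 0x100;	/* pre-relocation linear value */

	memcpy(blob + 0x08, &linear, sizeof(linear));
	apply_realmode_relocs(blob, 0x90000, relocs);

	/* Expect segment 0x9000 (= 0x90000 >> 4) and linear 0x90100. */
	printf("segment at +4: %#x\n", (unsigned)*(uint16_t *)(blob + 0x04));
	printf("linear at +8:  %#x\n", (unsigned)*(uint32_t *)(blob + 0x08));
	return 0;
}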
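
The page-permission arithmetic in set_real_mode_permissions() can likewise be
checked in isolation: the whole blob is first marked NX, the range from the
base up to the page-aligned ro_end is marked read-only, and the range from
text_start up to the same boundary is re-marked executable. A minimal sketch,
assuming hypothetical addresses and a 4 KiB page size (none of these numbers
come from the patch):

/* perm_ranges.c - hypothetical check of the size computations. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical physical layout of the copied blob. */
	uint32_t pa_base    = 0x90000;	/* where the blob was copied */
	uint32_t text_start = 0x91000;	/* start of executable text */
	uint32_t ro_end     = 0x92350;	/* end of the read-only portion */
	uint32_t blob_size  = 0x3000;	/* size of the whole blob */

	uint32_t size      = PAGE_ALIGN(blob_size);		/* NX range */
	uint32_t ro_size   = PAGE_ALIGN(ro_end) - pa_base;	/* RO range */
	uint32_t text_size = PAGE_ALIGN(ro_end) - text_start;	/* X range  */

	printf("NX pages: %u\n", size / PAGE_SIZE);	/* 3 */
	printf("RO pages: %u\n", ro_size / PAGE_SIZE);	/* 3 */
	printf("X  pages: %u\n", text_size / PAGE_SIZE);/* 2 */
	return 0;
}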