| author | Ard Biesheuvel | 2015-01-22 10:01:40 +0000 |
|---|---|---|
| committer | Catalin Marinas | 2015-01-22 14:59:25 +0000 |
| commit | 60305db9884515ca063474e262b454f6da04e4e2 (patch) | |
| tree | cbf4480285dd0e7fc8d13780bac7b50678dbf714 /arch/arm64/kernel | |
| parent | da141706aea52c1a9fbd28cb8d289b78819f5436 (diff) | |
arm64/efi: move virtmap init to early initcall
Now that the create_mapping() code in mm/mmu.c is able to support
setting up kernel page tables at initcall time, we can move the whole
virtmap creation to arm64_enable_runtime_services() instead of having
a distinct stage during early boot. This also allows us to drop the
arm64-specific EFI_VIRTMAP flag.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
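
For readers following the change: the point of the patch is that the UEFI virtual mappings are now built from the early initcall path rather than from setup_arch(). Below is a minimal sketch of that flow; the early_initcall() registration line, the EFI_BOOT check, and the elided steps are assumptions taken from the surrounding efi.c code, and only the efi_virtmap_init() check itself appears in this diff.

```c
/*
 * Sketch only, not the verbatim kernel source: how the virtual-mapping
 * setup is now driven from an early initcall instead of setup_arch().
 * The early_initcall() registration and the elided steps are assumed
 * from context; only the efi_virtmap_init() check is part of this diff.
 */
static int __init arm64_enable_runtime_services(void)
{
	if (!efi_enabled(EFI_BOOT))
		return 0;	/* booted without UEFI, nothing to set up */

	/* ... map the UEFI memory map and system table (elided) ... */

	if (!efi_virtmap_init()) {
		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
		return -1;
	}

	/* ... flag EFI_RUNTIME_SERVICES and hand the map to the EFI core (elided) ... */
	return 0;
}
early_initcall(arm64_enable_runtime_services);
```

Because this runs as an initcall, it executes well after paging_init(), which is what the parent commit's changes to create_mapping() in mm/mmu.c make safe, as the commit message notes.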
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r-- | arch/arm64/kernel/efi.c | 111 |
-rw-r--r-- | arch/arm64/kernel/setup.c | 1 |
2 files changed, 53 insertions, 59 deletions
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index c9cb0fbe7aa4..b42c7b480e1e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -38,6 +38,19 @@ struct efi_memory_map memmap;
 
 static u64 efi_system_table;
 
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+        .mm_rb = RB_ROOT,
+        .pgd = efi_pgd,
+        .mm_users = ATOMIC_INIT(2),
+        .mm_count = ATOMIC_INIT(1),
+        .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+        .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+        .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+        INIT_MM_CONTEXT(efi_mm)
+};
+
 static int uefi_debug __initdata;
 static int __init uefi_debug_setup(char *str)
 {
@@ -213,6 +226,45 @@ void __init efi_init(void)
                 return;
 
         reserve_regions();
+        early_memunmap(memmap.map, params.mmap_size);
+}
+
+static bool __init efi_virtmap_init(void)
+{
+        efi_memory_desc_t *md;
+
+        for_each_efi_memory_desc(&memmap, md) {
+                u64 paddr, npages, size;
+                pgprot_t prot;
+
+                if (!(md->attribute & EFI_MEMORY_RUNTIME))
+                        continue;
+                if (md->virt_addr == 0)
+                        return false;
+
+                paddr = md->phys_addr;
+                npages = md->num_pages;
+                memrange_efi_to_native(&paddr, &npages);
+                size = npages << PAGE_SHIFT;
+
+                pr_info("  EFI remap 0x%016llx => %p\n",
+                        md->phys_addr, (void *)md->virt_addr);
+
+                /*
+                 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+                 * executable, everything else can be mapped with the XN bits
+                 * set.
+                 */
+                if (!is_normal_ram(md))
+                        prot = __pgprot(PROT_DEVICE_nGnRE);
+                else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+                        prot = PAGE_KERNEL_EXEC;
+                else
+                        prot = PAGE_KERNEL;
+
+                create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+        }
+        return true;
 }
 
 /*
@@ -254,7 +306,7 @@ static int __init arm64_enable_runtime_services(void)
         }
         set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-        if (!efi_enabled(EFI_VIRTMAP)) {
+        if (!efi_virtmap_init()) {
                 pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
                 return -1;
         }
@@ -283,19 +335,6 @@ static int __init arm64_dmi_init(void)
 }
 core_initcall(arm64_dmi_init);
 
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
-static struct mm_struct efi_mm = {
-        .mm_rb = RB_ROOT,
-        .pgd = efi_pgd,
-        .mm_users = ATOMIC_INIT(2),
-        .mm_count = ATOMIC_INIT(1),
-        .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
-        .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
-        .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
-        INIT_MM_CONTEXT(efi_mm)
-};
-
 static void efi_set_pgd(struct mm_struct *mm)
 {
         cpu_switch_mm(mm->pgd, mm);
@@ -315,47 +354,3 @@ void efi_virtmap_unload(void)
         efi_set_pgd(current->active_mm);
         preempt_enable();
 }
-
-void __init efi_virtmap_init(void)
-{
-        efi_memory_desc_t *md;
-
-        if (!efi_enabled(EFI_BOOT))
-                return;
-
-        for_each_efi_memory_desc(&memmap, md) {
-                u64 paddr, npages, size;
-                pgprot_t prot;
-
-                if (!(md->attribute & EFI_MEMORY_RUNTIME))
-                        continue;
-                if (WARN(md->virt_addr == 0,
-                         "UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
-                         md->phys_addr))
-                        return;
-
-                paddr = md->phys_addr;
-                npages = md->num_pages;
-                memrange_efi_to_native(&paddr, &npages);
-                size = npages << PAGE_SHIFT;
-
-                pr_info("  EFI remap 0x%016llx => %p\n",
-                        md->phys_addr, (void *)md->virt_addr);
-
-                /*
-                 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
-                 * executable, everything else can be mapped with the XN bits
-                 * set.
-                 */
-                if (!is_normal_ram(md))
-                        prot = __pgprot(PROT_DEVICE_nGnRE);
-                else if (md->type == EFI_RUNTIME_SERVICES_CODE)
-                        prot = PAGE_KERNEL_EXEC;
-                else
-                        prot = PAGE_KERNEL;
-
-                create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
-        }
-        set_bit(EFI_VIRTMAP, &efi.flags);
-        early_memunmap(memmap.map, memmap.map_end - memmap.map);
-}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 207413fe08a0..bb10903887d4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -382,7 +382,6 @@ void __init setup_arch(char **cmdline_p)
         paging_init();
         request_standard_resources();
 
-        efi_virtmap_init();
         early_ioremap_reset();
         unflatten_device_tree();
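
The efi_pgd/efi_mm pair that moves to the top of the file carries the runtime-services mappings created by efi_virtmap_init(); the helpers at the bottom of the diff switch to and from those page tables around a runtime call. The following is a rough sketch of the intended usage: efi_virtmap_load() is assumed to mirror the efi_virtmap_unload() path shown above, and example_get_time() plus the efi.systab->runtime->get_time call path are hypothetical illustration, not part of this patch.

```c
/*
 * Sketch of how the virtmap helpers are expected to bracket a UEFI runtime
 * service call. efi_virtmap_load() is assumed to mirror the unload path
 * visible in the diff; example_get_time() is a hypothetical caller.
 */
void efi_virtmap_load(void)
{
	preempt_disable();
	efi_set_pgd(&efi_mm);			/* switch to the EFI page tables */
}

void efi_virtmap_unload(void)
{
	efi_set_pgd(current->active_mm);	/* back to the caller's tables */
	preempt_enable();
}

static efi_status_t example_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;

	efi_virtmap_load();
	status = efi.systab->runtime->get_time(tm, tc);
	efi_virtmap_unload();

	return status;
}
```

The preempt_disable()/preempt_enable() pair brackets the window in which the EFI page tables are live on the calling CPU, so the task cannot be scheduled away or migrated while efi_mm is installed.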