author     Ard Biesheuvel  2015-03-19 16:42:27 +0000
committer  Will Deacon     2015-03-23 11:35:29 +0000
commit     dd006da21646f1c86f0242eb8f527d093303127a (patch)
tree       b67ea3943e04f7d6b643b0f38c4cba7f3db42ace /arch/arm64/kernel
parent     6232cfd0fa01fe392df0b18a3a06628f130b83b2 (diff)
arm64: mm: increase VA range of identity map
The page size and the number of translation levels, and hence the supported
virtual address range, are build-time configurables on arm64 whose optimal
values are use-case dependent. However, in the current implementation, if the
system's RAM is located at a very high offset, the configured virtual address
range has to be enlarged to match, merely because the identity mapping, which
is only used to enable or disable the MMU, maps physical memory at an equal
virtual offset and therefore needs the extended range.

This patch relaxes that requirement by increasing the number of translation
levels for the identity mapping only, and only when actually needed, i.e.,
when the offset of system RAM is found to be out of reach at runtime.
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
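The approach described above comes down to one comparison at boot: how many
address bits the ID map actually needs to cover the kernel image, versus how
many the default T0SZ for the configured VA range provides. Below is a minimal
user-space sketch of that decision, assuming a 4 KB-page, 39-bit VA
configuration; the idmap_t0sz_for() helper, the constant values and the example
addresses are illustrative assumptions, not kernel API.

/*
 * Minimal sketch of the boot-time decision: derive the T0SZ value needed to
 * ID map the kernel image and compare it against the default for the
 * configured VA range. Constants and helper are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS        39                     /* assumed: 4 KB pages, 3 levels */
#define TCR_T0SZ(bits) (64 - (bits))          /* T0SZ encodes 64 - VA size     */

static int idmap_t0sz_for(uint64_t kernel_end_pa)
{
	/* Leading zeroes of the end address == 64 - bits used == candidate T0SZ. */
	int t0sz = __builtin_clzll(kernel_end_pa);  /* kernel_end_pa must be non-zero */

	/* Default range already covers the image: keep the default T0SZ. */
	if (t0sz >= TCR_T0SZ(VA_BITS))
		return TCR_T0SZ(VA_BITS);

	/* Otherwise T0SZ must shrink, i.e. an extra translation level is needed. */
	return t0sz;
}

int main(void)
{
	/* RAM just above 2 GB: fits in 39 bits, the default T0SZ of 25 is kept. */
	printf("%d\n", idmap_t0sz_for(0x0000000081000000ULL));

	/* RAM above 512 GB: 40 bits are needed, so T0SZ drops to 24. */
	printf("%d\n", idmap_t0sz_for(0x0000008001000000ULL));
	return 0;
}

The second case is the one the patch targets: rather than rebuilding the whole
kernel with a larger VA_BITS, only the identity map grows an extra level.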
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/head.S  37
-rw-r--r--  arch/arm64/kernel/smp.c    1
2 files changed, 38 insertions, 0 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 1fdf42041f42..51c9811e683c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -383,6 +383,43 @@ __create_page_tables:
 	 */
 	mov	x0, x25				// idmap_pg_dir
 	adrp	x3, KERNEL_START		// __pa(KERNEL_START)
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))
+
+	/*
+	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
+	 * created that covers system RAM if that is located sufficiently high
+	 * in the physical address space. So for the ID map, use an extended
+	 * virtual range in that case, by configuring an additional translation
+	 * level.
+	 * First, we have to verify our assumption that the current value of
+	 * VA_BITS was chosen such that all translation levels are fully
+	 * utilised, and that lowering T0SZ will always result in an additional
+	 * translation level to be configured.
+	 */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
+#endif
+
+	/*
+	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+	 * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+	 * this number conveniently equals the number of leading zeroes in
+	 * the physical address of KERNEL_END.
+	 */
+	adrp	x5, KERNEL_END
+	clz	x5, x5
+	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	b.ge	1f			// .. then skip additional level
+
+	str_l	x5, idmap_t0sz, x6
+
+	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
+1:
+#endif
+
 	create_pgd_entry x0, x3, x5, x6
 	mov	x5, x3				// __pa(KERNEL_START)
 	adr_l	x6, KERNEL_END			// __pa(KERNEL_END)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4257369341e4..ffe8e1b814e0 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void)
 	 */
 	cpu_set_reserved_ttbr0();
 	flush_tlb_all();
+	cpu_set_default_tcr_t0sz();
 
 	preempt_disable();
 	trace_hardirqs_off();
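For reference, here is a worked example of the EXTRA_SHIFT/EXTRA_PTRS
preprocessor arithmetic from the head.S hunk, assuming the common 4 KB page,
39-bit VA configuration; the numeric values below are specific to that assumed
configuration, other page sizes follow the same pattern.

/* Worked example of the EXTRA_SHIFT/EXTRA_PTRS arithmetic for 4 KB pages, VA_BITS = 39. */
#include <assert.h>

#define PAGE_SHIFT   12                              /* 4 KB pages */
#define PGDIR_SHIFT  30                              /* PAGE_SHIFT + 2 * (PAGE_SHIFT - 3) */
#define VA_BITS      39

#define EXTRA_SHIFT  (PGDIR_SHIFT + PAGE_SHIFT - 3)  /* 30 + 12 - 3 = 39 */
#define EXTRA_PTRS   (1 << (48 - EXTRA_SHIFT))       /* 1 << 9 = 512 entries */

int main(void)
{
	/* The #error guard in head.S: the extra level must sit directly above the PGD. */
	assert(EXTRA_SHIFT == VA_BITS);

	/* A single 512-entry (4 KB) table extends the ID map's reach from 39 to 48 VA bits. */
	assert(EXTRA_PTRS == 512);
	return 0;
}

The smp.c hunk complements this: once a secondary CPU has switched away from
the identity mapping, it moves back to the default T0SZ via
cpu_set_default_tcr_t0sz().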