From e6a2e1b6c24a3993ffbb69a05dda202d2830ad90 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Sun, 1 Mar 2015 10:14:39 +0000
Subject: arm64: mm: unexport set_memory_ro and set_memory_rw

This effectively unexports the set_memory_ro and set_memory_rw functions
from commit 11d91a770f1f ("arm64: Add CONFIG_DEBUG_SET_MODULE_RONX
support"). No module user of those is in the mainline kernel and we
explicitly do not want modules to use these functions, as they are used,
for example, to RO-protect eBPF (interpreted and JIT'ed) images against
malicious modifications and bugs.

Outside of the eBPF scope, I believe the other set_memory_* functions
should also be unexported on arm64, as they have no mainline module users
either. Laura mentioned that they have some uses for modules doing
set_memory_*, but none that are in mainline and it's unclear if they would
ever get there.

Signed-off-by: Daniel Borkmann
Acked-by: Alexei Starovoitov
Acked-by: Laura Abbott
Signed-off-by: Will Deacon
---
 arch/arm64/mm/pageattr.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 1d3ec3ddd84b..e47ed1c5dce1 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -73,7 +73,6 @@ int set_memory_ro(unsigned long addr, int numpages)
 					__pgprot(PTE_RDONLY),
 					__pgprot(PTE_WRITE));
 }
-EXPORT_SYMBOL_GPL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
@@ -81,7 +80,6 @@ int set_memory_rw(unsigned long addr, int numpages)
 					__pgprot(PTE_WRITE),
 					__pgprot(PTE_RDONLY));
 }
-EXPORT_SYMBOL_GPL(set_memory_rw);
 
 int set_memory_nx(unsigned long addr, int numpages)
 {
--
cgit v1.2.3

From b63dbef93f91d56cb4385fdd8d1765201d451136 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 4 Mar 2015 13:27:35 +0000
Subject: arm64: fixmap: check idx is definitely valid

Fixmap indices are in the open interval (FIX_HOLE,
__end_of_fixed_addresses), but __set_fixmap only rejects indices
>= __end_of_fixed_addresses, and therefore indices <= FIX_HOLE are
erroneously accepted. If called with such an idx, __set_fixmap may
corrupt page tables outside of the fixmap region.

This patch ensures that we validate the idx against both endpoints of
the interval.

Cc: Catalin Marinas
Cc: Kees Cook
Acked-by: Ard Biesheuvel
Acked-by: Laura Abbott
Signed-off-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c6daaf6c6f97..c9267acb699c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -627,10 +627,7 @@ void __set_fixmap(enum fixed_addresses idx,
 	unsigned long addr = __fix_to_virt(idx);
 	pte_t *pte;
 
-	if (idx >= __end_of_fixed_addresses) {
-		BUG();
-		return;
-	}
+	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
 
 	pte = fixmap_pte(addr);
 
--
cgit v1.2.3
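An aside on the check above: valid fixmap indices form the open interval
(FIX_HOLE, __end_of_fixed_addresses), so both endpoints need rejecting.
The stand-alone C sketch below models that interval test; the
FIX_EXAMPLE_* slots are hypothetical placeholders, not the kernel's real
fixed_addresses entries.

#include <assert.h>
#include <stdio.h>

/* Illustrative model only: this enum mirrors the shape of the kernel's
 * fixed_addresses enum, with hypothetical slots. */
enum fixed_addresses {
	FIX_HOLE,			/* index 0 is deliberately unused */
	FIX_EXAMPLE_A,			/* hypothetical slot */
	FIX_EXAMPLE_B,			/* hypothetical slot */
	__end_of_fixed_addresses
};

/* Valid indices lie strictly between the two enum endpoints. */
static int idx_is_valid(int idx)
{
	return idx > FIX_HOLE && idx < __end_of_fixed_addresses;
}

int main(void)
{
	assert(idx_is_valid(FIX_EXAMPLE_A));
	assert(!idx_is_valid(__end_of_fixed_addresses));
	/* The old upper-bound-only check accepted these two: */
	assert(!idx_is_valid(FIX_HOLE));
	assert(!idx_is_valid(-1));
	printf("all fixmap index checks passed\n");
	return 0;
}

The last two asserts correspond exactly to the indices that the removed
idx >= __end_of_fixed_addresses test let through.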
From dd006da21646f1c86f0242eb8f527d093303127a Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 19 Mar 2015 16:42:27 +0000
Subject: arm64: mm: increase VA range of identity map

The page size and the number of translation levels, and hence the
supported virtual address range, are build-time configurables on arm64
whose optimal values are use case dependent. However, in the current
implementation, if the system's RAM is located at a very high offset,
the virtual address range needs to reflect that merely because the
identity mapping, which is only used to enable or disable the MMU,
requires the extended virtual range to map the physical memory at an
equal virtual offset.

This patch relaxes that requirement by increasing the number of
translation levels for the identity mapping only, and only when actually
needed, i.e., when the offset of system RAM is found to be out of reach
at runtime.

Tested-by: Laura Abbott
Reviewed-by: Catalin Marinas
Tested-by: Marc Zyngier
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/mmu_context.h   | 43 ++++++++++++++++++++++++++++++++++
 arch/arm64/include/asm/page.h          |  6 +++--
 arch/arm64/include/asm/pgtable-hwdef.h |  7 +++++-
 arch/arm64/kernel/head.S               | 37 +++++++++++++++++++++++++++++
 arch/arm64/kernel/smp.c                |  1 +
 arch/arm64/mm/mmu.c                    |  7 +++++-
 arch/arm64/mm/proc-macros.S            | 10 ++++++++
 arch/arm64/mm/proc.S                   |  3 +++
 8 files changed, 110 insertions(+), 4 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..ecf2d060036b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
 	: "r" (ttbr));
 }
 
+/*
+ * TCR.T0SZ value to use when the ID map is active. Usually equals
+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
+ * physical memory, in which case it will be smaller.
+ */
+extern u64 idmap_t0sz;
+
+static inline bool __cpu_uses_extended_idmap(void)
+{
+	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
+		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+}
+
+static inline void __cpu_set_tcr_t0sz(u64 t0sz)
+{
+	unsigned long tcr;
+
+	if (__cpu_uses_extended_idmap())
+		asm volatile (
+		"	mrs	%0, tcr_el1	;"
+		"	bfi	%0, %1, %2, %3	;"
+		"	msr	tcr_el1, %0	;"
+		"	isb"
+		: "=&r" (tcr)
+		: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+/*
+ * Set TCR.T0SZ to the value appropriate for activating the identity map.
+ */
+static inline void cpu_set_idmap_tcr_t0sz(void)
+{
+	__cpu_set_tcr_t0sz(idmap_t0sz);
+}
+
+/*
+ * Set TCR.T0SZ to its default value (based on VA_BITS)
+ */
+static inline void cpu_set_default_tcr_t0sz(void)
+{
+	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
+}
+
 static inline void switch_new_context(struct mm_struct *mm)
 {
 	unsigned long flags;
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 22b16232bd60..3d02b1869eb8 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,7 +33,9 @@
  * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
  * map the kernel. With the 64K page configuration, swapper and idmap need to
  * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information).
+ * for more information). Note that the number of ID map translation levels
+ * could be increased on the fly if system RAM is out of reach for the default
+ * VA range, so 3 pages are reserved in all cases.
  */
 #ifdef CONFIG_ARM64_64K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_ARM64_PGTABLE_LEVELS)
@@ -42,7 +44,7 @@
 #endif
 
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(SWAPPER_DIR_SIZE)
+#define IDMAP_DIR_SIZE		(3 * PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
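The helpers added to mmu_context.h above rest on one invariant: the ID
map is "extended" exactly when idmap_t0sz differs from the build-time
default TCR_T0SZ(VA_BITS). Below is a rough user-space model of that
bookkeeping, with VA_BITS fixed at 39 purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: VA_BITS and idmap_t0sz stand in for the kernel's
 * build-time configuration and the variable written out of head.S. */
#define VA_BITS		39
#define TCR_T0SZ(x)	((uint64_t)(64 - (x)))

static uint64_t idmap_t0sz = TCR_T0SZ(VA_BITS);

/* Mirrors __cpu_uses_extended_idmap(): true only if head.S had to lower
 * T0SZ below the default in order to reach system RAM. */
static bool cpu_uses_extended_idmap(void)
{
	return idmap_t0sz != TCR_T0SZ(VA_BITS);
}

int main(void)
{
	printf("default T0SZ = %llu\n", (unsigned long long)idmap_t0sz);

	/* Pretend RAM needs 46 physical address bits: head.S would then
	 * store the smaller T0SZ value before enabling the MMU. */
	idmap_t0sz = TCR_T0SZ(46);
	printf("extended idmap in use: %s\n",
	       cpu_uses_extended_idmap() ? "yes" : "no");
	return 0;
}

On real hardware idmap_t0sz is written once, early, from head.S; the
reassignment above merely simulates that event.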
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5f930cc9ea83..847e864202cc 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -143,7 +143,12 @@
 /*
  * TCR flags.
  */
-#define TCR_TxSZ(x)		(((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
+#define TCR_T0SZ_OFFSET		0
+#define TCR_T1SZ_OFFSET		16
+#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
+#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
+#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
+#define TCR_TxSZ_WIDTH		6
 #define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
 #define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
 #define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 1fdf42041f42..51c9811e683c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -383,6 +383,43 @@ __create_page_tables:
 	 */
 	mov	x0, x25				// idmap_pg_dir
 	adrp	x3, KERNEL_START		// __pa(KERNEL_START)
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))
+
+	/*
+	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
+	 * created that covers system RAM if that is located sufficiently high
+	 * in the physical address space. So for the ID map, use an extended
+	 * virtual range in that case, by configuring an additional translation
+	 * level.
+	 * First, we have to verify our assumption that the current value of
+	 * VA_BITS was chosen such that all translation levels are fully
+	 * utilised, and that lowering T0SZ will always result in an additional
+	 * translation level to be configured.
+	 */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
+#endif
+
+	/*
+	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+	 * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+	 * this number conveniently equals the number of leading zeroes in
+	 * the physical address of KERNEL_END.
+	 */
+	adrp	x5, KERNEL_END
+	clz	x5, x5
+	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	b.ge	1f			// .. then skip additional level
+
+	str_l	x5, idmap_t0sz, x6
+
+	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
+1:
+#endif
+
 	create_pgd_entry x0, x3, x5, x6
 	mov	x5, x3				// __pa(KERNEL_START)
 	adr_l	x6, KERNEL_END			// __pa(KERNEL_END)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4257369341e4..ffe8e1b814e0 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void)
 	 */
 	cpu_set_reserved_ttbr0();
 	flush_tlb_all();
+	cpu_set_default_tcr_t0sz();
 
 	preempt_disable();
 	trace_hardirqs_off();
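The clz trick in the head.S hunk above deserves spelling out: because
T0SZ == 64 - (number of VA bits used), the count of leading zero bits in
the physical address of KERNEL_END is precisely the largest T0SZ whose
input range still covers the whole image. A small C sketch using the
GCC/Clang builtin, with a made-up end address:

#include <stdint.h>
#include <stdio.h>

/* For a non-zero address, the leading-zero count equals 64 minus the
 * number of address bits actually used, i.e. the maximum usable T0SZ. */
static unsigned int t0sz_for(uint64_t kernel_end_pa)
{
	return (unsigned int)__builtin_clzll(kernel_end_pa);
}

int main(void)
{
	/* Hypothetical image ending just past a 45-bit boundary. */
	uint64_t kernel_end = (1ULL << 45) + 0x200000;
	unsigned int t0sz = t0sz_for(kernel_end);

	printf("clz(KERNEL_END) = %u -> ID map must cover %u VA bits\n",
	       t0sz, 64 - t0sz);

	/* With VA_BITS = 39 the default T0SZ is 25; since 18 < 25, the
	 * b.ge in head.S falls through and the extra level is set up. */
	return 0;
}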
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c9267acb699c..428aaf86c95b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,6 +40,8 @@
 
 #include "mm.h"
 
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
@@ -454,6 +456,7 @@ void __init paging_init(void)
 	 */
 	cpu_set_reserved_ttbr0();
 	flush_tlb_all();
+	cpu_set_default_tcr_t0sz();
 }
 
 /*
@@ -461,8 +464,10 @@ void __init paging_init(void)
  */
 void setup_mm_for_reboot(void)
 {
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
+	cpu_set_reserved_ttbr0();
 	flush_tlb_all();
+	cpu_set_idmap_tcr_t0sz();
+	cpu_switch_mm(idmap_pg_dir, &init_mm);
 }
 
 /*
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 005d29e2977d..4c4d93c4bf65 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -52,3 +52,13 @@
 	mov	\reg, #4		// bytes per word
 	lsl	\reg, \reg, \tmp	// actual cache line size
 	.endm
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+	ldr_l	\tmpreg, idmap_t0sz
+	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 28eebfb6af76..cdd754e19b9b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume)
 	msr	cpacr_el1, x6
 	msr	ttbr0_el1, x1
 	msr	ttbr1_el1, x7
+	tcr_set_idmap_t0sz x8, x7
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
@@ -233,6 +234,8 @@ ENTRY(__cpu_setup)
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+	tcr_set_idmap_t0sz	x10, x9
+
 	/*
 	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
 	 * TCR_EL1.
--
cgit v1.2.3
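To close the series out: the bfi in the tcr_set_idmap_t0sz macro replaces
the 6-bit T0SZ field of a TCR_EL1 image while leaving every other field
untouched. The same operation expressed in C, as an illustrative sketch
with a made-up TCR value:

#include <stdint.h>
#include <stdio.h>

#define TCR_T0SZ_OFFSET	0
#define TCR_TxSZ_WIDTH	6

/* Models the bfi instruction: insert t0sz into bits [5:0] of tcr. */
static uint64_t tcr_set_t0sz(uint64_t tcr, uint64_t t0sz)
{
	uint64_t mask = ((UINT64_C(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET;

	return (tcr & ~mask) | ((t0sz << TCR_T0SZ_OFFSET) & mask);
}

int main(void)
{
	uint64_t tcr = 0xdeadbeef00ULL | 25;	/* hypothetical TCR, T0SZ=25 */

	tcr = tcr_set_t0sz(tcr, 18);		/* widen the ID map to 46 bits */
	printf("new T0SZ field = %llu\n", (unsigned long long)(tcr & 0x3f));
	return 0;
}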