From b615bbbff1c4d6fcd13007e75d75f6510aeb3808 Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Wed, 13 Aug 2014 09:04:49 -0700 Subject: arm: use generic fixmap.h ARM is different from other architectures in that fixmap pages are indexed with a positive offset from FIXADDR_START. Other architectures index with a negative offset from FIXADDR_TOP. In order to use the generic fixmap.h definitions, this patch redefines FIXADDR_TOP to be inclusive of the usable range. That is, FIXADDR_TOP is the virtual address of the topmost fixed page. The newly defined FIXADDR_END is the first virtual address past the fixed mappings. Signed-off-by: Mark Salter Reviewed-by: Doug Anderson [kees: update for a05e54c103b0 ("ARM: 8031/2: change fixmap ...")] Signed-off-by: Kees Cook Cc: Laura Abbott Cc: Rob Herring Acked-by: Nicolas Pitre --- arch/arm/include/asm/fixmap.h | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index 74124b0d0d79..a7add6f9315d 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -2,27 +2,18 @@ #define _ASM_FIXMAP_H #define FIXADDR_START 0xffc00000UL -#define FIXADDR_TOP 0xffe00000UL -#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START) +#define FIXADDR_END 0xffe00000UL +#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) -#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT) +#include <asm/kmap_types.h> -#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) +enum fixed_addresses { + FIX_KMAP_BEGIN, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, -extern void __this_fixmap_does_not_exist(void); + __end_of_fixed_addresses +}; -static inline unsigned long fix_to_virt(const unsigned int idx) -{ - if (idx >= FIX_KMAP_NR_PTES) - __this_fixmap_does_not_exist(); - return __fix_to_virt(idx); -} - -static inline unsigned int virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h> #endif -- cgit v1.2.3 From 836a24183273e9db1c092246bd8e306b297d9917 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 2 Jul 2014 02:01:15 -0500 Subject: ARM: expand fixmap region to 3MB With commit a05e54c103b0 ("ARM: 8031/2: change fixmap mapping region to support 32 CPUs"), the fixmap region was expanded to 2MB, but it precluded any other uses of the fixmap region. In order to support other uses the fixmap region needs to be expanded beyond 2MB. Fortunately, the adjacent 1MB range 0xffe00000-0xfff00000 is available. Remove the fixmap_page_table pointer and look up the page table via the virtual address so that the fixmap region can span more than one pmd. The 2nd pmd is already created since it is shared with the vector page.
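To illustrate the address arithmetic in play here: asm-generic/fixmap.h (pulled in by the previous patch) derives virtual addresses downward from FIXADDR_TOP, which is why FIXADDR_TOP must name the topmost usable page. A standalone sketch of that arithmetic, assuming 4 KiB pages and the ARM constants above (illustration only, not kernel code):

#include <stdio.h>

/* Constants from the patches above; PAGE_SHIFT of 12 is an assumption. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define FIXADDR_START	0xffc00000UL
#define FIXADDR_END	0xffe00000UL
#define FIXADDR_TOP	(FIXADDR_END - PAGE_SIZE)

/* asm-generic/fixmap.h indexes downward from FIXADDR_TOP. */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	printf("idx 0 -> %#lx\n", fix_to_virt(0));	/* 0xffdff000, the topmost page */
	printf("idx 1 -> %#lx\n", fix_to_virt(1));	/* 0xffdfe000 */
	return 0;
}

Index 0 maps exactly to FIXADDR_TOP and successive indices step down toward FIXADDR_START, so expanding FIXADDR_END simply makes room for more indices.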
Signed-off-by: Rob Herring [kees: fixed CONFIG_DEBUG_HIGHMEM get_fixmap() calls] [kees: moved pte allocation outside of CONFIG_HIGHMEM] Signed-off-by: Kees Cook Acked-by: Nicolas Pitre --- Documentation/arm/memory.txt | 2 +- arch/arm/include/asm/fixmap.h | 2 +- arch/arm/mm/highmem.c | 15 ++++++++------- arch/arm/mm/mmu.c | 6 +++--- 4 files changed, 13 insertions(+), 12 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt index 38dc06d0a791..4178ebda6e66 100644 --- a/Documentation/arm/memory.txt +++ b/Documentation/arm/memory.txt @@ -41,7 +41,7 @@ fffe8000 fffeffff DTCM mapping area for platforms with fffe0000 fffe7fff ITCM mapping area for platforms with ITCM mounted inside the CPU. -ffc00000 ffdfffff Fixmap mapping region. Addresses provided +ffc00000 ffefffff Fixmap mapping region. Addresses provided by fix_to_virt() will be located here. fee00000 feffffff Mapping of PCI I/O space. This is a static diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index a7add6f9315d..d984ca69ce19 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -2,7 +2,7 @@ #define _ASM_FIXMAP_H #define FIXADDR_START 0xffc00000UL -#define FIXADDR_END 0xffe00000UL +#define FIXADDR_END 0xfff00000UL #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) #include diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 45aeaaca9052..81061987ac45 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -18,19 +18,20 @@ #include #include "mm.h" -pte_t *fixmap_page_table; - static inline void set_fixmap_pte(int idx, pte_t pte) { unsigned long vaddr = __fix_to_virt(idx); - set_pte_ext(fixmap_page_table + idx, pte, 0); + pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); + + set_pte_ext(ptep, pte, 0); local_flush_tlb_kernel_page(vaddr); } static inline pte_t get_fixmap_pte(unsigned long vaddr) { - unsigned long idx = __virt_to_fix(vaddr); - return *(fixmap_page_table + idx); + pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); + + return *ptep; } void *kmap(struct page *page) @@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page) * With debugging enabled, kunmap_atomic forces that entry to 0. * Make sure it was indeed properly unmapped. 
*/ - BUG_ON(!pte_none(*(fixmap_page_table + idx))); + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); #endif /* * When debugging is off, kunmap_atomic leaves the previous mapping @@ -134,7 +135,7 @@ void *kmap_atomic_pfn(unsigned long pfn) idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(fixmap_page_table + idx))); + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); #endif set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 8348ed6b2efe..7fa0966cd15f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1326,10 +1326,10 @@ static void __init kmap_init(void) #ifdef CONFIG_HIGHMEM pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), PKMAP_BASE, _PAGE_KERNEL_TABLE); - - fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START), - FIXADDR_START, _PAGE_KERNEL_TABLE); #endif + + early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START, + _PAGE_KERNEL_TABLE); } static void __init map_lowmem(void) -- cgit v1.2.3 From 99b4ac9afce4129323b5b4c7002a942a9489914c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 4 Apr 2014 23:27:49 +0200 Subject: arm: fixmap: implement __set_fixmap() This is used from set_fixmap() and clear_fixmap() via asm-generic/fixmap.h. It also makes sure that the fixmap allocation fits into the expected range. Based on a patch by Rabin Vincent. Signed-off-by: Kees Cook Cc: Rabin Vincent Acked-by: Nicolas Pitre --- arch/arm/include/asm/fixmap.h | 2 ++ arch/arm/mm/mmu.c | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index d984ca69ce19..714606f70425 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -14,6 +14,8 @@ enum fixed_addresses { __end_of_fixed_addresses }; +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); + #include <asm-generic/fixmap.h> #endif diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 7fa0966cd15f..bdf5c94f7c36 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -22,6 +22,7 @@ #include #include #include +#include <asm/fixmap.h> #include #include #include @@ -392,6 +393,29 @@ SET_MEMORY_FN(rw, pte_set_rw) SET_MEMORY_FN(x, pte_set_x) SET_MEMORY_FN(nx, pte_set_nx) +/* + * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range(). + * As a result, this can only be called with preemption disabled, as under + * stop_machine(). + */ +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) +{ + unsigned long vaddr = __fix_to_virt(idx); + pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr); + + /* Make sure fixmap region does not exceed available allocation. */ + BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > + FIXADDR_END); + BUG_ON(idx >= __end_of_fixed_addresses); + + if (pgprot_val(prot)) + set_pte_at(NULL, vaddr, pte, + pfn_pte(phys >> PAGE_SHIFT, prot)); + else + pte_clear(NULL, vaddr, pte); + local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); +} + /* * Adjust the PMD section entries according to the CPU in use. */ -- cgit v1.2.3 From ab0615e2d6fb074764a3e4d05f1326fa2fdb4627 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Thu, 24 Apr 2014 23:28:57 +0200 Subject: arm: use fixmap for text patching when text is RO Use fixmaps for text patching when the kernel text is read-only, inspired by x86.
This makes jump labels and kprobes work with the currently available CONFIG_DEBUG_SET_MODULE_RONX and the upcoming CONFIG_DEBUG_RODATA options. Signed-off-by: Rabin Vincent [kees: fixed up for merge with "arm: use generic fixmap.h"] [kees: added sparse acquire/release annotations to pass C=1 builds] [kees: always use stop_machine to keep TLB flushing local] Signed-off-by: Kees Cook Acked-by: Nicolas Pitre --- arch/arm/include/asm/fixmap.h | 4 ++ arch/arm/kernel/jump_label.c | 2 +- arch/arm/kernel/patch.c | 92 ++++++++++++++++++++++++++++++++++--------- arch/arm/kernel/patch.h | 12 +++++- 4 files changed, 89 insertions(+), 21 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index 714606f70425..0415eae1df27 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -11,6 +11,10 @@ enum fixed_addresses { FIX_KMAP_BEGIN, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, + /* Support writing RO kernel text via kprobes, jump labels, etc. */ + FIX_TEXT_POKE0, + FIX_TEXT_POKE1, + __end_of_fixed_addresses }; diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c index 4ce4f789446d..afeeb9ea6f43 100644 --- a/arch/arm/kernel/jump_label.c +++ b/arch/arm/kernel/jump_label.c @@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry, insn = arm_gen_nop(); if (is_static) - __patch_text(addr, insn); + __patch_text_early(addr, insn); else patch_text(addr, insn); } diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c index 07314af47733..5038960e3c55 100644 --- a/arch/arm/kernel/patch.c +++ b/arch/arm/kernel/patch.c @@ -1,8 +1,11 @@ #include <linux/kernel.h> +#include <linux/spinlock.h> #include <linux/kprobes.h> +#include <linux/mm.h> #include <linux/stop_machine.h> #include <asm/cacheflush.h> +#include <asm/fixmap.h> #include <asm/smp_plat.h> #include <asm/opcodes.h> @@ -13,21 +16,77 @@ struct patch { unsigned int insn; }; -void __kprobes __patch_text(void *addr, unsigned int insn) +static DEFINE_SPINLOCK(patch_lock); + +static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) + __acquires(&patch_lock) +{ + unsigned int uintaddr = (uintptr_t) addr; + bool module = !core_kernel_text(uintaddr); + struct page *page; + + if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) + page = vmalloc_to_page(addr); + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) + page = virt_to_page(addr); + else + return addr; + + if (flags) + spin_lock_irqsave(&patch_lock, *flags); + else + __acquire(&patch_lock); + + set_fixmap(fixmap, page_to_phys(page)); + + return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); +} + +static void __kprobes patch_unmap(int fixmap, unsigned long *flags) + __releases(&patch_lock) +{ + clear_fixmap(fixmap); + + if (flags) + spin_unlock_irqrestore(&patch_lock, *flags); + else + __release(&patch_lock); +} + +void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap) { bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); + unsigned int uintaddr = (uintptr_t) addr; + bool twopage = false; + unsigned long flags; + void *waddr = addr; int size; + if (remap) + waddr = patch_map(addr, FIX_TEXT_POKE0, &flags); + else + __acquire(&patch_lock); + if (thumb2 && __opcode_is_thumb16(insn)) { - *(u16 *)addr = __opcode_to_mem_thumb16(insn); + *(u16 *)waddr = __opcode_to_mem_thumb16(insn); size = sizeof(u16); - } else if (thumb2 && ((uintptr_t)addr & 2)) { + } else if (thumb2 && (uintaddr & 2)) { u16 first = __opcode_thumb32_first(insn); u16 second = __opcode_thumb32_second(insn); - u16 *addrh = addr; + u16 *addrh0 = waddr; + u16 *addrh1 = waddr + 2; + + twopage
= (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2; + if (twopage && remap) + addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL); + + *addrh0 = __opcode_to_mem_thumb16(first); + *addrh1 = __opcode_to_mem_thumb16(second); - addrh[0] = __opcode_to_mem_thumb16(first); - addrh[1] = __opcode_to_mem_thumb16(second); + if (twopage && addrh1 != addr + 2) { + flush_kernel_vmap_range(addrh1, 2); + patch_unmap(FIX_TEXT_POKE1, NULL); + } size = sizeof(u32); } else { @@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn) else insn = __opcode_to_mem_arm(insn); - *(u32 *)addr = insn; + *(u32 *)waddr = insn; size = sizeof(u32); } + if (waddr != addr) { + flush_kernel_vmap_range(waddr, twopage ? size / 2 : size); + patch_unmap(FIX_TEXT_POKE0, &flags); + } else + __release(&patch_lock); + flush_icache_range((uintptr_t)(addr), (uintptr_t)(addr) + size); } @@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn) .insn = insn, }; - if (cache_ops_need_broadcast()) { - stop_machine(patch_text_stop_machine, &patch, cpu_online_mask); - } else { - bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL) - && __opcode_is_thumb32(insn) - && ((uintptr_t)addr & 2); - - if (straddles_word) - stop_machine(patch_text_stop_machine, &patch, NULL); - else - __patch_text(addr, insn); - } + stop_machine(patch_text_stop_machine, &patch, NULL); } diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h index b4731f2dac38..77e054c2f6cd 100644 --- a/arch/arm/kernel/patch.h +++ b/arch/arm/kernel/patch.h @@ -2,6 +2,16 @@ #define _ARM_KERNEL_PATCH_H void patch_text(void *addr, unsigned int insn); -void __patch_text(void *addr, unsigned int insn); +void __patch_text_real(void *addr, unsigned int insn, bool remap); + +static inline void __patch_text(void *addr, unsigned int insn) +{ + __patch_text_real(addr, insn, true); +} + +static inline void __patch_text_early(void *addr, unsigned int insn) +{ + __patch_text_real(addr, insn, false); +} #endif -- cgit v1.2.3 From 80d6b0c2eed2a504f6740cd1f5ea76dc50abfc4d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 3 Apr 2014 13:29:50 -0700 Subject: ARM: mm: allow text and rodata sections to be read-only This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata read-only. Additionally, this splits rodata from text so that rodata can also be NX, which may lead to wasted memory when aligning to SECTION_SIZE. The read-only areas are made writable during ftrace updates and kexec. 
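The mechanism those updates rely on is the temporary-writable-alias pattern from the preceding patch: map the read-only page at a scratch fixmap slot, write through the alias, then tear it down. A condensed kernel-context sketch of that pattern, using the fixmap interfaces introduced earlier in this series (illustration only; the real code is patch_map()/patch_unmap() in arch/arm/kernel/patch.c, which also handles Thumb-2 and writes that straddle a page):

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>

/* Sketch: write one word of otherwise read-only core kernel text. */
static void write_ro_text_word(void *addr, u32 insn)
{
	struct page *page = virt_to_page(addr);		/* core kernel text */
	unsigned long offs = (uintptr_t)addr & ~PAGE_MASK;
	u32 *waddr;

	set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));	/* writable alias */
	waddr = (u32 *)(__fix_to_virt(FIX_TEXT_POKE0) + offs);

	*waddr = insn;					/* write via the alias */

	flush_kernel_vmap_range(waddr, sizeof(insn));
	clear_fixmap(FIX_TEXT_POKE0);
	flush_icache_range((uintptr_t)addr, (uintptr_t)addr + sizeof(insn));
}

The original mapping stays read-only the whole time; only the short-lived alias is writable, which keeps the attack surface small.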
Signed-off-by: Kees Cook Tested-by: Laura Abbott Acked-by: Nicolas Pitre --- arch/arm/include/asm/cacheflush.h | 10 ++++++++ arch/arm/kernel/ftrace.c | 19 ++++++++++++++++ arch/arm/kernel/machine_kexec.c | 1 + arch/arm/kernel/vmlinux.lds.S | 3 +++ arch/arm/mm/Kconfig | 12 ++++++++++ arch/arm/mm/init.c | 48 ++++++++++++++++++++++++++++++++++++++- 6 files changed, 92 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 10e78d00a0bb..2d46862e7bef 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +void set_kernel_text_rw(void); +void set_kernel_text_ro(void); +#else +static inline void set_kernel_text_rw(void) { } +static inline void set_kernel_text_ro(void) { } +#endif + void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void *kaddr, unsigned long len); + #endif diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index af9a8a927a4e..b8c75e45a950 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -15,6 +15,7 @@ #include #include #include +#include <linux/stop_machine.h> #include #include @@ -35,6 +36,22 @@ #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ +static int __ftrace_modify_code(void *data) +{ + int *command = data; + + set_kernel_text_rw(); + ftrace_modify_all_code(*command); + set_kernel_text_ro(); + + return 0; +} + +void arch_ftrace_update_code(int command) +{ + stop_machine(__ftrace_modify_code, &command, NULL); +} + static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) { return rec->arch.old_mcount ? OLD_NOP : NOP; @@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void) int ftrace_arch_code_modify_post_process(void) { set_all_modules_text_ro(); + /* Make sure any TLB misses during machine stop are cleared. */ + flush_tlb_all(); return 0; } diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 8f75250cbe30..4423a565ef6f 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -164,6 +164,7 @@ void machine_kexec(struct kimage *image) reboot_code_buffer = page_address(image->control_code_page); /* Prepare parameters for reboot_code_buffer*/ + set_kernel_text_rw(); kexec_start_address = image->start; kexec_indirection_page = page_list; kexec_mach_type = machine_arch_type; diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 18fd68a295ea..3afcb6c2cf06 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -120,6 +120,9 @@ SECTIONS ARM_CPU_KEEP(PROC_INFO) } +#ifdef CONFIG_DEBUG_RODATA
+ . = ALIGN(1<<SECTION_SHIFT); +#endif -- cgit v1.2.3 From: Behan Webster Subject: ARM: 8172/1: Define a global named register for current_stack_pointer Define a global named register variable bound to "sp", so that the current stack pointer can be read directly from C code. This supports compiling the kernel with both gcc and clang. Signed-off-by: Behan Webster Reviewed-by: Jan-Simon Möller Reviewed-by: Mark Charlebois Acked-by: Will Deacon Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index fc44d3761f9e..bb435152189f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -100,6 +100,11 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) +/* + * how to get the current stack pointer in C + */ +register unsigned long current_stack_pointer asm ("sp"); + /* * how to get the thread information struct from C */ -- cgit v1.2.3 From f6c9cbf091a4b0769180d663e37621569b054d11 Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Sat, 27 Sep 2014 00:31:09 +0100 Subject: ARM: 8173/1: Calculate current_thread_info from current_stack_pointer Use the global current_stack_pointer to get the value of the stack pointer. This change makes it possible to compile the kernel with both gcc and clang. Signed-off-by: Behan Webster Reviewed-by: Mark Charlebois Reviewed-by: Jan-Simon Möller Acked-by: Will Deacon Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index bb435152189f..b9dc3dce59a6 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -112,8 +112,8 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__; static inline struct thread_info *current_thread_info(void) { - register unsigned long sp asm ("sp"); - return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); + return (struct thread_info *) + (current_stack_pointer & ~(THREAD_SIZE - 1)); } #define thread_saved_pc(tsk) \ -- cgit v1.2.3 From ccbd2da50db2111f6d055e3201c39a37e542ceb6 Mon Sep 17 00:00:00 2001 From: Mark Charlebois Date: Sat, 27 Sep 2014 00:31:05 +0100 Subject: ARM: 8174/1: Use global stack register variable for percpu Using the global current_stack_pointer works on both clang and gcc. current_stack_pointer is an unsigned long and needs to be cast to a pointer before being dereferenced. Signed-off-by: Mark Charlebois Signed-off-by: Behan Webster Signed-off-by: Russell King --- arch/arm/include/asm/percpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index 209e6504922e..a89b4076cde4 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -30,14 +30,14 @@ static inline void set_my_cpu_offset(unsigned long off) static inline unsigned long __my_cpu_offset(void) { unsigned long off; - register unsigned long *sp asm ("sp"); /* * Read TPIDRPRW. * We want to allow caching the value, so avoid using volatile and * instead use a fake stack read to hazard against barrier().
*/ - asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) + : "Q" (*(const unsigned long *)current_stack_pointer)); return off; } -- cgit v1.2.3 From 0ebc1f5671dff84b7ba4dcc38ebce956e6dea5fa Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Sat, 27 Sep 2014 00:31:06 +0100 Subject: ARM: 8175/1: Use current_stack_pointer to calculate pt_regs address Use the global current_stack_pointer to calculate the end of the stack for current_pt_regs(). Signed-off-by: Behan Webster Reviewed-by: Mark Charlebois Reviewed-by: Jan-Simon Möller Acked-by: Will Deacon Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/ptrace.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 601264d983fa..51622ba7c4a6 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -154,9 +154,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) return regs->ARM_sp; } -#define current_pt_regs(void) ({ \ - register unsigned long sp asm ("sp"); \ - (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \ +#define current_pt_regs(void) ({ (struct pt_regs *) \ + ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \ }) #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 6c96a4a6e249a0580b32893583771149e0611375 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 10 Nov 2014 21:56:41 +0100 Subject: ARM: 8197/1: vfp: Fix VFPv3 hwcap detection on CPUID based cpus The subarchitecture field in the fpsid register is 7 bits wide on ARM CPUs using the CPUID identification scheme, spanning bits 22 to 16. The topmost bit is used to designate that the subarchitecture designer is not ARM when it is set to 1. On non-CPUID scheme CPUs the subarchitecture field is only 4 bits wide and the higher bits are used to indicate no double precision support (bit 20) and the FTSMX/FLDMX format (bits 21-22). The VFP support code only looks at bits 19-16 to determine the VFP version. On Qualcomm's processors (Krait and Scorpion) we should see that we have HWCAP_VFPv3 but we don't, because bit 22 is set to 1 to indicate that the subarchitecture is not implemented by ARM and the rest of the bits are left as 0 because this is the first subarchitecture that Qualcomm has designed. Unfortunately we can't just widen the FPSID subarchitecture bitmask to consider all the bits on a CPUID-scheme CPU, because there may be CPUs without the CPUID scheme that have VFP without double precision support, and the extracted version would then be a large, meaningless number. Instead, update the version detection logic to consider whether the CPU is using the CPUID scheme. If the CPU is using the CPUID scheme, use the MVFR registers to determine what version of VFP is supported. We already do this for VFPv4, so do something similar for VFPv3 and look for single or double precision support in MVFR0. Otherwise fall back to using FPSID to detect VFP support on non-CPUID scheme CPUs. We know that VFPv3 is only present in CPUs that have support for the CPUID scheme so this should be equivalent.
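To make the failure mode concrete, here is a standalone sketch of the two decodings, using the mask values added below and a hypothetical Krait-style FPSID whose subarchitecture bits 22-16 hold 0x40 (only the "non-ARM designer" bit set; illustration only):

#include <stdio.h>
#include <stdint.h>

#define FPSID_ARCH_BIT		16
#define FPSID_ARCH_MASK		(0xF << FPSID_ARCH_BIT)		/* pre-CPUID: 4 bits */
#define FPSID_CPUID_ARCH_MASK	(0x7F << FPSID_ARCH_BIT)	/* CPUID scheme: 7 bits */

int main(void)
{
	/* Hypothetical Krait-style FPSID: bit 22 set, bits 21-16 clear. */
	uint32_t fpsid = (uint32_t)0x40 << FPSID_ARCH_BIT;

	/* The old 4-bit decoding sees subarchitecture 0, so the version
	 * check (VFP_arch >= 2) fails and HWCAP_VFPv3 is never set. */
	printf("4-bit field: %u\n",
	       (unsigned)((fpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT));

	/* The full 7-bit field exposes the designer bit; the MVFR
	 * registers must then be consulted for the real VFP version. */
	printf("7-bit field: 0x%x\n",
	       (unsigned)((fpsid & FPSID_CPUID_ARCH_MASK) >> FPSID_ARCH_BIT));
	return 0;
}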
Tested-by: Rob Clark Reviewed-by: Will Deacon Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/include/asm/vfp.h | 5 +++ arch/arm/vfp/vfpmodule.c | 93 ++++++++++++++++++++++++++-------------------- 2 files changed, 57 insertions(+), 41 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h index f4ab34fd4f72..ee5f3084243c 100644 --- a/arch/arm/include/asm/vfp.h +++ b/arch/arm/include/asm/vfp.h @@ -22,6 +22,7 @@ #define FPSID_NODOUBLE (1<<20) #define FPSID_ARCH_BIT (16) #define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT) +#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT) #define FPSID_PART_BIT (8) #define FPSID_PART_MASK (0xFF << FPSID_PART_BIT) #define FPSID_VARIANT_BIT (4) @@ -75,6 +76,10 @@ /* MVFR0 bits */ #define MVFR0_A_SIMD_BIT (0) #define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT) +#define MVFR0_SP_BIT (4) +#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT) +#define MVFR0_DP_BIT (8) +#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT) /* Bit patterns for decoding the packaged operation descriptors */ #define VFPOPDESC_LENGTH_BIT (9) diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 2f37e1d6cb45..5002d002f6e3 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -722,6 +722,7 @@ static int __init vfp_init(void) { unsigned int vfpsid; unsigned int cpu_arch = cpu_architecture(); + u32 mvfr0; if (cpu_arch >= CPU_ARCH_ARMv6) on_each_cpu(vfp_enable, NULL, 1); @@ -738,63 +739,73 @@ static int __init vfp_init(void) vfp_vector = vfp_null_entry; pr_info("VFP support v0.3: "); - if (VFP_arch) + if (VFP_arch) { pr_cont("not present\n"); - else if (vfpsid & FPSID_NODOUBLE) { - pr_cont("no double precision support\n"); - } else { - hotcpu_notifier(vfp_hotplug, 0); - - VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ - pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n", - (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, - (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, - (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, - (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, - (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); - - vfp_vector = vfp_support_entry; - - thread_register_notifier(&vfp_notifier_block); - vfp_pm_init(); - + return 0; + /* Extract the architecture on CPUID scheme */ + } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { + VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK; + VFP_arch >>= FPSID_ARCH_BIT; /* - * We detected VFP, and the support code is - * in place; report VFP support to userspace. + * Check for the presence of the Advanced SIMD + * load/store instructions, integer and single + * precision floating point operations. Only check + * for NEON if the hardware has the MVFR registers. */ - elf_hwcap |= HWCAP_VFP; +#ifdef CONFIG_NEON + if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) + elf_hwcap |= HWCAP_NEON; +#endif #ifdef CONFIG_VFPv3 - if (VFP_arch >= 2) { + mvfr0 = fmrx(MVFR0); + if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 || + ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) { elf_hwcap |= HWCAP_VFPv3; - /* * Check for VFPv3 D16 and VFPv4 D16. CPUs in * this configuration only have 16 x 64bit * registers. 
*/ - if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) - elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ + if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1) + /* also v4-D16 */ + elf_hwcap |= HWCAP_VFPv3D16; else elf_hwcap |= HWCAP_VFPD32; } + + if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) + elf_hwcap |= HWCAP_VFPv4; #endif - /* - * Check for the presence of the Advanced SIMD - * load/store instructions, integer and single - * precision floating point operations. Only check - * for NEON if the hardware has the MVFR registers. - */ - if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { -#ifdef CONFIG_NEON - if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) - elf_hwcap |= HWCAP_NEON; -#endif -#ifdef CONFIG_VFPv3 - if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) - elf_hwcap |= HWCAP_VFPv4; -#endif + /* Extract the architecture version on pre-cpuid scheme */ + } else { + if (vfpsid & FPSID_NODOUBLE) { + pr_cont("no double precision support\n"); + return 0; } + + VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; } + + hotcpu_notifier(vfp_hotplug, 0); + + vfp_vector = vfp_support_entry; + + thread_register_notifier(&vfp_notifier_block); + vfp_pm_init(); + + /* + * We detected VFP, and the support code is + * in place; report VFP support to userspace. + */ + elf_hwcap |= HWCAP_VFP; + + pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n", + (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, + VFP_arch, + (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, + (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, + (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); + return 0; } -- cgit v1.2.3 From 44cb09c23f8e5838f24ef47d43dd2804b71b3a65 Mon Sep 17 00:00:00 2001 From: Dmitry Eremin-Solenikov Date: Sat, 22 Nov 2014 14:48:15 +0100 Subject: ARM: 8218/1: warn if bad IRQ was scheduled If somebody causes an unexpected bad IRQ, this event will go unnoticed in both dmesg and the system logs. If the "bad" IRQ is stuck, the device will just hang silently without reporting anything. Compare this to the generic behaviour (from include/asm-generic/hardirq.h), which prints a critical-level message. So to help everybody, include the same message in the ARM-specific ack_bad_irq(). Signed-off-by: Dmitry Eremin-Solenikov Signed-off-by: Russell King --- arch/arm/include/asm/hw_irq.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index a71b417b1856..af79da40af2a 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -8,6 +8,7 @@ static inline void ack_bad_irq(int irq) { extern unsigned long irq_err_count; irq_err_count++; + pr_crit("unexpected IRQ trap at vector %02x\n", irq); } void set_irq_flags(unsigned int irq, unsigned int flags); -- cgit v1.2.3 From 1d4d37159d013a4c54d785407dd8902f901d7bc5 Mon Sep 17 00:00:00 2001 From: Jungseung Lee Date: Sat, 29 Nov 2014 02:33:30 +0100 Subject: ARM: 8235/1: Support for the PXN CPU feature on ARMv7 Modern ARMv7-A/R cores optionally implement the following new hardware feature: - PXN: Privileged execute-never (PXN) is a security feature. The PXN bit determines whether the processor can execute software from the region. This is an effective defence against ret2usr attacks. On implementations without LPAE, PXN support is optional. This patch sets the PXN bit in user page tables to prevent user code from being executed in privileged mode.
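For reference, a standalone sketch of the feature check and descriptor composition the patch performs below (the value of _PAGE_USER_TABLE is stood in by the bare table type bit; illustration only):

#include <stdio.h>
#include <stdint.h>

#define PMD_TYPE_TABLE	(1U << 0)	/* short-descriptor: page table */
#define PMD_PXNTABLE	(1U << 2)	/* v7: PXN, added by this patch */

/* ID_MMFR0[3:0] == 4 indicates VMSAv7 with the PXN extension --
 * the same check the patch applies in build_mem_type_table(). */
static uint32_t user_pmd_prot(uint32_t id_mmfr0)
{
	uint32_t prot = PMD_TYPE_TABLE;	/* stand-in for _PAGE_USER_TABLE */

	if ((id_mmfr0 & 0xF) == 4)
		prot |= PMD_PXNTABLE;
	return prot;
}

int main(void)
{
	printf("VMSAv7 with PXN: %#x\n", user_pmd_prot(0x4));	/* 0x5 */
	printf("older VMSA:      %#x\n", user_pmd_prot(0x3));	/* 0x1 */
	return 0;
}

With the bit set in every first-level user table entry, privileged execution from any user mapping beneath that entry faults, regardless of per-page permissions.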
Reviewed-by: Catalin Marinas Signed-off-by: Jungseung Lee Signed-off-by: Russell King --- arch/arm/include/asm/pgalloc.h | 10 +++++++++- arch/arm/include/asm/pgtable-2level-hwdef.h | 2 ++ arch/arm/include/asm/pgtable-3level-hwdef.h | 1 + arch/arm/mm/mmu.c | 18 +++++++++++++++++- 4 files changed, 29 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 78a779361682..19cfab526d13 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -157,7 +157,15 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) { - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); + extern pmdval_t user_pmd_table; + pmdval_t prot; + + if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE)) + prot = user_pmd_table; + else + prot = _PAGE_USER_TABLE; + + __pmd_populate(pmdp, page_to_phys(ptep), prot); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h index 5cfba15cb401..5e68278e953e 100644 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -20,12 +20,14 @@ #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ /* * - section */ +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index 9fd61c72a33a..f8f1cff62065 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -76,6 +76,7 @@ #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */ #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ /* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index d028dc74a9d5..b590a4c92462 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -52,6 +52,8 @@ EXPORT_SYMBOL(empty_zero_page); */ pmd_t *top_pmd; +pmdval_t user_pmd_table = _PAGE_USER_TABLE; + #define CPOLICY_UNCACHED 0 #define CPOLICY_BUFFERED 1 #define CPOLICY_WRITETHROUGH 2 @@ -528,14 +530,23 @@ static void __init build_mem_type_table(void) hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; +#ifndef CONFIG_ARM_LPAE /* * We don't use domains on ARMv6 (since this causes problems with * v6/v7 kernels), so we must use a separate memory type for user * r/o, kernel r/w to map the vectors page. */ -#ifndef CONFIG_ARM_LPAE if (cpu_arch == CPU_ARCH_ARMv6) vecs_pgprot |= L_PTE_MT_VECTORS; + + /* + * Check whether the hardware supports the PXN bit in the + * Short-descriptor translation table format descriptors.
+ */ + if (cpu_arch == CPU_ARCH_ARMv7 && + (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) { + user_pmd_table |= PMD_PXNTABLE; + } #endif /* @@ -605,6 +616,11 @@ } kern_pgprot |= PTE_EXT_AF; vecs_pgprot |= PTE_EXT_AF; + + /* + * Set PXN for user mappings + */ + user_pgprot |= PTE_EXT_PXN; #endif for (i = 0; i < 16; i++) { -- cgit v1.2.3 From 1f92f77ab68a012638afa9e001b7a1e1e5197930 Mon Sep 17 00:00:00 2001 From: Jungseung Lee Date: Sat, 29 Nov 2014 03:03:51 +0100 Subject: ARM: 8239/1: Introduce {set,clear}_pte_bit Introduce set_pte_bit() and clear_pte_bit() helpers for the pte_mk* functions; they will also be used elsewhere to change individual bits in ptes. Signed-off-by: Jungseung Lee Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 62 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 11 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 3b30062975b2..d5cac545ba33 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -252,17 +252,57 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, set_pte_ext(ptep, pteval, ext); } -#define PTE_BIT_FUNC(fn,op) \ -static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } - -PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); -PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); -PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); -PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); -PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); -PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); -PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN); -PTE_BIT_FUNC(mknexec, |= L_PTE_XN); +static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) +{ + pte_val(pte) &= ~pgprot_val(prot); + return pte; +} + +static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) +{ + pte_val(pte) |= pgprot_val(prot); + return pte; +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_RDONLY)); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY)); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY)); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_DIRTY)); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG)); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_YOUNG)); +} + +static inline pte_t pte_mkexec(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(L_PTE_XN)); +} + +static inline pte_t pte_mknexec(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(L_PTE_XN)); +} static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { -- cgit v1.2.3 From 216b4688cc351e97514c40dbedd14f67ce59639a Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sat, 29 Nov 2014 03:19:42 +0100 Subject: ARM: 8240/1: MCPM: document mcpm_sync_init() Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/mcpm.h | 17 +++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index d428e386c88e..3446f6a1d9fa 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -219,6 +219,23 @@ void __mcpm_outbound_leave_critical(unsigned int cluster, int state); bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); int __mcpm_cluster_state(unsigned int cluster); +/** + *
mcpm_sync_init - Initialize the cluster synchronization support + * + * @power_up_setup: platform specific function invoked during very + * early CPU/cluster bringup stage. + * + * This prepares memory used by vlocks and the MCPM state machine used + * across CPUs that may have their caches active or inactive. Must be + * called only after a successful call to mcpm_platform_register(). + * + * The power_up_setup argument is a pointer to assembly code called when + * the MMU and caches are still disabled during boot and no stack space is + * available. The affinity level passed to that code corresponds to the + * resource that needs to be initialized (e.g. 1 for cluster level, 0 for + * CPU level). Proper exclusion mechanisms are already activated at that + * point. + */ int __init mcpm_sync_init( void (*power_up_setup)(unsigned int affinity_level)); -- cgit v1.2.3
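A sketch of how a platform back-end might tie these calls together, per the documented ordering (the ops bodies and all my_* names are hypothetical, not taken from any real platform port):

#include <linux/init.h>
#include <asm/mcpm.h>

static int my_power_up(unsigned int cpu, unsigned int cluster)
{
	/* program the platform's power controller for cpu/cluster here */
	return 0;
}

static void my_power_down(void)
{
	/* flush caches, exit coherency, then shut this CPU down */
}

static const struct mcpm_platform_ops my_pm_ops = {
	.power_up	= my_power_up,
	.power_down	= my_power_down,
};

/* Runs with the MMU and caches off and no stack, as described above;
 * declared here, implemented in position-independent assembly. */
extern void my_power_up_setup(unsigned int affinity_level);

static int __init my_mcpm_init(void)
{
	int ret = mcpm_platform_register(&my_pm_ops);

	if (!ret)	/* only after a successful mcpm_platform_register() */
		ret = mcpm_sync_init(my_power_up_setup);
	return ret;
}
early_initcall(my_mcpm_init);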