author     Linus Torvalds  2017-01-20 11:44:47 -0800
committer  Linus Torvalds  2017-01-20 11:44:47 -0800
commit     f8f2d4bdb52e67139b0b3e5ae16da04e71ebc1a6
tree       2bbb0dfdaf054e593cb605459c3e1f3d2e862805 /arch
parent     44b4b461a0fb407507b46ea76a71376d74de7058
parent     7d9e8f71b989230bc613d121ca38507d34ada849
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:
- avoid potential stack information leak via the ptrace ABI caused by
  uninitialised variables (a sketch of the fix follows the shortlog
  below)
- SWIOTLB DMA API fall-back allocation fix when the SWIOTLB buffer is
  not initialised (all RAM is suitable for 32-bit DMA masks; see the
  note after the diff)
- fix the bad_mode function returning for unhandled exceptions coming
  from user space (see the condensed view after the diff)
- fix name clash in __page_to_voff() (illustrated after the diff)
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: avoid returning from bad_mode
arm64/ptrace: Reject attempts to set incomplete hardware breakpoint fields
arm64/ptrace: Avoid uninitialised struct padding in fpr_set()
arm64/ptrace: Preserve previous registers for short regset write
arm64/ptrace: Preserve previous registers for short regset write
arm64/ptrace: Preserve previous registers for short regset write
arm64: mm: avoid name clash in __page_to_voff()
arm64: Fix swiotlb fallback allocation
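
The three identical "Preserve previous registers" subjects are three separate
commits, not duplicates: the same one-line idea -- seed the on-stack struct
from the task's current state -- is applied to each affected regset setter
(gpr_set(), fpr_set(), tls_set(), system_call_set() and compat_tls_set() in
the diff below). A condensed sketch of the pattern, based on the fpr_set()
hunk below (simplified, not the verbatim kernel function):

/*
 * user_regset_copyin() fills only the first 'count' bytes of the
 * destination, so a short PTRACE_SETREGSET write used to leave the tail
 * of the on-stack struct as uninitialised stack memory, which was then
 * copied wholesale into the task: the remaining registers were
 * clobbered with junk, and that junk could be read back with
 * PTRACE_GETREGSET -- the stack information leak mentioned above.
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        /* Before: "struct user_fpsimd_state newstate;" -- stack garbage. */
        struct user_fpsimd_state newstate =
                target->thread.fpsimd_state.user_fpsimd;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
                return ret;

        /* A short write now only updates the leading registers. */
        target->thread.fpsimd_state.user_fpsimd = newstate;
        fpsimd_flush_task_state(target);
        return ret;
}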
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/memory.h       |  2
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h  |  1
-rw-r--r--  arch/arm64/kernel/entry.S             |  2
-rw-r--r--  arch/arm64/kernel/ptrace.c            | 16
-rw-r--r--  arch/arm64/kernel/traps.c             | 28
-rw-r--r--  arch/arm64/mm/init.c                  |  2
6 files changed, 40 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
 #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
 	__uint128_t	vregs[32];
 	__u32		fpsr;
 	__u32		fpcr;
+	__u32		__reserved[2];
 };
 
 struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 923841ffe4a9..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -683,7 +683,7 @@ el0_inv:
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
 	mov	x2, x25
-	bl	bad_mode
+	bl	bad_el0_sync
 	b	ret_to_user
 ENDPROC(el0_sync)
 
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
 	/* (address, ctrl) registers */
 	limit = regset->n * regset->size;
 	while (count && offset < limit) {
+		if (count < PTRACE_HBP_ADDR_SZ)
+			return -EINVAL;
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
 					 offset, offset + PTRACE_HBP_ADDR_SZ);
 		if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
 			return ret;
 		offset += PTRACE_HBP_ADDR_SZ;
 
+		if (!count)
+			break;
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
 					 offset, offset + PTRACE_HBP_CTRL_SZ);
 		if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct user_pt_regs newregs;
+	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
 	if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct user_fpsimd_state newstate;
+	struct user_fpsimd_state newstate =
+		target->thread.fpsimd_state.user_fpsimd;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
 	if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	unsigned long tls;
+	unsigned long tls = target->thread.tp_value;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
 			   unsigned int pos, unsigned int count,
 			   const void *kbuf, const void __user *ubuf)
 {
-	int syscallno, ret;
+	int syscallno = task_pt_regs(target)->syscallno;
+	int ret;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
 	if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
 			  const void __user *ubuf)
 {
 	int ret;
-	compat_ulong_t tls;
+	compat_ulong_t tls = target->thread.tp_value;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
-	siginfo_t info;
-	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
 	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
 		handler[reason], smp_processor_id(), esr,
 		esr_get_class_string(esr));
+
+	die("Oops - bad mode", regs, 0);
+	local_irq_disable();
+	panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+	siginfo_t info;
+	void __user *pc = (void __user *)instruction_pointer(regs);
+	console_verbose();
+
+	pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+		smp_processor_id(), esr, esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	info.si_code  = ILL_ILLOPC;
 	info.si_addr  = pc;
 
-	arm64_notify_die("Oops - bad mode", regs, &info, 0);
+	current->thread.fault_address = 0;
+	current->thread.fault_code = 0;
+
+	force_sig_info(info.si_signo, &info, current);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 716d1226ba69..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,6 +404,8 @@ void __init mem_init(void)
 	if (swiotlb_force == SWIOTLB_FORCE ||
 	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
 		swiotlb_init(1);
+	else
+		swiotlb_force = SWIOTLB_NO_FORCE;
 
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
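
A note on the memory.h hunk at the top of the diff: the rename from 'page' to
'kaddr' is not cosmetic. Macro parameter substitution is purely textual, so a
parameter named page is also substituted inside sizeof(struct page). A
stand-alone illustration (the _old/_new names are ours, for contrast only):

/* As merged before the fix: the parameter is named 'page'. */
#define __page_to_voff_old(page) \
        (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

/*
 * __page_to_voff_old(p) expands to
 *     (((u64)(p) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct p))
 * because the preprocessor replaces the 'page' token inside
 * 'sizeof(struct page)' too. That only compiles when the argument
 * happens to be an identifier spelled 'page'. Renaming the parameter
 * leaves 'struct page' untouched:
 */
#define __page_to_voff_new(kaddr) \
        (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))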
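
The entry.S and traps.c hunks implement one logical change: bad_mode() is now
reserved for exceptions taken through the "impossible" vector entries, where
there is no task to signal and nothing to recover, while unexpected
synchronous exceptions from EL0 get their own handler that may return. A
condensed view of the resulting split (the diagnostic printing is trimmed;
see the hunks above for the full bodies):

/* Impossible vector entries, taken from EL1: always fatal, never returns. */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");      /* in case die() returns */
}

/*
 * Unexpected but potentially recoverable synchronous exception from EL0:
 * deliver SIGILL to the offending task, then return to ret_to_user.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        siginfo_t info;

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = (void __user *)instruction_pointer(regs);

        force_sig_info(info.si_signo, &info, current);
}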
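
Finally, the arch/arm64/mm/init.c hunk: when all of RAM is reachable with a
32-bit DMA mask (and SWIOTLB is not being forced on), swiotlb_init() is never
called and the bounce buffer never exists, so any later DMA API fall-back into
SWIOTLB would operate on an uninitialised buffer. The one-line fix records
that state so the fall-back path is never taken. Annotated for clarity (the
comments are ours; the remainder of mem_init() is elided):

void __init mem_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);        /* bounce buffering may be needed */
        else
                /* tell the DMA API the bounce buffer does not exist */
                swiotlb_force = SWIOTLB_NO_FORCE;

        /* ... remainder of mem_init() unchanged ... */
}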