From 1142b71d85894dcff1466dd6c871ea3c89e0352c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 19 Nov 2010 13:18:31 +0100
Subject: ARM: 6489/1: thumb2: fix incorrect optimisation in usracc

Commit 8b592783 added a Thumb-2 variant of usracc which, when it is
called with \rept=2, calls usraccoff once with an offset of 0 and
secondly with a hard-coded offset of 4 in order to avoid incrementing
the pointer again. If \inc != 4 then we will store the data to the
wrong offset from \ptr. Luckily, the only caller that passes \rept=2
to this function is __clear_user so we haven't been actively
corrupting user data.

This patch fixes usracc to pass \inc instead of #4 to usraccoff
when it is called a second time.

Cc:
Reported-by: Tony Thompson
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/assembler.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include/asm/assembler.h')

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 062b58c029ab..749bb6622404 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -238,7 +238,7 @@
 	@ Slightly optimised to avoid incrementing the pointer twice
 	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
 	.if	\rept == 2
-	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
 	.endif

 	add\cond \ptr, #\rept * \inc
--
cgit v1.2.3
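For illustration, a minimal sketch of the access pattern the fix restores. The macro name user_pair is hypothetical and this is not the kernel's usracc/usraccoff; it only assumes GNU as macro syntax:

	@ Two accesses relative to \ptr, then a single pointer increment.
	@ The second access must land at offset \inc, not at a fixed 4:
	@ with \inc = 1 (byte accesses), for example, offset 4 would hit
	@ the wrong byte.
	.macro	user_pair, instr, reg, ptr, inc
	\instr	\reg, [\ptr, #0]		@ first access at offset 0
	\instr	\reg, [\ptr, #\inc]		@ second access at offset \inc
	add	\ptr, \ptr, #2 * \inc		@ advance the pointer once
	.endm

	@ Example expansion (plain strb stands in for the user-space variant):
	@	user_pair strb, r2, r0, 1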
From ed3768a8d9dc2d345d4f27eb44ee1e4825056c08 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 1 Dec 2010 15:39:23 +0100
Subject: ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.

  * __fixup_smp_on_up has been modified with support for the
    THUMB2_KERNEL case.  For THUMB2_KERNEL only, fixups are split
    into halfwords in case of misalignment, since we can't rely on
    unaligned accesses working before turning the MMU on.

    No attempt is made to optimise the aligned case, since the
    number of fixups is typically small, and it seems best to keep
    the code as simple as possible.

  * Add a rotate in the fixup_smp code in order to support
    CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.

  * Add an assembly-time sanity-check to ALT_UP() to ensure that
    the content really is the right size (4 bytes).

    (No check is done for ALT_SMP().  Possibly, this could be fixed
    by splitting the two uses of ALT_SMP() (ALT_SMP...SMP_UP versus
    ALT_SMP...SMP_UP_B) into two macros.  In the first case, ALT_SMP
    needs to expand to >= 4 bytes, not == 4.)

  * smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
    to macro limitations) has not been modified: the affected
    instruction (mov) has no 16-bit encoding, so the correct
    instruction size is satisfied in this case.

  * A "mode" parameter has been added to smp_dmb:

	smp_dmb arm	@ assumes 4-byte instructions (for ARM code, e.g. kuser)
	smp_dmb		@ uses W() to ensure 4-byte instructions for ALT_SMP()

    This avoids assembly failures due to use of W() inside smp_dmb,
    when assembling pure-ARM code in the vectors page.

    There might be a better way to achieve this.

  * Kconfig: make SMP_ON_UP depend on (!THUMB2_KERNEL || !BIG_ENDIAN)
    i.e., THUMB2_KERNEL is now supported, but only if !BIG_ENDIAN
    (The fixup code for Thumb-2 currently assumes little-endian order.)

Tested using a single generic realview kernel on:
	ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
	ARM RealView PBX-A9 (SMP)

Signed-off-by: Dave Martin
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/Kconfig                 |  2 +-
 arch/arm/include/asm/assembler.h | 22 +++++++++++++++++++---
 arch/arm/kernel/entry-armv.S     |  4 ++--
 arch/arm/kernel/head.S           | 13 ++++++++++---
 4 files changed, 32 insertions(+), 9 deletions(-)

(limited to 'arch/arm/include/asm/assembler.h')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index db524e75c4a2..290a4b57617f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1229,7 +1229,7 @@ config SMP
 config SMP_ON_UP
 	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
-	depends on SMP && !XIP && !THUMB2_KERNEL
+	depends on SMP && !XIP
 	default y
 	help
 	  SMP kernels contain instructions which fail on non-SMP processors.
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 749bb6622404..72d3389e9c12 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -157,16 +157,24 @@
 #ifdef CONFIG_SMP
 #define ALT_SMP(instr...)					\
 9998:	instr
+/*
+ * Note: if you get assembler errors from ALT_UP() when building with
+ * CONFIG_THUMB2_KERNEL, you almost certainly need to use
+ * ALT_SMP( W(instr) ... )
+ */
 #define ALT_UP(instr...)					\
 	.pushsection ".alt.smp.init", "a"			;\
 	.long	9998b						;\
-	instr							;\
+9997:	instr							;\
+	.if . - 9997b != 4					;\
+		.error "ALT_UP() content must assemble to exactly 4 bytes";\
+	.endif							;\
 	.popsection
 #define ALT_UP_B(label)					\
 	.equ	up_b_offset, label - 9998b			;\
 	.pushsection ".alt.smp.init", "a"			;\
 	.long	9998b						;\
-	b	. + up_b_offset					;\
+	W(b)	. + up_b_offset					;\
 	.popsection
 #else
 #define ALT_SMP(instr...)
@@ -177,16 +185,24 @@
 /*
  * SMP data memory barrier
  */
-	.macro	smp_dmb
+	.macro	smp_dmb mode
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
+	.ifeqs "\mode","arm"
 	ALT_SMP(dmb)
+	.else
+	ALT_SMP(W(dmb))
+	.endif
 #elif __LINUX_ARM_ARCH__ == 6
 	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
 #else
 #error Incompatible SMP platform
 #endif
+	.ifeqs "\mode","arm"
 	ALT_UP(nop)
+	.else
+	ALT_UP(W(nop))
+	.endif
 #endif
 	.endm

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 955cf5f539ed..7f22a11a5105 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -842,7 +842,7 @@ __kuser_helper_start:
  */

 __kuser_memory_barrier:				@ 0xffff0fa0
-	smp_dmb
+	smp_dmb	arm
 	usr_ret	lr

 	.align	5
@@ -959,7 +959,7 @@ kuser_cmpxchg_fixup:

 #else

-	smp_dmb
+	smp_dmb	arm
1:	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index fd94e4e82fc9..359e54e83bd5 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -412,10 +412,17 @@ __fixup_smp_on_up:
 	add	r4, r4, r3
 	add	r5, r5, r3
2:	cmp	r4, r5
+	movhs	pc, lr
 	ldmia	r4!, {r0, r6}
-	strlo	r6, [r0, r3]
-	blo	2b
-	mov	pc, lr
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
+#endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	2b
 ENDPROC(__fixup_smp)

1:	.word	.
--
cgit v1.2.3
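As a side note on the head.S change: the sketch below restates the Thumb-2 halfword-store idea in isolation, storing a 32-bit value to a possibly 2-byte-aligned address without relying on unaligned word stores. The label and register assignments (r0 = destination, r6 = value) are hypothetical; this is not the kernel routine itself:

	.syntax unified
store_word_as_halfwords:
#ifdef __ARMEB__
	@ Big-endian: a word store would put the high halfword at the
	@ lower address, so swap the halves before storing them in order.
	ror	r6, r6, #16
#endif
	strh	r6, [r0], #2		@ first halfword, post-increment by 2
	lsr	r6, r6, #16
	strh	r6, [r0]		@ second halfword
	bx	lr

The same halfword order works whether r0 is word-aligned or only halfword-aligned; as the commit message notes, the aligned case is not optimised separately because the number of fixups is typically small.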