From 187a51ad11351b009abab688fb7f6d6f3210a45f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 21 May 2005 18:14:44 +0100
Subject: [PATCH] ARM SMP: consolidate main IRQ handler code

Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 4eb36155dc93..b0efbf98d832 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -23,6 +23,19 @@
 
 #include "entry-header.S"
 
+/*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+	.macro	irq_handler
+1:	get_irqnr_and_base r0, r6, r5, lr
+	movne	r1, sp
+	@
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	@
+	adrne	lr, 1b
+	bne	asm_do_IRQ
+	.endm
+
 /*
  * Invalid mode handlers
  */
@@ -136,13 +149,7 @@ __irq_svc:
 	add	r7, r9, #1			@ increment it
 	str	r7, [r8, #TI_PREEMPT]
 #endif
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	adrne	lr, 1b
-	bne	asm_do_IRQ
+	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [r8, #TI_FLAGS]		@ get flags
 	tst	r0, #_TIF_NEED_RESCHED
@@ -337,13 +344,7 @@ __irq_usr:
 	add	r7, r9, #1			@ increment it
 	str	r7, [r8, #TI_PREEMPT]
 #endif
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	adrne	lr, 1b
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	bne	asm_do_IRQ
+	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [r8, #TI_PREEMPT]
 	teq	r0, r7
--
cgit v1.2.3


From 706fdd9faaad5bd52c774190a54c0fd1dfc0f418 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 21 May 2005 18:15:45 +0100
Subject: [PATCH] ARM SMP: reallocate main IRQ handler code registers

By changing r9 -> r8 and r8 to 'tsk' (r9) we are able to remove one
instruction from the preempt path.

Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 35 ++++++++++++++++-------------------
 1 file changed, 16 insertions(+), 19 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index b0efbf98d832..8ff82bc75eb1 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -144,20 +144,20 @@ __dabt_svc:
 __irq_svc:
 	svc_entry irq
 #ifdef CONFIG_PREEMPT
-	get_thread_info r8
-	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
-	add	r7, r9, #1			@ increment it
-	str	r7, [r8, #TI_PREEMPT]
+	get_thread_info tsk
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	add	r7, r8, #1			@ increment it
+	str	r7, [tsk, #TI_PREEMPT]
 #endif
 	irq_handler
 #ifdef CONFIG_PREEMPT
-	ldr	r0, [r8, #TI_FLAGS]		@ get flags
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 preempt_return:
-	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
+	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
+	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	teq	r0, r7
-	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
 	strne	r0, [r0, -r0]			@ bug()
 #endif
 	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
@@ -168,7 +168,7 @@ preempt_return:
 
 #ifdef CONFIG_PREEMPT
 svc_preempt:
-	teq	r9, #0				@ was preempt count = 0
+	teq	r8, #0				@ was preempt count = 0
 	ldreq	r6, .LCirq_stat
 	movne	pc, lr				@ no
 	ldr	r0, [r6, #4]			@ local_irq_count
@@ -176,9 +176,9 @@ svc_preempt:
 	adds	r0, r0, r1
 	movne	pc, lr
 	mov	r7, #0				@ preempt_schedule_irq
-	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
+	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
-	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
+	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
 	beq	preempt_return			@ go again
 	b	1b
@@ -338,21 +338,18 @@ __dabt_usr:
 __irq_usr:
 	usr_entry irq
+	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
-	get_thread_info r8
-	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
-	add	r7, r9, #1			@ increment it
-	str	r7, [r8, #TI_PREEMPT]
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	add	r7, r8, #1			@ increment it
+	str	r7, [tsk, #TI_PREEMPT]
 #endif
 	irq_handler
 #ifdef CONFIG_PREEMPT
-	ldr	r0, [r8, #TI_PREEMPT]
+	ldr	r0, [tsk, #TI_PREEMPT]
+	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	str	r9, [r8, #TI_PREEMPT]
 	strne	r0, [r0, -r0]
-	mov	tsk, r8
-#else
-	get_thread_info tsk
 #endif
 	mov	why, #0
 	b	ret_to_user
--
cgit v1.2.3


From 791be9b976ba621b21745c30a7fca225fada9110 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 21 May 2005 18:16:44 +0100
Subject: [PATCH] ARM SMP: add IPI support

Add support for inter-processor interrupts to the main IRQ handling code.

Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 8ff82bc75eb1..a99e686c0b83 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -34,6 +34,20 @@
 	@
 	adrne	lr, 1b
 	bne	asm_do_IRQ
+
+#ifdef CONFIG_SMP
+	/*
+	 * XXX
+	 *
+	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * preserved from get_irqnr_and_base above
+	 */
+	test_for_ipi r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, 1b
+	bne	do_IPI
+#endif
+
 	.endm
 
 /*
--
cgit v1.2.3


From 49f680ea7bac5c679fb6374a326a164a3fba07cc Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 31 May 2005 18:02:00 +0100
Subject: [PATCH] ARM SMP: convert alignment enable

The current vector entry system does not allow for SMP.
In order to work around this, we need to eliminate our reliance on the
fixed save areas, which breaks the way we enable alignment traps.

This patch makes the alignment trap enable code independent of the way
we handle the save areas.

Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S   | 4 +++-
 arch/arm/kernel/entry-header.S | 7 +++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a99e686c0b83..cfb5cf5e48fc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,6 +269,8 @@ __pabt_svc:
 	.word	__temp_und
 .LCabt:
 	.word	__temp_abt
+.LCcralign:
+	.word	cr_alignment
 #ifdef MULTI_ABORT
 .LCprocfns:
 	.word	processor
@@ -311,7 +313,7 @@ __pabt_svc:
 	@
 	@ Enable the alignment trap while in kernel mode
 	@
-	alignment_trap r7, r0, __temp_\sym
+	alignment_trap r0
 
 	@
 	@ Clear FP to mark the first stack frame
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a3d40a0e2b04..afef21273963 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -59,11 +59,10 @@
 		mov	\rd, \rd, lsl #13
 		.endm
 
-	.macro	alignment_trap, rbase, rtemp, sym
+	.macro	alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
-#define OFF_CR_ALIGNMENT(x)	cr_alignment - x
-
-	ldr	\rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+	ldr	\rtemp, .LCcralign
+	ldr	\rtemp, [\rtemp]
 	mcr	p15, 0, \rtemp, c1, c0
 #endif
 	.endm
--
cgit v1.2.3


From ccea7a19e54349d4f40778304e1bb88da83d39e7 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 31 May 2005 22:22:32 +0100
Subject: [PATCH] ARM SMP: Fix vector entry

The current vector entry system does not allow for SMP.

In order to work around this, we need to eliminate our reliance on the
fixed save areas, which breaks the way we enable alignment traps.

This patch changes the way we handle the save areas such that we can
have one per CPU.

Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 172 ++++++++++++++++++++++---------------------
 arch/arm/kernel/setup.c      |  52 ++++++++++++-
 2 files changed, 139 insertions(+), 85 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cfb5cf5e48fc..78cf84cdc2ae 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -53,46 +53,62 @@
 /*
  * Invalid mode handlers
  */
-	.macro	inv_entry, sym, reason
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
-	ldr	r4, .LC\sym
+	.macro	inv_entry, reason
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - lr}
 	mov	r1, #\reason
 	.endm
 
 __pabt_invalid:
-	inv_entry abt, BAD_PREFETCH
-	b	1f
+	inv_entry BAD_PREFETCH
+	b	common_invalid
 
 __dabt_invalid:
-	inv_entry abt, BAD_DATA
-	b	1f
+	inv_entry BAD_DATA
+	b	common_invalid
 
 __irq_invalid:
-	inv_entry irq, BAD_IRQ
-	b	1f
+	inv_entry BAD_IRQ
+	b	common_invalid
 
 __und_invalid:
-	inv_entry und, BAD_UNDEFINSTR
+	inv_entry BAD_UNDEFINSTR
+
+	@
+	@ XXX fall through to common_invalid
+	@
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+	zero_fp
+
+	ldmia	r0, {r4 - r6}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r7, #-1			@  ""   ""    ""        ""
+	str	r4, [sp]		@ save preserved r0
+	stmia	r0, {r5 - r7}		@ lr_<exception>,
+					@ cpsr_<exception>, "old_r0"
-1:	zero_fp
-	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
-	add	r4, sp, #S_PC
-	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
 	mov	r0, sp
-	and	r2, r6, #31			@ int mode
+	and	r2, r6, #0x1f
 	b	bad_mode
 
 /*
  * SVC mode handlers
  */
-	.macro	svc_entry, sym
+	.macro	svc_entry
 	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r2, .LC\sym
-	add	r0, sp, #S_FRAME_SIZE
-	ldmia	r2, {r2 - r4}			@ get pc, cpsr
-	add	r5, sp, #S_SP
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #S_SP		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""      ""       ""
+	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
+
 	mov	r1, lr
 
 	@
@@ -109,7 +125,7 @@ __und_invalid:
 
 	.align	5
 __dabt_svc:
-	svc_entry abt
+	svc_entry
 	@
 	@ get ready to re-enable interrupts if appropriate
 	@
@@ -156,13 +172,15 @@ __dabt_svc:
 
 	.align	5
 __irq_svc:
-	svc_entry irq
+	svc_entry
+
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 	add	r7, r8, #1			@ increment it
 	str	r7, [tsk, #TI_PREEMPT]
 #endif
+
 	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
@@ -200,7 +218,7 @@ svc_preempt:
 
 	.align	5
 __und_svc:
-	svc_entry und
+	svc_entry
 
 	@
 	@ call emulation code, which returns using r9 if it has emulated
@@ -230,7 +248,7 @@ __und_svc:
 
 	.align	5
 __pabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
 	@ re-enable interrupts if appropriate
@@ -263,12 +281,6 @@ __pabt_svc:
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 
 	.align	5
-.LCirq:
-	.word	__temp_irq
-.LCund:
-	.word	__temp_und
-.LCabt:
-	.word	__temp_abt
 .LCcralign:
 	.word	cr_alignment
 #ifdef MULTI_ABORT
@@ -285,12 +297,16 @@ __pabt_svc:
 /*
  * User mode handlers
  */
-	.macro	usr_entry, sym
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r7, .LC\sym
-	add	r5, sp, #S_PC
-	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
+	.macro	usr_entry
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""     ""        ""
+
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
 
 #if __LINUX_ARM_ARCH__ < 6
 	@ make sure our user space atomic helper is aborted
@@ -307,8 +323,8 @@ __pabt_svc:
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r5, {r2 - r4}
-	stmdb	r5, {sp, lr}^
+	stmia	r0, {r2 - r4}
+	stmdb	r0, {sp, lr}^
 
 	@
 	@ Enable the alignment trap while in kernel mode
 	@
@@ -323,7 +339,7 @@ __pabt_svc:
 
 	.align	5
 __dabt_usr:
-	usr_entry abt
+	usr_entry
 
 	@
 	@ Call the processor-specific abort handler:
@@ -352,7 +368,7 @@ __dabt_usr:
 
 	.align	5
 __irq_usr:
-	usr_entry irq
+	usr_entry
 	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
@@ -360,6 +376,7 @@ __irq_usr:
 	add	r7, r8, #1			@ increment it
 	str	r7, [tsk, #TI_PREEMPT]
 #endif
+
 	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [tsk, #TI_PREEMPT]
@@ -367,6 +384,7 @@ __irq_usr:
 	teq	r0, r7
 	strne	r0, [r0, -r0]
 #endif
+
 	mov	why, #0
 	b	ret_to_user
 
@@ -374,7 +392,7 @@
 	.align	5
 __und_usr:
-	usr_entry und
+	usr_entry
 
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
 	bne	fpundefinstr			@ ignore FP
@@ -490,7 +508,7 @@ fpundefinstr:
 
 	.align	5
 __pabt_usr:
-	usr_entry abt
+	usr_entry
 
 	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
@@ -749,29 +767,41 @@ __kuser_helper_end:
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, sym, correction=0
+	.macro	vector_stub, name, correction=0
 	.align	5
 
 vector_\name:
-	ldr	r13, .LCs\sym
 	.if \correction
 	sub	lr, lr, #\correction
 	.endif
-	str	lr, [r13]			@ save lr_IRQ
+
+	@
+	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+	@ (parent CPSR)
+	@
+	stmia	sp, {r0, lr}		@ save r0, lr
 	mrs	lr, spsr
-	str	lr, [r13, #4]			@ save spsr_IRQ
+	str	lr, [sp, #8]		@ save spsr
+
 	@
-	@ now branch to the relevant MODE handling routine
+	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
-	mrs	r13, cpsr
-	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #SVC_MODE
-	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #SVC_MODE
+	msr	spsr_cxsf, r0
 
-	and	lr, lr, #15
+	@
+	@ the branch table must immediately follow this code
+	@
+	mov	r0, sp
+	and	lr, lr, #0x0f
 	ldr	lr, [pc, lr, lsl #2]
-	movs	pc, lr				@ Changes mode and branches
+	movs	pc, lr			@ branch to handler in SVC mode
 	.endm
 
 	.globl	__stubs_start
 __stubs_start:
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, irq, 4
+	vector_stub	irq, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -802,7 +832,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, abt, 8
+	vector_stub	dabt, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -825,7 +855,7 @@ __stubs_start:
  * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
-	vector_stub	pabt, abt, 4
+	vector_stub	pabt, 4
 
 	.long	__pabt_usr			@  0  (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -848,7 +878,7 @@ __stubs_start:
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
-	vector_stub	und, und
+	vector_stub	und
 
 	.long	__und_usr			@  0  (USR_26 / USR_32)
 	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -902,13 +932,6 @@ vector_addrexcptn:
 .LCvswi:
 	.word	vector_swi
 
-.LCsirq:
-	.word	__temp_irq
-.LCsund:
-	.word	__temp_und
-.LCsabt:
-	.word	__temp_abt
-
 	.globl	__stubs_end
 __stubs_end:
@@ -930,23 +953,6 @@ __vectors_end:
 
 	.data
 
-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
-	.word	0				@ saved lr_irq
-	.word	0				@ saved spsr_irq
-	.word	-1				@ old_r0
-__temp_und:
-	.word	0				@ Saved lr_und
-	.word	0				@ Saved spsr_und
-	.word	-1				@ old_r0
-__temp_abt:
-	.word	0				@ Saved lr_abt
-	.word	0				@ Saved spsr_abt
-	.word	-1				@ old_r0
-
 	.globl	cr_alignment
 	.globl	cr_no_alignment
 cr_alignment:
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c2a7da3ac0f1..7ecdda3f1253 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
 struct cpu_cache_fns cpu_cache;
 #endif
 
+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
@@ -307,8 +315,6 @@ static void __init setup_processor(void)
 	       cpu_name, processor_id, (int)processor_id & 15,
 	       proc_arch[cpu_architecture()]);
 
-	dump_cpu_info(smp_processor_id());
-
 	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,46 @@ static void __init setup_processor(void)
 	cpu_proc_init();
 }
 
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void __init cpu_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct stack *stk = &stacks[cpu];
+
+	if (cpu >= NR_CPUS) {
+		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+		BUG();
+	}
+
+	dump_cpu_info(cpu);
+
+	/*
+	 * setup stacks for re-entrant exception handlers
+	 */
+	__asm__ (
+	"msr	cpsr_c, %1\n\t"
+	"add	sp, %0, %2\n\t"
+	"msr	cpsr_c, %3\n\t"
+	"add	sp, %0, %4\n\t"
+	"msr	cpsr_c, %5\n\t"
+	"add	sp, %0, %6\n\t"
+	"msr	cpsr_c, %7"
+	    :
+	    : "r" (stk),
+	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      "I" (offsetof(struct stack, irq[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      "I" (offsetof(struct stack, abt[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      "I" (offsetof(struct stack, und[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
 	struct machine_desc *list;
@@ -715,6 +761,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
+	cpu_init();
+
 	/*
 	 * Set up various architecture-specific pointers
 	 */
--
cgit v1.2.3
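
The last patch above replaces the fixed __temp_irq/__temp_und/__temp_abt save
areas with a per-CPU struct stack: cpu_init() points each exception mode's SP
at the base of its own three-word area, and vector_stub then fills that area
with r0, lr_<exception> and spsr_<exception> (stmia sp, {r0, lr} followed by
str lr, [sp, #8]).  The C program below is a stand-alone sketch, not part of
the series; it only mirrors the struct stack declaration from the setup.c hunk
(dropping ____cacheline_aligned so it builds with any hosted compiler) and
prints the offsets that cpu_init() feeds to its "add sp, %0, %N" instructions,
which makes it easy to check that the three per-mode save areas do not
overlap.

/*
 * Hypothetical stand-alone sketch mirroring "struct stack" from the
 * setup.c hunk above; this is not kernel code and omits
 * ____cacheline_aligned.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct stack {
	uint32_t irq[3];	/* r0, lr_irq, spsr_irq */
	uint32_t abt[3];	/* r0, lr_abt, spsr_abt */
	uint32_t und[3];	/* r0, lr_und, spsr_und */
};

int main(void)
{
	/*
	 * These are the values the "I" constraints pass to
	 * "add sp, %0, %N" in cpu_init(): each mode's SP ends up at
	 * the base of its own three-word save area.
	 */
	printf("IRQ save area at +%zu\n", offsetof(struct stack, irq[0]));
	printf("ABT save area at +%zu\n", offsetof(struct stack, abt[0]));
	printf("UND save area at +%zu\n", offsetof(struct stack, und[0]));

	/*
	 * vector_stub stores r0 at [sp], lr at [sp, #4] and spsr at
	 * [sp, #8], i.e. exactly three words per mode.
	 */
	printf("words per mode: %zu\n",
	       sizeof(((struct stack *)0)->irq) / sizeof(uint32_t));
	return 0;
}

In the real kernel structure the ____cacheline_aligned attribute additionally
pads each CPU's struct stack out to a cache-line boundary, so no two CPUs
share a line for these save areas.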