| author | Michael Ellerman | 2009-04-28 01:57:43 +0000 |
|---|---|---|
| committer | Benjamin Herrenschmidt | 2009-05-21 15:43:58 +1000 |
| commit | f2694ba56808a3a0fa45d9bb45289575f31e48d2 (patch) | |
| tree | a5b24b590f83c42e7c224f69d0f43867caa743bb /arch/powerpc/kernel/irq.c | |
| parent | fb94fc2b89ea0422950cb1220f275622246bd66d (diff) | |
powerpc/irq: Move #ifdef'ed body of do_IRQ() into a separate function
Rather than a giant ifdef in the body of do_IRQ(), including a
dangling else, move the irq stack logic into a separate routine and
do the ifdef there.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
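
As an aside for readers unfamiliar with the pattern, the standalone sketch below (hypothetical names, not the kernel code itself) shows the same refactoring in miniature: the configuration `#ifdef` selects between two definitions of a small helper, so the caller keeps a plain if/else with no preprocessor conditionals and no dangling else.

```c
/* Minimal illustration of hoisting an #ifdef'ed body into a helper.
 * USE_SPECIAL_PATH stands in for a Kconfig option such as CONFIG_IRQSTACKS.
 */
#include <stdio.h>

#define USE_SPECIAL_PATH 1

#if USE_SPECIAL_PATH
static inline void handle_event(int ev)
{
	/* the formerly #ifdef'ed body lives here */
	printf("special path: event %d\n", ev);
}
#else
static inline void handle_event(int ev)
{
	/* fallback used when the option is disabled */
	printf("generic path: event %d\n", ev);
}
#endif

static void dispatch(int ev)
{
	/* the caller no longer contains any #ifdef or dangling else */
	if (ev >= 0)
		handle_event(ev);
	else
		printf("spurious event\n");
}

int main(void)
{
	dispatch(3);
	dispatch(-1);
	return 0;
}
```

The caller compiles identically under either configuration; only the helper's definition changes, which is exactly what the commit does with handle_one_irq().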
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r-- | arch/powerpc/kernel/irq.c | 96
1 file changed, 56 insertions, 40 deletions
```diff
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8c1a4966867e..3d3658d0b7b9 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -248,13 +248,63 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
+#ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
+	struct thread_info *curtp, *irqtp;
+	unsigned long saved_sp_limit;
+	struct irq_desc *desc;
+	void *handler;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[smp_processor_id()];
+
+	if (curtp == irqtp) {
+		/* We're already on the irq stack, just handle it */
+		generic_handle_irq(irq);
+		return;
+	}
+
+	desc = irq_desc + irq;
+	saved_sp_limit = current->thread.ksp_limit;
+
+	handler = desc->handle_irq;
+	if (handler == NULL)
+		handler = &__do_IRQ;
+
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context. */
+	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+			       (curtp->preempt_count & SOFTIRQ_MASK);
+
+	current->thread.ksp_limit = (unsigned long)irqtp +
+		_ALIGN_UP(sizeof(struct thread_info), 16);
+
+	call_handle_irq(irq, desc, irqtp, handler);
+	current->thread.ksp_limit = saved_sp_limit;
+	irqtp->task = NULL;
+
+	/* Set any flag that may have been set on the
+	 * alternate stack
+	 */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
+#endif
+
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned int irq;
-#ifdef CONFIG_IRQSTACKS
-	struct thread_info *curtp, *irqtp;
-#endif
 
 	irq_enter();
 
@@ -282,43 +332,9 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
-		/* Switch to the irq stack to handle this */
-		curtp = current_thread_info();
-		irqtp = hardirq_ctx[smp_processor_id()];
-		if (curtp != irqtp) {
-			struct irq_desc *desc = irq_desc + irq;
-			void *handler = desc->handle_irq;
-			unsigned long saved_sp_limit = current->thread.ksp_limit;
-			if (handler == NULL)
-				handler = &__do_IRQ;
-			irqtp->task = curtp->task;
-			irqtp->flags = 0;
-
-			/* Copy the softirq bits in preempt_count so that the
-			 * softirq checks work in the hardirq context.
-			 */
-			irqtp->preempt_count =
-				(irqtp->preempt_count & ~SOFTIRQ_MASK) |
-				(curtp->preempt_count & SOFTIRQ_MASK);
-
-			current->thread.ksp_limit = (unsigned long)irqtp +
-				_ALIGN_UP(sizeof(struct thread_info), 16);
-			call_handle_irq(irq, desc, irqtp, handler);
-			current->thread.ksp_limit = saved_sp_limit;
-			irqtp->task = NULL;
-
-
-			/* Set any flag that may have been set on the
-			 * alternate stack
-			 */
-			if (irqtp->flags)
-				set_bits(irqtp->flags, &curtp->flags);
-		} else
-#endif
-			generic_handle_irq(irq);
-	} else if (irq != NO_IRQ_IGNORE)
+	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+		handle_one_irq(irq);
+	else if (irq != NO_IRQ_IGNORE)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
```
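
One detail worth calling out from the hunk above: when switching to the irq stack, only the softirq-related bits of preempt_count are carried over, via a mask-and-merge. The sketch below illustrates that bit manipulation in isolation, using made-up values and a stand-in SOFTIRQ_MASK rather than the kernel's actual constants.

```c
/* Standalone illustration of the mask-and-merge used for preempt_count.
 * SOFTIRQ_MASK and the counts below are illustrative values only.
 */
#include <stdio.h>

#define SOFTIRQ_MASK 0x0000ff00u

int main(void)
{
	unsigned int curtp_count = 0x00010300u; /* caller: hardirq level plus live softirq bits */
	unsigned int irqtp_count = 0x00000a00u; /* stale softirq bits left on the irq stack */

	/* Keep everything in irqtp_count except its softirq bits,
	 * then merge in the caller's softirq bits. */
	irqtp_count = (irqtp_count & ~SOFTIRQ_MASK) |
		      (curtp_count & SOFTIRQ_MASK);

	printf("merged preempt_count: 0x%08x\n", irqtp_count);
	return 0;
}
```

The effect is that softirq-state checks made while running on the irq stack see the same softirq nesting as the interrupted context, without disturbing any other bits in the irq stack's own count.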