Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c  42
1 file changed, 25 insertions, 17 deletions
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index d42422fc4af3..2e73f353f165 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -85,8 +85,8 @@ static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
-static void enable_vic_irq(unsigned int irq);
-static void disable_vic_irq(unsigned int irq);
+static void mask_vic_irq(unsigned int irq);
+static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
@@ -205,15 +205,12 @@ ack_CPI(__u8 cpi)
/* The VIC IRQ descriptors -- these look almost identical to the
* 8259 IRQs except that masks and things must be kept per processor
*/
-static struct hw_interrupt_type vic_irq_type = {
- .typename = "VIC-level",
- .startup = startup_vic_irq,
- .shutdown = disable_vic_irq,
- .enable = enable_vic_irq,
- .disable = disable_vic_irq,
- .ack = before_handle_vic_irq,
- .end = after_handle_vic_irq,
- .set_affinity = set_vic_irq_affinity,
+static struct irq_chip vic_chip = {
+ .name = "VIC",
+ .startup = startup_vic_irq,
+ .mask = mask_vic_irq,
+ .unmask = unmask_vic_irq,
+ .set_affinity = set_vic_irq_affinity,
};
/* used to count up as CPUs are brought on line (starts at 0) */
@@ -1397,6 +1394,17 @@ setup_profiling_timer(unsigned int multiplier)
return 0;
}
+/* This is a bit of a mess, but forced on us by the genirq changes:
+ * there's no genirq handler that really does what voyager wants,
+ * so hack it up with the simple IRQ handler */
+static void fastcall
+handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+{
+ before_handle_vic_irq(irq);
+ handle_simple_irq(irq, desc);
+ after_handle_vic_irq(irq);
+}
+
/* The CPIs are handled in the per cpu 8259s, so they must be
* enabled to be received: FIX: enabling the CPIs in the early
@@ -1433,7 +1441,7 @@ smp_intr_init(void)
* This is for later: first 16 correspond to PC IRQs; next 16
* are Primary MC IRQs and final 16 are Secondary MC IRQs */
for(i = 0; i < 48; i++)
- irq_desc[i].chip = &vic_irq_type;
+ set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}
/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
@@ -1531,7 +1539,7 @@ ack_VIC_CPI(__u8 cpi)
static unsigned int
startup_vic_irq(unsigned int irq)
{
- enable_vic_irq(irq);
+ unmask_vic_irq(irq);
return 0;
}
@@ -1558,7 +1566,7 @@ startup_vic_irq(unsigned int irq)
* adjust their masks accordingly. */
static void
-enable_vic_irq(unsigned int irq)
+unmask_vic_irq(unsigned int irq)
{
/* linux doesn't do processor-irq affinity, so enable on
* all CPUs we know about */
@@ -1567,7 +1575,7 @@ enable_vic_irq(unsigned int irq)
__u32 processorList = 0;
unsigned long flags;
- VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
+ VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
irq, cpu, cpu_irq_affinity[cpu]));
spin_lock_irqsave(&vic_irq_lock, flags);
for_each_online_cpu(real_cpu) {
@@ -1591,7 +1599,7 @@ enable_vic_irq(unsigned int irq)
}
static void
-disable_vic_irq(unsigned int irq)
+mask_vic_irq(unsigned int irq)
{
/* lazy disable, do nothing */
}
@@ -1819,7 +1827,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
* disabled again as it comes in (voyager lazy disable). If
* the affinity map is tightened to disable the interrupt on a
* cpu, it will be pushed off when it comes in */
- enable_vic_irq(irq);
+ unmask_vic_irq(irq);
}
static void