author     Ingo Molnar              2006-06-29 02:24:42 -0700
committer  Linus Torvalds           2006-06-29 10:26:22 -0700
commit     cd916d31cc31273eca8a620fae02b7bf7f577559 (patch)
tree       1af0c37c1fd92c90a178ed1a7d1d8b5a90a1ddb8 /kernel/irq
parent     4a733ee12618cf3ec25cbc337a5e0ba3ad5d7fb6 (diff)
[PATCH] genirq: cleanup: merge pending_irq_cpumask[] into irq_desc[]
Consolidation: remove the pending_irq_cpumask[NR_IRQS] array and move it
into the irq_desc[NR_IRQS].pending_mask field.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
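For context, a minimal sketch (not the actual include/linux/irq.h definition) of where the per-IRQ pending mask lives after this change, assuming the same CONFIG_GENERIC_PENDING_IRQ/CONFIG_IRQBALANCE guard that the removed array used; unrelated descriptor fields are elided:

	struct irq_desc {
		spinlock_t	lock;		/* taken by set_pending_irq()/move_native_irq() */
		unsigned int	status;		/* IRQ_DISABLED and friends */
		unsigned int	move_irq;	/* set when an affinity change is pending */
	#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
		cpumask_t	pending_mask;	/* replaces pending_irq_cpumask[irq] */
	#endif
		/* ... chip, action list and other fields elided ... */
	};

	/* Old code indexed the standalone array:
	 *	pending_irq_cpumask[irq] = mask;
	 * New code uses the field embedded in the descriptor:
	 *	irq_desc[irq].pending_mask = mask;
	 */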
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/manage.c    | 4
-rw-r--r--  kernel/irq/migration.c | 8
2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6a6f1d3dd399..ca9b5d36abe8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,10 +16,6 @@
#ifdef CONFIG_SMP
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a571c3abb793..a57ebe9fa6f6 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -8,7 +8,7 @@ void set_pending_irq(unsigned int irq, cpumask_t mask)
spin_lock_irqsave(&desc->lock, flags);
desc->move_irq = 1;
- pending_irq_cpumask[irq] = mask;
+ irq_desc[irq].pending_mask = mask;
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -30,7 +30,7 @@ void move_native_irq(int irq)
desc->move_irq = 0;
- if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
+ if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
return;
if (!desc->chip->set_affinity)
@@ -38,7 +38,7 @@ void move_native_irq(int irq)
assert_spin_locked(&desc->lock);
- cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+ cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
/*
* If there was a valid mask to work with, please
@@ -58,5 +58,5 @@ void move_native_irq(int irq)
if (likely(!(desc->status & IRQ_DISABLED)))
desc->chip->enable(irq);
}
- cpus_clear(pending_irq_cpumask[irq]);
+ cpus_clear(irq_desc[irq].pending_mask);
}