author     Michael Buesch    2007-09-28 16:19:03 +0200
committer  David S. Miller   2007-10-10 16:54:13 -0700
commit     05b64b364822863974c0121359b01d7ba0f22205
tree       9f11ab39989780f93bdd953d798e070ada6ed069
parent     42bb4cd5ae320dd46630533fecb91b940d4468e2
[B43]: Rewrite pwork locking policy.
Implement much easier and more lightweight locking for the periodic work.
This also removes the last big busywait loop and replaces it with a sleeping loop.

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/wireless/b43/main.c | 88
1 file changed, 32 insertions(+), 56 deletions(-)
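The central change in b43_mac_suspend() is the wait loop: instead of busy-spinning up to 10000 times with udelay(1), the driver now polls the IRQ-reason register up to 40 times with msleep(1), so the CPU can schedule other work while waiting for B43_IRQ_MAC_SUSPENDED. Below is a minimal userspace sketch of that bounded sleep-and-poll pattern; poll_until() and hw_flag_set() are hypothetical stand-ins for the register read and flag test, not part of the b43 driver.

/* Hypothetical sketch of a bounded sleep-and-poll wait, the pattern that
 * replaces the udelay() busywait in b43_mac_suspend(). */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool hw_flag_set(int attempt)
{
	/* Stand-in for reading B43_MMIO_GEN_IRQ_REASON and testing
	 * B43_IRQ_MAC_SUSPENDED; here the "hardware" becomes ready on
	 * the fifth poll so the demo terminates. */
	return attempt >= 5;
}

static bool poll_until(int tries)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
	int i;

	for (i = 0; i < tries; i++) {
		if (hw_flag_set(i))
			return true;	/* condition met before the limit */
		nanosleep(&ts, NULL);	/* sleeping wait, analogous to msleep(1) */
	}
	return false;			/* timed out, like "MAC suspend failed" */
}

int main(void)
{
	if (poll_until(40))
		printf("condition reached\n");
	else
		printf("timed out\n");
	return 0;
}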
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a603a154d496..6c80f2e2f4ee 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1976,6 +1976,7 @@ void b43_mac_enable(struct b43_wldev *dev)
{
dev->mac_suspended--;
B43_WARN_ON(dev->mac_suspended < 0);
+ B43_WARN_ON(irqs_disabled());
if (dev->mac_suspended == 0) {
b43_write32(dev, B43_MMIO_MACCTL,
b43_read32(dev, B43_MMIO_MACCTL)
@@ -1986,6 +1987,11 @@ void b43_mac_enable(struct b43_wldev *dev)
b43_read32(dev, B43_MMIO_MACCTL);
b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
b43_power_saving_ctl_bits(dev, 0);
+
+ /* Re-enable IRQs. */
+ spin_lock_irq(&dev->wl->irq_lock);
+ b43_interrupt_enable(dev, dev->irq_savedstate);
+ spin_unlock_irq(&dev->wl->irq_lock);
}
}
@@ -1995,23 +2001,34 @@ void b43_mac_suspend(struct b43_wldev *dev)
int i;
u32 tmp;
+ might_sleep();
+ B43_WARN_ON(irqs_disabled());
B43_WARN_ON(dev->mac_suspended < 0);
+
if (dev->mac_suspended == 0) {
+ /* Mask IRQs before suspending MAC. Otherwise
+ * the MAC stays busy and won't suspend. */
+ spin_lock_irq(&dev->wl->irq_lock);
+ tmp = b43_interrupt_disable(dev, B43_IRQ_ALL);
+ spin_unlock_irq(&dev->wl->irq_lock);
+ b43_synchronize_irq(dev);
+ dev->irq_savedstate = tmp;
+
b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
b43_write32(dev, B43_MMIO_MACCTL,
b43_read32(dev, B43_MMIO_MACCTL)
& ~B43_MACCTL_ENABLED);
/* force pci to flush the write */
b43_read32(dev, B43_MMIO_MACCTL);
- for (i = 10000; i; i--) {
+ for (i = 40; i; i--) {
tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
if (tmp & B43_IRQ_MAC_SUSPENDED)
goto out;
- udelay(1);
+ msleep(1);
}
b43err(dev->wl, "MAC suspend failed\n");
}
- out:
+out:
dev->mac_suspended++;
}
@@ -2349,77 +2366,36 @@ static void do_periodic_work(struct b43_wldev *dev)
b43_periodic_every15sec(dev);
}
-/* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
+/* Periodic work locking policy:
+ * The whole periodic work handler is protected by
+ * wl->mutex. If another lock is needed somewhere in the
+ * pwork callchain, it's acquired in-place, where it's needed.
*/
-static int estimate_periodic_work_badness(unsigned int state)
-{
- int badness = 0;
-
- if (state % 8 == 0) /* every 120 sec */
- badness += 10;
- if (state % 4 == 0) /* every 60 sec */
- badness += 5;
- if (state % 2 == 0) /* every 30 sec */
- badness += 1;
-
-#define BADNESS_LIMIT 4
- return badness;
-}
-
static void b43_periodic_work_handler(struct work_struct *work)
{
- struct b43_wldev *dev =
- container_of(work, struct b43_wldev, periodic_work.work);
- unsigned long flags, delay;
- u32 savedirqs = 0;
- int badness;
+ struct b43_wldev *dev = container_of(work, struct b43_wldev,
+ periodic_work.work);
+ struct b43_wl *wl = dev->wl;
+ unsigned long delay;
- mutex_lock(&dev->wl->mutex);
+ mutex_lock(&wl->mutex);
if (unlikely(b43_status(dev) != B43_STAT_STARTED))
goto out;
if (b43_debug(dev, B43_DBG_PWORK_STOP))
goto out_requeue;
- badness = estimate_periodic_work_badness(dev->periodic_state);
- if (badness > BADNESS_LIMIT) {
- spin_lock_irqsave(&dev->wl->irq_lock, flags);
- /* Suspend TX as we don't want to transmit packets while
- * we recalibrate the hardware. */
- b43_tx_suspend(dev);
- savedirqs = b43_interrupt_disable(dev, B43_IRQ_ALL);
- /* Periodic work will take a long time, so we want it to
- * be preemtible and release the spinlock. */
- spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
- b43_synchronize_irq(dev);
-
- do_periodic_work(dev);
-
- spin_lock_irqsave(&dev->wl->irq_lock, flags);
- b43_interrupt_enable(dev, savedirqs);
- b43_tx_resume(dev);
- mmiowb();
- spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
- } else {
- /* Take the global driver lock. This will lock any operation. */
- spin_lock_irqsave(&dev->wl->irq_lock, flags);
-
- do_periodic_work(dev);
+ do_periodic_work(dev);
- mmiowb();
- spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
- }
dev->periodic_state++;
out_requeue:
if (b43_debug(dev, B43_DBG_PWORK_FAST))
delay = msecs_to_jiffies(50);
else
delay = round_jiffies(HZ * 15);
- queue_delayed_work(dev->wl->hw->workqueue, &dev->periodic_work, delay);
+ queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay);
out:
- mutex_unlock(&dev->wl->mutex);
+ mutex_unlock(&wl->mutex);
}
static void b43_periodic_tasks_setup(struct b43_wldev *dev)