| author | David S. Miller | 2008-07-16 03:22:39 -0700 |
|---|---|---|
| committer | David S. Miller | 2008-07-17 19:21:29 -0700 |
| commit | c7e4f3bbb4ba4e48ab3b529d5016e454cee1ccd6 (patch) | |
| tree | 76975288fd9448ee522867e3681978804431e736 /net | |
| parent | 78a5b30b7324b2d66bcf7d2e3935877d3c26497c (diff) | |
pkt_sched: Kill qdisc_lock_tree and qdisc_unlock_tree.
No longer used.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/sched/sch_generic.c | 36 |
1 file changed, 3 insertions(+), 33 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8cdf0b4a6a5a..3d53e92ad9c8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,44 +29,14 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * queue->lock spinlock.
+ * qdisc_root_lock(qdisc) spinlock.
  *
  * The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- *   spinlock queue->lock.
- * - ingress filtering is serialized via top level device
- *   spinlock dev->rx_queue.lock.
+ * - enqueue, dequeue are serialized via qdisc root lock
+ * - ingress filtering is also serialized via qdisc root lock
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	local_bh_disable();
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_lock(&txq->lock);
-	}
-	spin_lock(&dev->rx_queue.lock);
-}
-EXPORT_SYMBOL(qdisc_lock_tree);
-
-void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	spin_unlock(&dev->rx_queue.lock);
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_unlock(&txq->lock);
-	}
-	local_bh_enable();
-}
-EXPORT_SYMBOL(qdisc_unlock_tree);
-
 static inline int qdisc_qlen(struct Qdisc *q)
 {
 	return q->q.qlen;
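For orientation (not part of the commit): a minimal sketch of how a control-path caller that used to bracket its work with qdisc_lock_tree()/qdisc_unlock_tree() might instead serialize against the data path using the per-qdisc root lock named in the updated comment. The function name and body below are hypothetical illustration only; qdisc_root_lock() is assumed to return the spinlock_t * the comment refers to, and the _bh lock variants stand in for the local_bh_disable() the removed helper performed.

#include <linux/spinlock.h>
#include <net/sch_generic.h>

/* Hypothetical caller, not taken from this commit: protect an update to
 * scheduling state with the qdisc root lock instead of locking the
 * whole device tree. */
static void example_qdisc_update(struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_root_lock(q);

	/* The _bh variants keep bottom halves disabled while the lock is
	 * held, mirroring the local_bh_disable() in the old helper. */
	spin_lock_bh(root_lock);
	/* ... modify state guarded by the root lock here ... */
	spin_unlock_bh(root_lock);
}

Tree-wide operations (grafting, walking) remain serialized by the rtnl mutex, as the updated comment in the hunk above states.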