author     Amerigo Wang  2012-08-10 01:24:38 +0000
committer  David S. Miller  2012-08-14 14:33:30 -0700
commit     38e6bc185d9544dfad1774b3f8902a0b061aea25 (patch)
tree       5dc41a28f9dc48095c998dfc0dcf8b970db6e1b5
parent     47be03a28cc6c80e3aa2b3e8ed6d960ff0c5c0af (diff)
netpoll: make __netpoll_cleanup non-block
Like the previous patch, slave_disable_netpoll() and __netpoll_cleanup() may be
called with read_lock() held too, so we should make them non-blocking by moving
the cleanup and kfree() into call_rcu_bh() callbacks.

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/bonding/bond_main.c    4
-rw-r--r--  include/linux/netpoll.h            3
-rw-r--r--  net/8021q/vlan_dev.c               6
-rw-r--r--  net/bridge/br_device.c             6
-rw-r--r--  net/core/netpoll.c                42
5 files changed, 38 insertions, 23 deletions
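The pattern the patch applies is the standard RCU deferred-free idiom: embed a
struct rcu_head in the object, hand it to call_rcu_bh(), and do the teardown in
the callback after a BH grace period, so the caller never has to block. Below is
a minimal, self-contained sketch of that idiom; the struct foo type and its
helpers are hypothetical illustrations, not code from this patch.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object used only to illustrate the deferred-free idiom. */
struct foo {
	int data;
	struct rcu_head rcu;	/* reclamation hook, as this patch adds to struct netpoll */
};

/* Runs from softirq context once all rcu_read_lock_bh() readers are done. */
static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

/*
 * Non-blocking release: instead of synchronize_rcu_bh() followed by kfree(),
 * which sleeps and therefore cannot run under read_lock(), defer the free to
 * a call_rcu_bh() callback.
 */
static void foo_release(struct foo *f)
{
	call_rcu_bh(&f->rcu, foo_free_rcu);
}

Because the callback runs in softirq context, the patch also has to replace
cancel_delayed_work_sync() with cancel_delayed_work() inside
rcu_cleanup_netpoll_info(), as noted in the diff below.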
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8697136e27c0..e42891683e3b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- synchronize_rcu_bh();
- __netpoll_cleanup(np);
- kfree(np);
+ __netpoll_free_rcu(np);
}
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
{
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bf2d51eec0f3..907812efb4d9 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -23,6 +23,7 @@ struct netpoll {
u8 remote_mac[ETH_ALEN];
struct list_head rx; /* rx_np list element */
+ struct rcu_head rcu;
};
struct netpoll_info {
@@ -38,6 +39,7 @@ struct netpoll_info {
struct delayed_work tx_work;
struct netpoll *netpoll;
+ struct rcu_head rcu;
};
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
@@ -48,6 +50,7 @@ int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
+void __netpoll_free_rcu(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ee4ae0944cef..b65623f90660 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -704,11 +704,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
info->netpoll = NULL;
- /* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
-
- __netpoll_cleanup(netpoll);
- kfree(netpoll);
+ __netpoll_free_rcu(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ed0e0f9dc788..f41ba4048c9a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
p->np = NULL;
- /* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
-
- __netpoll_cleanup(np);
- kfree(np);
+ __netpoll_free_rcu(np);
}
#endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37cc854774a4..dc17f1db1479 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -878,6 +878,24 @@ static int __init netpoll_init(void)
}
core_initcall(netpoll_init);
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+ struct netpoll_info *npinfo =
+ container_of(rcu_head, struct netpoll_info, rcu);
+
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->txq);
+
+ /* we can't call cancel_delayed_work_sync here, as we are in softirq */
+ cancel_delayed_work(&npinfo->tx_work);
+
+ /* clean after last, unfinished work */
+ __skb_queue_purge(&npinfo->txq);
+ /* now cancel it again */
+ cancel_delayed_work(&npinfo->tx_work);
+ kfree(npinfo);
+}
+
void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
@@ -903,20 +921,24 @@ void __netpoll_cleanup(struct netpoll *np)
ops->ndo_netpoll_cleanup(np->dev);
RCU_INIT_POINTER(np->dev->npinfo, NULL);
+ call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+ }
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
- /* avoid racing with NAPI reading npinfo */
- synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+ struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
- skb_queue_purge(&npinfo->arp_tx);
- skb_queue_purge(&npinfo->txq);
- cancel_delayed_work_sync(&npinfo->tx_work);
+ __netpoll_cleanup(np);
+ kfree(np);
+}
- /* clean after last, unfinished work */
- __skb_queue_purge(&npinfo->txq);
- kfree(npinfo);
- }
+void __netpoll_free_rcu(struct netpoll *np)
+{
+ call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
void netpoll_cleanup(struct netpoll *np)
{