aboutsummaryrefslogtreecommitdiff
path: root/net/bridge/br_forward.c
diff options
context:
space:
mode:
authorLinus Torvalds2005-04-16 15:20:36 -0700
committerLinus Torvalds2005-04-16 15:20:36 -0700
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/bridge/br_forward.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'net/bridge/br_forward.c')
-rw-r--r--net/bridge/br_forward.c159
1 files changed, 159 insertions, 0 deletions
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
new file mode 100644
index 000000000000..ef9f2095f96e
--- /dev/null
+++ b/net/bridge/br_forward.c
@@ -0,0 +1,159 @@
+/*
+ * Forwarding decision
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_bridge.h>
+#include "br_private.h"
+
+/*
+ * Decide whether a frame may be transmitted out through port p.  A
+ * frame is never echoed back out the port it arrived on, and only
+ * ports in the STP forwarding state may transmit.  Returns 1 when the
+ * frame should be delivered on this port, 0 otherwise.
+ */
+static inline int should_deliver(const struct net_bridge_port *p,
+				 const struct sk_buff *skb)
+{
+	return skb->dev != p->dev && p->state == BR_STATE_FORWARDING;
+}
+
+/*
+ * Final transmit step, used as the okfn of the NF_BR_POST_ROUTING
+ * hook.  Frames larger than the egress device's MTU are dropped;
+ * otherwise the Ethernet header is restored in front of the payload
+ * and the skb is queued on the device.  Always returns 0.  The skb is
+ * consumed on every path.
+ */
+int br_dev_queue_push_xmit(struct sk_buff *skb)
+{
+	/* Oversized for the egress device: drop the frame. */
+	if (skb->len > skb->dev->mtu) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+	/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
+	nf_bridge_maybe_copy_header(skb);
+#endif
+	skb_push(skb, ETH_HLEN);
+	dev_queue_xmit(skb);
+
+	return 0;
+}
+
+/*
+ * Continuation for the bridge LOCAL_OUT/FORWARD hooks: pass the frame
+ * through the NF_BR_POST_ROUTING netfilter hook; on acceptance,
+ * br_dev_queue_push_xmit() transmits it.  Always reports 0 to the
+ * caller.
+ */
+int br_forward_finish(struct sk_buff *skb)
+{
+	NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+			br_dev_queue_push_xmit);
+
+	return 0;
+}
+
+/*
+ * Local-origin delivery path: retarget the skb at the destination
+ * port's device and run the NF_BR_LOCAL_OUT hook, which on acceptance
+ * continues into br_forward_finish().  The skb is consumed.
+ */
+static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
+{
+	skb->dev = to->dev;
+#ifdef CONFIG_NETFILTER_DEBUG
+	/* Reset per-skb netfilter debug state before re-entering the hooks. */
+	skb->nf_debug = 0;
+#endif
+	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+			br_forward_finish);
+}
+
+/*
+ * Forwarding path for frames received on another port.  The ingress
+ * device is captured before skb->dev is overwritten so it can be
+ * supplied as the "in" device of the NF_BR_FORWARD hook; on acceptance
+ * the hook continues into br_forward_finish().  The skb is consumed.
+ * NOTE(review): ip_summed is reset to CHECKSUM_NONE here -- presumably
+ * any hardware checksum verdict from the ingress device is not valid
+ * on the forwarded copy; confirm rationale.
+ */
+static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
+{
+	struct net_device *indev;
+
+	indev = skb->dev;
+	skb->dev = to->dev;
+	skb->ip_summed = CHECKSUM_NONE;
+
+	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+		br_forward_finish);
+}
+
+/*
+ * Deliver a locally originated frame out through port "to".  The skb
+ * is always consumed: either handed to the local-out path or freed
+ * when the port must not be used.  Called with rcu_read_lock held.
+ */
+void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
+{
+	if (!should_deliver(to, skb)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	__br_deliver(to, skb);
+}
+
+/*
+ * Forward a received frame out through port "to".  The skb is always
+ * consumed: either handed to the forwarding path or freed when the
+ * port must not be used.  Called with rcu_read_lock held.
+ */
+void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
+{
+	if (!should_deliver(to, skb)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	__br_forward(to, skb);
+}
+
+/* called under bridge lock */
+/*
+ * Send a copy of skb out every port that passes should_deliver(),
+ * using __packet_hook (__br_deliver or __br_forward) for the actual
+ * transmit.  When @clone is non-zero the caller retains ownership of
+ * the original skb, so a private clone is flooded instead.  A one-port
+ * lookahead ("prev") is used: a clone is made only once a *further*
+ * eligible port is found, so the last eligible port consumes the skb
+ * itself and no surplus copy is allocated.
+ * NOTE(review): the port list is walked with list_for_each_entry_rcu,
+ * which suggests RCU protection rather than the bridge lock alone --
+ * confirm against the callers.
+ */
+static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
+	void (*__packet_hook)(const struct net_bridge_port *p,
+			       struct sk_buff *skb))
+{
+	struct net_bridge_port *p;
+	struct net_bridge_port *prev;
+
+	if (clone) {
+		struct sk_buff *skb2;
+
+		/* Caller keeps the original; work on a copy from here on. */
+		if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+			br->statistics.tx_dropped++;
+			return;
+		}
+
+		skb = skb2;
+	}
+
+	prev = NULL;
+
+	list_for_each_entry_rcu(p, &br->port_list, list) {
+		if (should_deliver(p, skb)) {
+			if (prev != NULL) {
+				struct sk_buff *skb2;
+
+				/* Clone for the previously found port; the
+				 * current port may still end up taking the
+				 * original skb. */
+				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+					br->statistics.tx_dropped++;
+					kfree_skb(skb);
+					return;
+				}
+
+				__packet_hook(prev, skb2);
+			}
+
+			prev = p;
+		}
+	}
+
+	if (prev != NULL) {
+		/* Last eligible port consumes the original skb. */
+		__packet_hook(prev, skb);
+		return;
+	}
+
+	/* No eligible port consumed the skb; free it. */
+	kfree_skb(skb);
+}
+
+
+/* called with rcu_read_lock */
+/* Flood a locally originated frame out every eligible bridge port via
+ * the local-out (deliver) path.  The skb is consumed per br_flood()'s
+ * @clone contract. */
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
+{
+	br_flood(br, skb, clone, __br_deliver);
+}
+
+/* called under bridge lock */
+/* Flood a received frame out every eligible bridge port via the
+ * forwarding path.  The skb is consumed per br_flood()'s @clone
+ * contract. */
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
+{
+	br_flood(br, skb, clone, __br_forward);
+}