author    Linus Torvalds  2020-04-07 12:03:32 -0700
committer Linus Torvalds  2020-04-07 12:03:32 -0700
commit    479a72c0c6d7c24aa8b8a0a467672b6a6bac5947 (patch)
tree      add17fd97a3c51b06fd5a2e902e986f9db366ad3 /net
parent    07d6f6dcc094dc732f9ac6c9b8e3553c692a0847 (diff)
parent    aa81700cf2326e288c9ca1fe7b544039617f1fc2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Slave bond and team devices should not be assigned ipv6 link local
    addresses, from Jarod Wilson.

 2) Fix clock sink config on some at803x PHY devices, from Oleksij
    Rempel.

 3) Uninitialized stack space transmitted in slcan frames, fix from
    Richard Palethorpe.

 4) Guard HW VLAN ops properly in stmmac driver, from Jose Abreu.

 5) "=" --> "|=" fix in aquantia driver, from Colin Ian King.

 6) Fix TCP fallback in mptcp (accessing a plain tcp_sk as if it were
    an mptcp socket), from Florian Westphal.

 7) Fix cavium driver in some configurations wrt. PTP, from Yue
    Haibing.

 8) Make ipv6 and ipv4 consistent in the lower bound allowed for
    neighbour entry retrans_time, from Hangbin Liu.

 9) Don't use private workqueue in pegasus usb driver, from Petko
    Manolov.

10) Fix integer overflow in mlxsw, from Colin Ian King.

11) Missing refcnt init in cls_tcindex, from Cong Wang.

12) One too many loop iterations when processing cmpri entries in ipv6
    rpl code, from Alexander Aring.

13) Disable SG and TSO by default in r8169, from Heiner Kallweit.

14) NULL deref in macsec, from Davide Caratti.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (42 commits)
  macsec: fix NULL dereference in macsec_upd_offload()
  skbuff.h: Improve the checksum related comments
  net: dsa: bcm_sf2: Ensure correct sub-node is parsed
  qed: remove redundant assignment to variable 'rc'
  wimax: remove some redundant assignments to variable result
  mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
  mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_PRIORITY
  r8169: change back SG and TSO to be disabled by default
  net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
  ipv6: rpl: fix loop iteration
  tun: Don't put_page() for all negative return values from XDP program
  net: dsa: mt7530: fix null pointer dereferencing in port5 setup
  mptcp: add some missing pr_fmt defines
  net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
  net_sched: fix a missing refcnt in tcindex_init()
  net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
  mlxsw: spectrum_trap: fix unintention integer overflow on left shift
  pegasus: Remove pegasus' own workqueue
  neigh: support smaller retrans_time settting
  net: openvswitch: use hlist_for_each_entry_rcu instead of hlist_for_each_entry
  ...
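Item 5 above is a classic accumulation bug: assigning with "=" overwrites the flag bits collected so far, while "|=" ORs the new bit in. A minimal illustration of the difference, with made-up flag names (the aquantia driver's actual fields are not part of this diff):

    #include <stdio.h>

    #define FEATURE_A 0x1
    #define FEATURE_B 0x2

    int main(void)
    {
        unsigned int caps = 0;

        caps |= FEATURE_A;
        caps |= FEATURE_B;              /* a plain "=" here would drop FEATURE_A */
        printf("caps = 0x%x\n", caps);  /* prints 0x3 */
        return 0;
    }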
Diffstat (limited to 'net')
-rw-r--r--  net/core/neighbour.c          |  10
-rw-r--r--  net/core/sock.c               |   2
-rw-r--r--  net/dsa/slave.c               |   2
-rw-r--r--  net/ipv6/addrconf.c           |  11
-rw-r--r--  net/ipv6/ndisc.c              |   4
-rw-r--r--  net/ipv6/rpl.c                |   6
-rw-r--r--  net/ipv6/rpl_iptunnel.c       |   2
-rw-r--r--  net/mptcp/options.c           |   2
-rw-r--r--  net/mptcp/pm.c                |   2
-rw-r--r--  net/mptcp/pm_netlink.c        |   2
-rw-r--r--  net/mptcp/protocol.c          | 109
-rw-r--r--  net/mptcp/protocol.h          |   2
-rw-r--r--  net/mptcp/subflow.c           |   3
-rw-r--r--  net/mptcp/token.c             |   9
-rw-r--r--  net/openvswitch/flow_table.c  |  10
-rw-r--r--  net/sched/cls_tcindex.c       |  45
16 files changed, 186 insertions(+), 35 deletions(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5bf8d22a47ec..39d37d0ef575 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1065,11 +1065,12 @@ static void neigh_timer_handler(struct timer_list *t)
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
notify = 1;
- next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ HZ/100);
}
} else {
/* NUD_PROBE|NUD_INCOMPLETE */
- next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
}
if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@ -1125,7 +1126,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
- HZ/2);
+ HZ/100);
neigh_add_timer(neigh, next);
immediate_probe = true;
} else {
@@ -1427,7 +1428,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
neigh->nud_state = NUD_INCOMPLETE;
atomic_set(&neigh->probes, neigh_max_probes(neigh));
neigh_add_timer(neigh,
- jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+ jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
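The hunks above all make the same change: the next timer shot is clamped so a user-configured retrans_time can never push the neighbour timer below HZ/100 jiffies (10 ms). A userspace model of the clamp, with HZ and max() standing in for their kernel counterparts (assumption: HZ = 1000):

    #include <stdio.h>

    #define HZ 1000                           /* jiffies per second */
    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        unsigned long retrans_time = 1;       /* user asked for 1 jiffy */
        unsigned long next = max(retrans_time, HZ / 100);

        printf("next probe in %lu jiffies (>= 10 ms)\n", next);
        return 0;
    }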
diff --git a/net/core/sock.c b/net/core/sock.c
index da32d9b6d09f..ce1d8dce9b7a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -574,7 +574,7 @@ static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
/* Sorry... */
ret = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_RAW))
+ if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
goto out;
ret = -EINVAL;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 5390ff541658..e94eb1aac602 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1338,7 +1338,7 @@ static void dsa_hw_port_list_free(struct list_head *hw_port_list)
}
/* Make the hardware datapath to/from @dev limited to a common MTU */
-void dsa_bridge_mtu_normalization(struct dsa_port *dp)
+static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
struct list_head hw_port_list;
struct dsa_switch_tree *dst;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a11fd4d67832..24e319dfb510 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1357,7 +1357,7 @@ retry:
regen_advance = idev->cnf.regen_max_retry *
idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+ max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
@@ -3298,6 +3298,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
if (netif_is_l3_master(idev->dev))
return;
+ /* no link local addresses on devices flagged as slaves */
+ if (idev->dev->flags & IFF_SLAVE)
+ return;
+
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
switch (idev->cnf.addr_gen_mode) {
@@ -4117,7 +4121,8 @@ static void addrconf_dad_work(struct work_struct *w)
ifp->dad_probes--;
addrconf_mod_dad_work(ifp,
- NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
+ max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
+ HZ/100));
spin_unlock(&ifp->lock);
write_unlock_bh(&idev->lock);
@@ -4523,7 +4528,7 @@ restart:
!(ifp->flags&IFA_F_TENTATIVE)) {
unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
ifp->idev->cnf.dad_transmits *
- NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
+ max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
if (age >= ifp->prefered_lft - regen_advance) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
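The new IFF_SLAVE check implements fix 1 from the pull request: bonding and team slave devices skip IPv6 link-local address generation, since addresses belong on the master. The flag is also visible from userspace; a hedged sketch that queries it via SIOCGIFFLAGS (the interface name is a placeholder):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>

    int main(void)
    {
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* placeholder ifname */
        if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
            perror("SIOCGIFFLAGS");
            close(fd);
            return 1;
        }
        printf("%s %s flagged as a slave\n", ifr.ifr_name,
               (ifr.ifr_flags & IFF_SLAVE) ? "is" : "is not");
        close(fd);
        return 0;
    }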
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 6ffa153e5166..1ecd4e9b0bdf 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1359,8 +1359,8 @@ skip_defrtr:
if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/HZ) {
rtime = (rtime*HZ)/1000;
- if (rtime < HZ/10)
- rtime = HZ/10;
+ if (rtime < HZ/100)
+ rtime = HZ/100;
NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
in6_dev->tstamp = jiffies;
send_ifinfo_notify = true;
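This hunk lowers the floor for a router-advertisement-supplied retrans timer from HZ/10 (100 ms) to HZ/100 (10 ms), keeping IPv6 consistent with the neighbour-layer clamp above. A userspace model of the millisecond-to-jiffies conversion, with HZ standing in for the kernel build constant (assumption: HZ = 250):

    #include <stdio.h>

    #define HZ 250

    static unsigned long ra_rtime_to_jiffies(unsigned long rtime_ms)
    {
        unsigned long rtime = (rtime_ms * HZ) / 1000;

        if (rtime < HZ / 100)   /* floor at 10 ms, as in the hunk above */
            rtime = HZ / 100;
        return rtime;
    }

    int main(void)
    {
        printf("5 ms    -> %lu jiffies\n", ra_rtime_to_jiffies(5));    /* floored to 2 */
        printf("1000 ms -> %lu jiffies\n", ra_rtime_to_jiffies(1000)); /* 250 */
        return 0;
    }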
diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
index dc4f20e23bf7..d38b476fc7f2 100644
--- a/net/ipv6/rpl.c
+++ b/net/ipv6/rpl.c
@@ -48,7 +48,7 @@ void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
outhdr->cmpri = 0;
outhdr->cmpre = 0;
- for (i = 0; i <= n; i++)
+ for (i = 0; i < n; i++)
ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[i], daddr,
ipv6_rpl_segdata_pos(inhdr, i),
inhdr->cmpri);
@@ -66,7 +66,7 @@ static unsigned char ipv6_rpl_srh_calc_cmpri(const struct ipv6_rpl_sr_hdr *inhdr
int i;
for (plen = 0; plen < sizeof(*daddr); plen++) {
- for (i = 0; i <= n; i++) {
+ for (i = 0; i < n; i++) {
if (daddr->s6_addr[plen] !=
inhdr->rpl_segaddr[i].s6_addr[plen])
return plen;
@@ -114,7 +114,7 @@ void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
outhdr->cmpri = cmpri;
outhdr->cmpre = cmpre;
- for (i = 0; i <= n; i++)
+ for (i = 0; i < n; i++)
ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, i),
&inhdr->rpl_segaddr[i], cmpri);
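All three rpl.c hunks fix the same off-by-one: with n cmpri-compressed segment addresses, valid indices run 0..n-1 (in this code the final address is handled separately with cmpre), so "i <= n" walked one entry past the end. A minimal illustration:

    #include <stdio.h>

    int main(void)
    {
        int seg[3] = { 10, 20, 30 };
        int n = 3;

        for (int i = 0; i < n; i++)   /* correct bound, as in the fix */
            printf("seg[%d] = %d\n", i, seg[i]);
        /* "i <= n" would also read seg[3], one element past the array */
        return 0;
    }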
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index a49ddc6cd020..c3ececd7cfc1 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -210,7 +210,7 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
struct rpl_lwt *rlwt;
- int err = -EINVAL;
+ int err;
rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index bd220ee4aac9..faf57585b892 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -4,6 +4,8 @@
* Copyright (c) 2017 - 2019, Intel Corporation.
*/
+#define pr_fmt(fmt) "MPTCP: " fmt
+
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
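The pr_fmt define (repeated for pm.c and pm_netlink.c below) must precede the includes so the pr_* macros in printk.h pick it up; every pr_debug()/pr_warn() in the file then carries a "MPTCP: " prefix automatically. A userspace model of the mechanism:

    #include <stdio.h>

    #define pr_fmt(fmt) "MPTCP: " fmt
    #define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        pr_debug("subflow=%d ready\n", 1);   /* prints "MPTCP: subflow=1 ready" */
        return 0;
    }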
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 064639f72487..977d9c8b1453 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -3,6 +3,8 @@
*
* Copyright (c) 2019, Intel Corporation.
*/
+#define pr_fmt(fmt) "MPTCP: " fmt
+
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index a0ce7f324499..86d61ab34c7c 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -4,6 +4,8 @@
* Copyright (c) 2020, Red Hat, Inc.
*/
+#define pr_fmt(fmt) "MPTCP: " fmt
+
#include <linux/inet.h>
#include <linux/kernel.h>
#include <net/tcp.h>
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 1833bc1f4a43..939a5045181a 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -57,10 +57,43 @@ static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
return msk->first && !sk_is_mptcp(msk->first);
}
+static struct socket *mptcp_is_tcpsk(struct sock *sk)
+{
+ struct socket *sock = sk->sk_socket;
+
+ if (sock->sk != sk)
+ return NULL;
+
+ if (unlikely(sk->sk_prot == &tcp_prot)) {
+ /* we are being invoked after mptcp_accept() has
+ * accepted a non-mp-capable flow: sk is a tcp_sk,
+ * not an mptcp one.
+ *
+ * Hand the socket over to tcp so all further socket ops
+ * bypass mptcp.
+ */
+ sock->ops = &inet_stream_ops;
+ return sock;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ } else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
+ sock->ops = &inet6_stream_ops;
+ return sock;
+#endif
+ }
+
+ return NULL;
+}
+
static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
+ struct socket *sock;
+
sock_owned_by_me((const struct sock *)msk);
+ sock = mptcp_is_tcpsk((struct sock *)msk);
+ if (unlikely(sock))
+ return sock;
+
if (likely(!__mptcp_needs_tcp_fallback(msk)))
return NULL;
@@ -84,6 +117,10 @@ static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
struct socket *ssock;
int err;
+ ssock = __mptcp_tcp_fallback(msk);
+ if (unlikely(ssock))
+ return ssock;
+
ssock = __mptcp_nmpc_socket(msk);
if (ssock)
goto set_state;
@@ -121,6 +158,27 @@ static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
MPTCP_SKB_CB(skb)->offset = offset;
}
+/* both sockets must be locked */
+static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
+ struct sock *ssk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);
+
+ /* revalidate data sequence number.
+ *
+ * mptcp_subflow_data_available() is usually called
+ * without msk lock. Its unlikely (but possible)
+ * that msk->ack_seq has been advanced since the last
+ * call found in-sequence data.
+ */
+ if (likely(dsn == msk->ack_seq))
+ return true;
+
+ subflow->data_avail = 0;
+ return mptcp_subflow_data_available(ssk);
+}
+
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct sock *ssk,
unsigned int *bytes)
@@ -132,6 +190,11 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct tcp_sock *tp;
bool done = false;
+ if (!mptcp_subflow_dsn_valid(msk, ssk)) {
+ *bytes = 0;
+ return false;
+ }
+
if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);
@@ -290,6 +353,15 @@ void mptcp_data_acked(struct sock *sk)
sock_hold(sk);
}
+void mptcp_subflow_eof(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
+ schedule_work(&msk->work))
+ sock_hold(sk);
+}
+
static void mptcp_stop_timer(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -994,6 +1066,27 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
return 0;
}
+static void mptcp_check_for_eof(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ int receivers = 0;
+
+ mptcp_for_each_subflow(msk, subflow)
+ receivers += !subflow->rx_eof;
+
+ if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
+ /* hopefully temporary hack: propagate shutdown status
+ * to msk, when all subflows agree on it
+ */
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+
+ smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ set_bit(MPTCP_DATA_READY, &msk->flags);
+ sk->sk_data_ready(sk);
+ }
+}
+
static void mptcp_worker(struct work_struct *work)
{
struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -1010,6 +1103,9 @@ static void mptcp_worker(struct work_struct *work)
__mptcp_flush_join_list(msk);
__mptcp_move_skbs(msk);
+ if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
+ mptcp_check_for_eof(msk);
+
if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
goto unlock;
@@ -1752,7 +1848,9 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
msk = mptcp_sk(sk);
lock_sock(sk);
- ssock = __mptcp_nmpc_socket(msk);
+ ssock = __mptcp_tcp_fallback(msk);
+ if (!ssock)
+ ssock = __mptcp_nmpc_socket(msk);
if (ssock) {
mask = ssock->ops->poll(file, ssock, wait);
release_sock(sk);
@@ -1762,9 +1860,6 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
release_sock(sk);
sock_poll_wait(file, sock, wait);
lock_sock(sk);
- ssock = __mptcp_tcp_fallback(msk);
- if (unlikely(ssock))
- return ssock->ops->poll(file, ssock, NULL);
if (test_bit(MPTCP_DATA_READY, &msk->flags))
mask = EPOLLIN | EPOLLRDNORM;
@@ -1783,11 +1878,17 @@ static int mptcp_shutdown(struct socket *sock, int how)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
struct mptcp_subflow_context *subflow;
+ struct socket *ssock;
int ret = 0;
pr_debug("sk=%p, how=%d", msk, how);
lock_sock(sock->sk);
+ ssock = __mptcp_tcp_fallback(msk);
+ if (ssock) {
+ release_sock(sock->sk);
+ return inet_shutdown(ssock, how);
+ }
if (how == SHUT_WR || how == SHUT_RDWR)
inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
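The heart of the fallback fix (item 6 in the pull request): after mptcp_accept() admits a non-mp-capable peer, the struct sock really is a tcp_sk, and mptcp_is_tcpsk() detects this by comparing sk->sk_prot against &tcp_prot, then swaps sock->ops so later socket calls bypass MPTCP entirely. A userspace sketch of the pointer-identity test (struct and variable names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct proto { const char *name; };

    static const struct proto tcp_proto   = { "tcp" };
    static const struct proto mptcp_proto = { "mptcp" };

    struct sock { const struct proto *sk_prot; };

    static int is_plain_tcp(const struct sock *sk)
    {
        return sk->sk_prot == &tcp_proto;   /* pointer identity, no strcmp */
    }

    int main(void)
    {
        struct sock fallen_back = { &tcp_proto };
        struct sock native      = { &mptcp_proto };

        printf("fallen back: %d, native: %d\n",
               is_plain_tcp(&fallen_back), is_plain_tcp(&native));
        return 0;
    }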
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index f733c5425552..67448002a2d7 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -89,6 +89,7 @@
#define MPTCP_DATA_READY 0
#define MPTCP_SEND_SPACE 1
#define MPTCP_WORK_RTX 2
+#define MPTCP_WORK_EOF 3
static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
{
@@ -339,6 +340,7 @@ void mptcp_finish_connect(struct sock *sk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
void mptcp_data_acked(struct sock *sk);
+void mptcp_subflow_eof(struct sock *sk);
int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(u32 token);
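MPTCP_WORK_EOF is a bit number in msk->flags: subflow code sets it with test_and_set_bit(), which returns the old bit value, so only the first caller schedules the worker, and the worker later consumes the event with test_and_clear_bit(). A userspace model of the set-once idiom built on C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    #define MPTCP_WORK_EOF 3

    static atomic_ulong flags;

    /* returns the old bit value: 0 only for the first caller */
    static int test_and_set_bit(int nr, atomic_ulong *addr)
    {
        unsigned long mask = 1UL << nr;

        return (atomic_fetch_or(addr, mask) & mask) != 0;
    }

    int main(void)
    {
        if (!test_and_set_bit(MPTCP_WORK_EOF, &flags))
            printf("first caller: schedule_work()\n");
        if (!test_and_set_bit(MPTCP_WORK_EOF, &flags))
            printf("never reached: bit already set\n");
        return 0;
    }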
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index b5180c81588e..50a8bea987c6 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -994,8 +994,7 @@ static void subflow_state_change(struct sock *sk)
if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
!subflow->rx_eof && subflow_is_done(sk)) {
subflow->rx_eof = 1;
- parent->sk_shutdown |= RCV_SHUTDOWN;
- __subflow_state_change(parent);
+ mptcp_subflow_eof(parent);
}
}
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index 129a5ad1bc35..33352dd99d4d 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -40,7 +40,7 @@ static int token_used __read_mostly;
/**
* mptcp_token_new_request - create new key/idsn/token for subflow_request
- * @req - the request socket
+ * @req: the request socket
*
* This function is called when a new mptcp connection is coming in.
*
@@ -80,7 +80,7 @@ int mptcp_token_new_request(struct request_sock *req)
/**
* mptcp_token_new_connect - create new key/idsn/token for subflow
- * @sk - the socket that will initiate a connection
+ * @sk: the socket that will initiate a connection
*
* This function is called when a new outgoing mptcp connection is
* initiated.
@@ -125,6 +125,7 @@ int mptcp_token_new_connect(struct sock *sk)
/**
* mptcp_token_new_accept - insert token for later processing
* @token: the token to insert to the tree
+ * @conn: the just cloned socket linked to the new connection
*
* Called when a SYN packet creates a new logical connection, i.e.
* is not a join request.
@@ -169,7 +170,7 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
/**
* mptcp_token_destroy_request - remove mptcp connection/token
- * @token - token of mptcp connection to remove
+ * @token: token of mptcp connection to remove
*
* Remove not-yet-fully-established incoming connection identified
* by @token.
@@ -183,7 +184,7 @@ void mptcp_token_destroy_request(u32 token)
/**
* mptcp_token_destroy - remove mptcp connection/token
- * @token - token of mptcp connection to remove
+ * @token: token of mptcp connection to remove
*
* Remove the connection identified by @token.
*/
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index fd8a01ca7a2d..2398d7238300 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -462,12 +462,14 @@ static void flow_table_copy_flows(struct table_instance *old,
struct hlist_head *head = &old->buckets[i];
if (ufid)
- hlist_for_each_entry(flow, head,
- ufid_table.node[old_ver])
+ hlist_for_each_entry_rcu(flow, head,
+ ufid_table.node[old_ver],
+ lockdep_ovsl_is_held())
ufid_table_instance_insert(new, flow);
else
- hlist_for_each_entry(flow, head,
- flow_table.node[old_ver])
+ hlist_for_each_entry_rcu(flow, head,
+ flow_table.node[old_ver],
+ lockdep_ovsl_is_held())
table_instance_insert(new, flow);
}
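flow_table_copy_flows() walks buckets that RCU readers traverse concurrently, so the plain iterator is replaced by hlist_for_each_entry_rcu(); its optional last argument is a lockdep expression, and lockdep_ovsl_is_held() tells the RCU checker that holding ovs_mutex, rather than rcu_read_lock(), is what makes this walk legal. The kernel-only lockdep argument has no userspace twin, but the reader-side discipline can be sketched with liburcu (assumption: userspace-rcu installed, link with -lurcu):

    #include <stdio.h>
    #include <urcu.h>          /* liburcu, memb flavor by default */
    #include <urcu/rculist.h>

    struct flow {
        int id;
        struct cds_list_head node;
    };

    static CDS_LIST_HEAD(flows);

    int main(void)
    {
        struct flow a = { .id = 1 }, *pos;

        rcu_register_thread();
        cds_list_add_rcu(&a.node, &flows);

        rcu_read_lock();       /* traversal must be under reader protection */
        cds_list_for_each_entry_rcu(pos, &flows, node)
            printf("flow %d\n", pos->id);
        rcu_read_unlock();

        rcu_unregister_thread();
        return 0;
    }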
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9904299424a1..61e95029c18f 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -11,6 +11,7 @@
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
@@ -26,9 +27,12 @@
#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
+struct tcindex_data;
+
struct tcindex_filter_result {
struct tcf_exts exts;
struct tcf_result res;
+ struct tcindex_data *p;
struct rcu_work rwork;
};
@@ -49,6 +53,7 @@ struct tcindex_data {
u32 hash; /* hash table size; 0 if undefined */
u32 alloc_hash; /* allocated size */
u32 fall_through; /* 0: only classify if explicit match */
+ refcount_t refcnt; /* a temporary refcnt for perfect hash */
struct rcu_work rwork;
};
@@ -57,6 +62,20 @@ static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
return tcf_exts_has_actions(&r->exts) || r->res.classid;
}
+static void tcindex_data_get(struct tcindex_data *p)
+{
+ refcount_inc(&p->refcnt);
+}
+
+static void tcindex_data_put(struct tcindex_data *p)
+{
+ if (refcount_dec_and_test(&p->refcnt)) {
+ kfree(p->perfect);
+ kfree(p->h);
+ kfree(p);
+ }
+}
+
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
u16 key)
{
@@ -132,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp)
p->mask = 0xffff;
p->hash = DEFAULT_HASH_SIZE;
p->fall_through = 1;
+ refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
rcu_assign_pointer(tp->root, p);
return 0;
@@ -141,6 +161,7 @@ static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
tcf_exts_destroy(&r->exts);
tcf_exts_put_net(&r->exts);
+ tcindex_data_put(r->p);
}
static void tcindex_destroy_rexts_work(struct work_struct *work)
@@ -212,6 +233,8 @@ found:
else
__tcindex_destroy_fexts(f);
} else {
+ tcindex_data_get(p);
+
if (tcf_exts_get_net(&r->exts))
tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
else
@@ -228,9 +251,7 @@ static void tcindex_destroy_work(struct work_struct *work)
struct tcindex_data,
rwork);
- kfree(p->perfect);
- kfree(p->h);
- kfree(p);
+ tcindex_data_put(p);
}
static inline int
@@ -248,9 +269,11 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
};
static int tcindex_filter_result_init(struct tcindex_filter_result *r,
+ struct tcindex_data *p,
struct net *net)
{
memset(r, 0, sizeof(*r));
+ r->p = p;
return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
TCA_TCINDEX_POLICE);
}
@@ -290,6 +313,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
goto errout;
+ cp->perfect[i].p = cp;
}
return 0;
@@ -334,6 +358,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
cp->alloc_hash = p->alloc_hash;
cp->fall_through = p->fall_through;
cp->tp = tp;
+ refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
if (tb[TCA_TCINDEX_HASH])
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -366,7 +391,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
cp->h = p->h;
- err = tcindex_filter_result_init(&new_filter_result, net);
+ err = tcindex_filter_result_init(&new_filter_result, cp, net);
if (err < 0)
goto errout_alloc;
if (old_r)
@@ -434,7 +459,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
goto errout_alloc;
f->key = handle;
f->next = NULL;
- err = tcindex_filter_result_init(&f->result, net);
+ err = tcindex_filter_result_init(&f->result, cp, net);
if (err < 0) {
kfree(f);
goto errout_alloc;
@@ -447,7 +472,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
if (old_r && old_r != r) {
- err = tcindex_filter_result_init(old_r, net);
+ err = tcindex_filter_result_init(old_r, cp, net);
if (err < 0) {
kfree(f);
goto errout_alloc;
@@ -571,6 +596,14 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
for (i = 0; i < p->hash; i++) {
struct tcindex_filter_result *r = p->perfect + i;
+ /* tcf_queue_work() does not guarantee the ordering we
+ * want, so we have to take this refcnt temporarily to
+ * ensure 'p' is freed after all tcindex_filter_result
+ * here. Imperfect hash does not need this, because it
+ * uses linked lists rather than an array.
+ */
+ tcindex_data_get(p);
+
tcf_unbind_filter(tp, &r->res);
if (tcf_exts_get_net(&r->exts))
tcf_queue_work(&r->rwork,
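The added refcnt closes a use-after-free window: tcf_queue_work() gives no ordering guarantee between the per-result destroy work items and the tcindex_data destroy work, so each perfect-hash result now pins 'p' via tcindex_data_get() until its own work item runs, and whichever put is last performs the kfree()s. A userspace model of the get/put pairing using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct shared_data {
        atomic_int refcnt;
    };

    static void data_get(struct shared_data *p)
    {
        atomic_fetch_add(&p->refcnt, 1);
    }

    static void data_put(struct shared_data *p)
    {
        if (atomic_fetch_sub(&p->refcnt, 1) == 1) {  /* last reference */
            printf("freeing shared data\n");
            free(p);
        }
    }

    int main(void)
    {
        struct shared_data *p = malloc(sizeof(*p));

        atomic_init(&p->refcnt, 1);   /* owner reference, as in tcindex_init() */
        data_get(p);                  /* temporary ref taken before queueing work */
        data_put(p);                  /* destroy work for one filter result */
        data_put(p);                  /* final put frees, order-independent */
        return 0;
    }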