| author | David S. Miller | 2018-12-20 10:53:28 -0800 |
|---|---|---|
| committer | David S. Miller | 2018-12-20 11:53:36 -0800 |
| commit | 2be09de7d6a06f58e768de1255a687c9aaa66606 (patch) | |
| tree | 298f9e04caf105873d987e807eccba27710a49cc /net | |
| parent | 44a7b3b6e3a458f9549c2cc28e74ecdc470e42f1 (diff) | |
| parent | 1d51b4b1d3f2db0d6d144175e31a84e472fbd99a (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of conflicts, but happily all cases were overlapping
changes, parallel adds, and things of that nature.
Thanks to Stephen Rothwell, Saeed Mahameed, and others
for their guidance in these resolutions.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
47 files changed, 381 insertions, 202 deletions
diff --git a/net/can/raw.c b/net/can/raw.c
index 3aab7664933f..c70207537488 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -771,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	if (err < 0)
 		goto free_skb;

-	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sk->sk_tsflags);

 	skb->dev = dev;
 	skb->sk = sk;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2e8d91e54179..9f2840510e63 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 		/* Pass parameters to the BPF program */
 		cb->qdisc_cb.flow_keys = &flow_keys;
 		flow_keys.nhoff = nhoff;
+		flow_keys.thoff = nhoff;

 		bpf_compute_data_pointers((struct sk_buff *)skb);
 		result = BPF_PROG_RUN(attached, skb);
@@ -790,9 +791,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 		/* Restore state */
 		memcpy(cb, &cb_saved, sizeof(cb_saved));

+		flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
+		flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
+					  flow_keys.nhoff, skb->len);
+
 		__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
 					 target_container);
-		key_control->thoff = min_t(u16, key_control->thoff, skb->len);
 		rcu_read_unlock();
 		return result == BPF_OK;
 	}
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 4b54e5f107c6..acf45ddbe924 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

+		napi_disable(&cell->napi);
 		netif_napi_del(&cell->napi);
 		__skb_queue_purge(&cell->napi_skbs);
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8baa9ab01db6..fa384f775f1a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2629,11 +2629,16 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,

 		ndm = nlmsg_data(nlh);
 		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
-		    ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+		    ndm->ndm_state || ndm->ndm_type) {
 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
 			return -EINVAL;
 		}

+		if (ndm->ndm_flags & ~NTF_PROXY) {
+			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
+			return -EINVAL;
+		}
+
 		err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
 					 nda_policy, extack);
 	} else {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 37b4667128a3..d67ec17f2cc8 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -28,6 +28,8 @@ static int two __maybe_unused = 2;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
+static long long_one __maybe_unused = 1;
+static long long_max __maybe_unused = LONG_MAX;

 static int net_msg_warn;	/* Unused, but still a sysctl */

@@ -289,6 +291,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,

 	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
+
+static int
+proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+				     void __user *buffer, size_t *lenp,
+				     loff_t *ppos)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
 #endif

 static struct ctl_table net_core_table[] = {
@@ -398,10 +411,11 @@ static struct ctl_table net_core_table[] = {
 	{
 		.procname	= "bpf_jit_limit",
 		.data		= &bpf_jit_limit,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(long),
 		.mode		= 0600,
-		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
-		.extra1		= &one,
+		.proc_handler	= proc_dolongvec_minmax_bpf_restricted,
+		.extra1		= &long_one,
+		.extra2		= &long_max,
 	},
 #endif
 	{
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5b9b6d497f71..04ba321ae5ce 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -952,17 +952,18 @@ static int inet_abc_len(__be32 addr)
 {
 	int rc = -1;	/* Something else, probably a multicast. */

-	if (ipv4_is_zeronet(addr))
+	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
 		rc = 0;
 	else {
 		__u32 haddr = ntohl(addr);
-
 		if (IN_CLASSA(haddr))
 			rc = 8;
 		else if (IN_CLASSB(haddr))
 			rc = 16;
 		else if (IN_CLASSC(haddr))
 			rc = 24;
+		else if (IN_CLASSE(haddr))
+			rc = 32;
 	}

 	return rc;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 06ee4696703c..00ec819f949b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -79,6 +79,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);

+	skb->tstamp = 0;
 	return dst_output(net, sk, skb);
 }
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index aa0b22697998..867be8f7f1fa 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -346,10 +346,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct rb_node **rbn, *parent;
 	struct sk_buff *skb1, *prev_tail;
+	int ihl, end, skb1_run_end;
 	struct net_device *dev;
 	unsigned int fragsize;
 	int flags, offset;
-	int ihl, end;
 	int err = -ENOENT;
 	u8 ecn;

@@ -419,7 +419,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	 *   overlapping fragment, the entire datagram (and any constituent
 	 *   fragments) MUST be silently discarded.
 	 *
-	 * We do the same here for IPv4 (and increment an snmp counter).
+	 * We do the same here for IPv4 (and increment an snmp counter) but
+	 * we do not want to drop the whole queue in response to a duplicate
+	 * fragment.
 	 */

 	err = -EINVAL;
@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		do {
 			parent = *rbn;
 			skb1 = rb_to_skb(parent);
+			skb1_run_end = skb1->ip_defrag_offset +
+				       FRAG_CB(skb1)->frag_run_len;
 			if (end <= skb1->ip_defrag_offset)
 				rbn = &parent->rb_left;
-			else if (offset >= skb1->ip_defrag_offset +
-				 FRAG_CB(skb1)->frag_run_len)
+			else if (offset >= skb1_run_end)
 				rbn = &parent->rb_right;
-			else /* Found an overlap with skb1. */
-				goto overlap;
+			else if (offset >= skb1->ip_defrag_offset &&
+				 end <= skb1_run_end)
+				goto err; /* No new data, potential duplicate */
+			else
+				goto overlap; /* Found an overlap */
 		} while (*rbn);
 		/* Here we have parent properly set, and rbn pointing to
 		 * one of its NULL left/right children. Insert skb.
 		 */
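[Editor's note] The ip_fragment.c hunk above stops treating an exact duplicate of an already-queued fragment run as a hostile overlap. Below is a minimal userspace sketch of that interval test; the enum and function names are illustrative, and only the comparison of [offset, end) against a queued run [run_start, run_end) mirrors the new ip_frag_queue() logic.

	#include <stdio.h>

	enum frag_pos {
		FRAG_LEFT,	/* entirely before the run: descend rb_left */
		FRAG_RIGHT,	/* entirely after the run: descend rb_right */
		FRAG_DUP,	/* no new data: drop only this fragment */
		FRAG_OVERLAP,	/* partial overlap: discard the whole queue */
	};

	static enum frag_pos classify(int offset, int end,
				      int run_start, int run_end)
	{
		if (end <= run_start)
			return FRAG_LEFT;
		if (offset >= run_end)
			return FRAG_RIGHT;
		if (offset >= run_start && end <= run_end)
			return FRAG_DUP;	/* the new "goto err" case */
		return FRAG_OVERLAP;		/* the old "goto overlap" case */
	}

	int main(void)
	{
		/* A retransmitted copy of a queued 0..1400 run is now a
		 * duplicate, so the reassembly queue survives it. */
		printf("%d\n", classify(0, 1400, 0, 1400) == FRAG_DUP);
		printf("%d\n", classify(700, 2100, 0, 1400) == FRAG_OVERLAP);
		return 0;
	}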
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 208a5b4419c6..b9a9873c25c6 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -429,6 +429,8 @@ static int __init ic_defaults(void)
 			ic_netmask = htonl(IN_CLASSB_NET);
 		else if (IN_CLASSC(ntohl(ic_myaddr)))
 			ic_netmask = htonl(IN_CLASSC_NET);
+		else if (IN_CLASSE(ntohl(ic_myaddr)))
+			ic_netmask = htonl(IN_CLASSE_NET);
 		else {
 			pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
 			       &ic_myaddr);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 75c654924532..ddbf8c9a1abb 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -69,6 +69,8 @@
 #include <net/nexthop.h>
 #include <net/switchdev.h>

+#include <linux/nospec.h>
+
 struct ipmr_rule {
 	struct fib_rule		common;
 };
@@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.vifi];
 		if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 			return -EFAULT;
 		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.vifi];
 		if (VIF_EXISTS(mrt, vr.vifi)) {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 076f51646d26..c55a5432cf37 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -390,7 +390,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,

 	skb->ip_summed = CHECKSUM_NONE;

-	sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc->tsflags);

 	if (flags & MSG_CONFIRM)
 		skb_set_dst_pending_confirm(skb, 1);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 703a8e801c5c..5f9fa0302b5a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -385,6 +385,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 	}
 #endif

+	skb->tstamp = 0;
 	return dst_output(net, sk, skb);
 }
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 3965d5396b0a..ad1a9ccd4b44 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
 int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
 		     struct socket **sockp)
 {
-	struct sockaddr_in6 udp6_addr;
+	struct sockaddr_in6 udp6_addr = {};
 	int err;
 	struct socket *sock = NULL;

@@ -58,6 +58,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
 		goto error;

 	if (cfg->peer_udp_port) {
+		memset(&udp6_addr, 0, sizeof(udp6_addr));
 		udp6_addr.sin6_family = AF_INET6;
 		memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
 		       sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 34b8a90e6be2..8276f1224f16 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -52,6 +52,8 @@
 #include <net/ip6_checksum.h>
 #include <linux/netconf.h>

+#include <linux/nospec.h>
+
 struct ip6mr_rule {
 	struct fib_rule		common;
 };
@@ -1841,6 +1843,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.mifi];
 		if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1915,6 +1918,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 			return -EFAULT;
 		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.mifi];
 		if (VIF_EXISTS(mrt, vr.mifi)) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index aed7eb5c2123..5a426226c762 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -657,6 +657,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,

 	skb->ip_summed = CHECKSUM_NONE;

+	skb_setup_tx_timestamp(skb, sockc->tsflags);
+
 	if (flags & MSG_CONFIRM)
 		skb_set_dst_pending_confirm(skb, 1);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e9652e623a31..4a6ff1482a9f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -7,6 +7,7 @@
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -1949,6 +1950,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	WARN(local->open_count, "%s: open count remains %d\n",
 	     wiphy_name(local->hw.wiphy), local->open_count);

+	ieee80211_txq_teardown_flows(local);
+
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ada8e16d52d2..87a729926734 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1264,7 +1264,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	rtnl_unlock();
 	ieee80211_led_exit(local);
 	ieee80211_wep_free(local);
-	ieee80211_txq_teardown_flows(local);
 fail_flows:
 	destroy_workqueue(local->workqueue);
 fail_workqueue:
@@ -1290,7 +1289,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
-	ieee80211_txq_teardown_flows(local);

 	rtnl_lock();
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index a794ca729000..3f0b96e1e02f 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -556,6 +556,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 	}

 	ieee80211_led_tx(local);
+
+	if (skb_has_frag_list(skb)) {
+		kfree_skb_list(skb_shinfo(skb)->frag_list);
+		skb_shinfo(skb)->frag_list = NULL;
+	}
 }

 /*
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4eef55da0878..8da228da53ae 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -531,8 +531,8 @@ nla_put_failure:
 		ret = -EMSGSIZE;
 	} else {
 		cb->args[IPSET_CB_ARG0] = i;
+		ipset_nest_end(skb, atd);
 	}
-	ipset_nest_end(skb, atd);
 out:
 	rcu_read_unlock();
 	return ret;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index b6d0f6deea86..9cd180bda092 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -427,7 +427,7 @@ insert_tree(struct net *net,
 	count = 1;
 	rbconn->list.count = count;

-	rb_link_node(&rbconn->node, parent, rbnode);
+	rb_link_node_rcu(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
out_unlock:
 	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index a975efd6b8c3..9da303461069 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
 /* TCP SACK sequence number adjustment */
 static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
 				      unsigned int protoff,
-				      struct tcphdr *tcph,
 				      struct nf_conn *ct,
 				      enum ip_conntrack_info ctinfo)
 {
-	unsigned int dir, optoff, optend;
+	struct tcphdr *tcph = (void *)skb->data + protoff;
 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+	unsigned int dir, optoff, optend;

 	optoff = protoff + sizeof(struct tcphdr);
 	optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
 	if (!skb_make_writable(skb, optend))
 		return 0;

+	tcph = (void *)skb->data + protoff;
 	dir = CTINFO2DIR(ctinfo);

 	while (optoff < optend) {
@@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
 			 ntohl(newack));
 	tcph->ack_seq = newack;

-	res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+	res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
out:
 	spin_unlock_bh(&ct->lock);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index e2b196054dfc..2268b10a9dcf 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -117,7 +117,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 	dst = skb_dst(skb);
 	if (dst->xfrm)
 		dst = ((struct xfrm_dst *)dst)->route;
-	dst_hold(dst);
+	if (!dst_hold_safe(dst))
+		return -EHOSTUNREACH;

 	if (sk && !net_eq(net, sock_net(sk)))
 		sk = NULL;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2e61aab6ed73..6e548d7c9f67 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1216,7 +1216,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
 			goto nla_put_failure;

-		if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+		if (rcu_access_pointer(basechain->stats) &&
+		    nft_dump_stats(skb, rcu_dereference(basechain->stats)))
 			goto nla_put_failure;
 	}

@@ -1392,7 +1393,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	return newstats;
 }

-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+				    struct nft_base_chain *chain,
 				    struct nft_stats __percpu *newstats)
 {
 	struct nft_stats __percpu *oldstats;
@@ -1400,8 +1402,9 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
 	if (newstats == NULL)
 		return;

-	if (chain->stats) {
-		oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+	if (rcu_access_pointer(chain->stats)) {
+		oldstats = rcu_dereference_protected(chain->stats,
+					lockdep_commit_lock_is_held(net));
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
@@ -1439,9 +1442,10 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
 		struct nft_base_chain *basechain = nft_base_chain(chain);

 		module_put(basechain->type->owner);
-		free_percpu(basechain->stats);
-		if (basechain->stats)
+		if (rcu_access_pointer(basechain->stats)) {
 			static_branch_dec(&nft_counters_enabled);
+			free_percpu(rcu_dereference_raw(basechain->stats));
+		}
 		kfree(chain->name);
 		kfree(basechain);
 	} else {
@@ -1590,7 +1594,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 			kfree(basechain);
 			return PTR_ERR(stats);
 		}
-		basechain->stats = stats;
+		rcu_assign_pointer(basechain->stats, stats);
 		static_branch_inc(&nft_counters_enabled);
 	}

@@ -6180,7 +6184,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
 		return;

 	basechain = nft_base_chain(trans->ctx.chain);
-	nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+	nft_chain_stats_replace(trans->ctx.net, basechain,
+				nft_trans_chain_stats(trans));

 	switch (nft_trans_chain_policy(trans)) {
 	case NF_DROP:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3fbce3b9c5ec..a50500232b0a 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -101,7 +101,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
 	struct nft_stats *stats;

 	base_chain = nft_base_chain(chain);
-	if (!base_chain->stats)
+	if (!rcu_access_pointer(base_chain->stats))
 		return;

 	local_bh_disable();
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6bb9f3cde0b0..3c023d6120f6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1706,7 +1706,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 			nlk->flags &= ~NETLINK_F_EXT_ACK;
 		err = 0;
 		break;
-	case NETLINK_DUMP_STRICT_CHK:
+	case NETLINK_GET_STRICT_CHK:
 		if (val)
 			nlk->flags |= NETLINK_F_STRICT_CHK;
 		else
@@ -1806,7 +1806,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
 			return -EFAULT;
 		err = 0;
 		break;
-	case NETLINK_DUMP_STRICT_CHK:
+	case NETLINK_GET_STRICT_CHK:
 		if (len < sizeof(int))
 			return -EINVAL;
 		len = sizeof(int);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a74650e98f42..6655793765b2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1965,7 +1965,7 @@ retry:
 	skb->mark = sk->sk_mark;
 	skb->tstamp = sockc.transmit_time;

-	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc.tsflags);

 	if (unlikely(extra_len == 4))
 		skb->no_fcs = 1;
@@ -2460,7 +2460,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 	skb->priority = po->sk.sk_priority;
 	skb->mark = po->sk.sk_mark;
 	skb->tstamp = sockc->transmit_time;
-	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc->tsflags);
 	skb_zcopy_set_nouarg(skb, ph.raw);

 	skb_reserve(skb, hlen);
@@ -2898,7 +2898,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		goto out_free;
 	}

-	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc.tsflags);

 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
 	    !packet_extra_vlan_len_allowed(dev, skb)) {
diff --git a/net/rds/message.c b/net/rds/message.c
index 4b00b1152a5f..f139420ba1f6 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -308,16 +308,27 @@ out:
 /*
  * RDS ops use this to grab SG entries from the rm's sg pool.
  */
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+					  int *ret)
 {
 	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
 	struct scatterlist *sg_ret;

-	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
-	WARN_ON(!nents);
+	if (WARN_ON(!ret))
+		return NULL;

-	if (rm->m_used_sgs + nents > rm->m_total_sgs)
+	if (nents <= 0) {
+		pr_warn("rds: alloc sgs failed! nents <= 0\n");
+		*ret = -EINVAL;
 		return NULL;
+	}
+
+	if (rm->m_used_sgs + nents > rm->m_total_sgs) {
+		pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
+			rm->m_total_sgs, rm->m_used_sgs, nents);
+		*ret = -ENOMEM;
+		return NULL;
+	}

 	sg_ret = &sg_first[rm->m_used_sgs];
 	sg_init_table(sg_ret, nents);
@@ -332,6 +343,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	unsigned int i;
 	int num_sgs = ceil(total_len, PAGE_SIZE);
 	int extra_bytes = num_sgs * sizeof(struct scatterlist);
+	int ret;

 	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
 	if (!rm)
@@ -340,10 +352,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
 	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
-	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
 	if (!rm->data.op_sg) {
 		rds_message_put(rm);
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(ret);
 	}

 	for (i = 0; i < rm->data.op_nents; ++i) {
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 98237feb607a..182ab8430594 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
 	return tot_pages;
 }

-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+			struct rds_iov_vector *iov)
 {
-	struct rds_iovec vec;
+	struct rds_iovec *vec;
 	struct rds_iovec __user *local_vec;
 	int tot_pages = 0;
 	unsigned int nr_pages;
@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
 	if (args->nr_local == 0)
 		return -EINVAL;

+	iov->iov = kcalloc(args->nr_local,
+			   sizeof(struct rds_iovec),
+			   GFP_KERNEL);
+	if (!iov->iov)
+		return -ENOMEM;
+
+	vec = &iov->iov[0];
+
+	if (copy_from_user(vec, local_vec, args->nr_local *
+			   sizeof(struct rds_iovec)))
+		return -EFAULT;
+	iov->len = args->nr_local;
+
 	/* figure out the number of pages in the vector */
-	for (i = 0; i < args->nr_local; i++) {
-		if (copy_from_user(&vec, &local_vec[i],
-				   sizeof(struct rds_iovec)))
-			return -EFAULT;
+	for (i = 0; i < args->nr_local; i++, vec++) {

-		nr_pages = rds_pages_in_vec(&vec);
+		nr_pages = rds_pages_in_vec(vec);
 		if (nr_pages == 0)
 			return -EINVAL;

@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg)
+		       struct cmsghdr *cmsg,
+		       struct rds_iov_vector *vec)
 {
 	struct rds_rdma_args *args;
 	struct rm_rdma_op *op = &rm->rdma;
 	int nr_pages;
 	unsigned int nr_bytes;
 	struct page **pages = NULL;
-	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
-	int iov_size;
+	struct rds_iovec *iovs;
 	unsigned int i, j;
 	int ret = 0;

@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		goto out_ret;
 	}

-	/* Check whether to allocate the iovec area */
-	iov_size = args->nr_local * sizeof(struct rds_iovec);
-	if (args->nr_local > UIO_FASTIOV) {
-		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
-		if (!iovs) {
-			ret = -ENOMEM;
-			goto out_ret;
-		}
+	if (vec->len != args->nr_local) {
+		ret = -EINVAL;
+		goto out_ret;
 	}

-	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
-		ret = -EFAULT;
-		goto out;
-	}
+	iovs = vec->iov;

 	nr_pages = rds_rdma_pages(iovs, args->nr_local);
 	if (nr_pages < 0) {
 		ret = -EINVAL;
-		goto out;
+		goto out_ret;
 	}

 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 	if (!pages) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_ret;
 	}

 	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -620,11 +623,9 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	op->op_active = 1;
 	op->op_recverr = rs->rs_recverr;
 	WARN_ON(!nr_pages);
-	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
-	if (!op->op_sg) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
+	if (!op->op_sg)
+		goto out_pages;

 	if (op->op_notify || op->op_recverr) {
 		/* We allocate an uninitialized notifier here, because
@@ -635,7 +636,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
 		if (!op->op_notifier) {
 			ret = -ENOMEM;
-			goto out;
+			goto out_pages;
 		}
 		op->op_notifier->n_user_token = args->user_token;
 		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
@@ -681,7 +682,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		 */
 		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
 		if (ret < 0)
-			goto out;
+			goto out_pages;
 		else
 			ret = 0;

@@ -714,13 +715,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			nr_bytes,
 			(unsigned int) args->remote_vec.bytes);
 		ret = -EINVAL;
-		goto out;
+		goto out_pages;
 	}
 	op->op_bytes = nr_bytes;

-out:
-	if (iovs != iovstack)
-		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
 	kfree(pages);
out_ret:
 	if (ret)
@@ -838,11 +837,9 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
 	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
 	rm->atomic.op_active = 1;
 	rm->atomic.op_recverr = rs->rs_recverr;
-	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
-	if (!rm->atomic.op_sg) {
-		ret = -ENOMEM;
+	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
+	if (!rm->atomic.op_sg)
 		goto err;
-	}

 	/* verify 8 byte-aligned */
 	if (args->local_addr & 0x7) {
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 6bfaf05b63b2..02ec4a3b2799 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
 	INIT_LIST_HEAD(&q->zcookie_head);
 }

+struct rds_iov_vector {
+	struct rds_iovec *iov;
+	int               len;
+};
+
+struct rds_iov_vector_arr {
+	struct rds_iov_vector *vec;
+	int                    len;
+	int                    indx;
+	int                    incr;
+};
+
 struct rds_message {
 	refcount_t		m_refcount;
 	struct list_head	m_sock_item;
@@ -827,7 +839,8 @@ rds_conn_connecting(struct rds_connection *conn)

 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+					  int *ret);
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
 			       bool zcopy);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -904,13 +917,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
 void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+			struct rds_iov_vector *iov);
 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 		       struct cmsghdr *cmsg);
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
+		       struct cmsghdr *cmsg,
+		       struct rds_iov_vector *vec);
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 		      struct cmsghdr *cmsg);
 void rds_rdma_free_op(struct rm_rdma_op *ro);
diff --git a/net/rds/send.c b/net/rds/send.c
index fe785ee819dd..3d822bad7de9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -876,13 +876,18 @@ out:
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+		       struct rds_iov_vector_arr *vct)
 {
 	struct cmsghdr *cmsg;
 	int size = 0;
 	int cmsg_groups = 0;
 	int retval;
 	bool zcopy_cookie = false;
+	struct rds_iov_vector *iov, *tmp_iov;
+
+	if (num_sgs < 0)
+		return -EINVAL;

 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -893,8 +898,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)

 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
+			if (vct->indx >= vct->len) {
+				vct->len += vct->incr;
+				tmp_iov =
+					krealloc(vct->vec,
+						 vct->len *
+						 sizeof(struct rds_iov_vector),
+						 GFP_KERNEL);
+				if (!tmp_iov) {
+					vct->len -= vct->incr;
+					return -ENOMEM;
+				}
+				vct->vec = tmp_iov;
+			}
+			iov = &vct->vec[vct->indx];
+			memset(iov, 0, sizeof(struct rds_iov_vector));
+			vct->indx++;
 			cmsg_groups |= 1;
-			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
 			if (retval < 0)
 				return retval;
 			size += retval;
@@ -951,10 +972,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
 }

 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
-			 struct msghdr *msg, int *allocated_mr)
+			 struct msghdr *msg, int *allocated_mr,
+			 struct rds_iov_vector_arr *vct)
 {
 	struct cmsghdr *cmsg;
-	int ret = 0;
+	int ret = 0, ind = 0;

 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -968,7 +990,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 		 */
 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
-			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+			if (ind >= vct->indx)
+				return -ENOMEM;
+			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+			ind++;
 			break;

 		case RDS_CMSG_RDMA_DEST:
@@ -1084,6 +1109,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 		sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
 	int num_sgs = ceil(payload_len, PAGE_SIZE);
 	int namelen;
+	struct rds_iov_vector_arr vct;
+	int ind;
+
+	memset(&vct, 0, sizeof(vct));
+
+	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
+	vct.incr = 1;

 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
@@ -1220,7 +1252,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
 	}
 	/* size of rm including all sgs */
-	ret = rds_rm_size(msg, num_sgs);
+	ret = rds_rm_size(msg, num_sgs, &vct);
 	if (ret < 0)
 		goto out;

@@ -1232,11 +1264,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)

 	/* Attach data to the rm */
 	if (payload_len) {
-		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-		if (!rm->data.op_sg) {
-			ret = -ENOMEM;
+		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+		if (!rm->data.op_sg)
 			goto out;
-		}
 		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
 		if (ret)
 			goto out;
@@ -1270,7 +1300,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	rm->m_conn_path = cpath;

 	/* Parse any control messages the user may have included. */
-	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
 	if (ret) {
 		/* Trigger connection so that its ready for the next retry */
 		if (ret == -EAGAIN)
@@ -1348,9 +1378,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	if (ret)
 		goto out;
 	rds_message_put(rm);
+
+	for (ind = 0; ind < vct.indx; ind++)
+		kfree(vct.vec[ind].iov);
+	kfree(vct.vec);
+
 	return payload_len;

out:
+	for (ind = 0; ind < vct.indx; ind++)
+		kfree(vct.vec[ind].iov);
+	kfree(vct.vec);
+
 	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
 	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
 	 * or in any other way, we need to destroy the MR again */
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 1eb2e2c31dd5..dad04e710493 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1372,10 +1372,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

 	if (fold) {
-		if (!tc_skip_sw(fold->flags))
-			rhashtable_remove_fast(&fold->mask->ht,
-					       &fold->ht_node,
-					       fold->mask->filter_ht_params);
+		rhashtable_remove_fast(&fold->mask->ht,
+				       &fold->ht_node,
+				       fold->mask->filter_ht_params);
 		if (!tc_skip_hw(fold->flags))
 			fl_hw_destroy_filter(tp, fold, NULL);
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6e27c62646e9..b9ed271b7ef7 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
 			addr->a.v6.sin6_port = 0;
+			addr->a.v6.sin6_flowinfo = 0;
 			addr->a.v6.sin6_addr = ifa->addr;
 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 			addr->valid = 1;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 63f08b4e51d6..c4da4a78d369 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -147,8 +147,14 @@ static int smc_release(struct socket *sock)
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 	}
 	if (smc->clcsock) {
+		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+			/* wake up clcsock accept */
+			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+		}
+		mutex_lock(&smc->clcsock_release_lock);
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
+		mutex_unlock(&smc->clcsock_release_lock);
 	}
 	if (smc->use_fallback) {
 		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -205,6 +211,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
 	spin_lock_init(&smc->conn.send_lock);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
+	mutex_init(&smc->clcsock_release_lock);

 	return sk;
 }
@@ -824,7 +831,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 	struct socket *new_clcsock = NULL;
 	struct sock *lsk = &lsmc->sk;
 	struct sock *new_sk;
-	int rc;
+	int rc = -EINVAL;

 	release_sock(lsk);
 	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -837,7 +844,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 	}
 	*new_smc = smc_sk(new_sk);

-	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_lock(&lsmc->clcsock_release_lock);
+	if (lsmc->clcsock)
+		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_unlock(&lsmc->clcsock_release_lock);
 	lock_sock(lsk);
 	if (rc < 0)
 		lsk->sk_err = -rc;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 08786ace6010..5721416d0605 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -219,6 +219,10 @@ struct smc_sock {	/* smc sock container */
 						 * started, waiting for unsent
 						 * data to be sent
 						 */
+	struct mutex            clcsock_release_lock;
+						/* protects clcsock of a listen
+						 * socket
+						 * */
 };

 static inline struct smc_sock *smc_sk(const struct sock *sk)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c6782aa47525..24cbddc44c88 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1952,6 +1952,7 @@ call_connect_status(struct rpc_task *task)
 		/* retry with existing socket, after a delay */
 		rpc_delay(task, 3*HZ);
 		/* fall through */
+	case -ENOTCONN:
 	case -EAGAIN:
 		/* Check for timeouts before looping back to call_bind */
 	case -ETIMEDOUT:
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ce927002862a..73547d17d3c6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -67,7 +67,6 @@
 */
 static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
-static void	xprt_connect_status(struct rpc_task *task);
 static void	xprt_destroy(struct rpc_xprt *xprt);

 static DEFINE_SPINLOCK(xprt_list_lock);
@@ -680,7 +679,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
 	/* Try to schedule an autoclose RPC call */
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
 		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-	xprt_wake_pending_tasks(xprt, -EAGAIN);
+	else if (xprt->snd_task)
+		rpc_wake_up_queued_task_set_status(&xprt->pending,
+						   xprt->snd_task, -ENOTCONN);
 	spin_unlock_bh(&xprt->transport_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -820,7 +821,7 @@ void xprt_connect(struct rpc_task *task)
 	if (!xprt_connected(xprt)) {
 		task->tk_timeout = task->tk_rqstp->rq_timeout;
 		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
-		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+		rpc_sleep_on(&xprt->pending, task, NULL);

 		if (test_bit(XPRT_CLOSING, &xprt->state))
 			return;
@@ -839,34 +840,6 @@ void xprt_connect(struct rpc_task *task)
 	xprt_release_write(xprt, task);
 }

-static void xprt_connect_status(struct rpc_task *task)
-{
-	switch (task->tk_status) {
-	case 0:
-		dprintk("RPC: %5u xprt_connect_status: connection established\n",
-			task->tk_pid);
-		break;
-	case -ECONNREFUSED:
-	case -ECONNRESET:
-	case -ECONNABORTED:
-	case -ENETUNREACH:
-	case -EHOSTUNREACH:
-	case -EPIPE:
-	case -EAGAIN:
-		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
-		break;
-	case -ETIMEDOUT:
-		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
-			"out\n", task->tk_pid);
-		break;
-	default:
-		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
-			"server %s\n", task->tk_pid, -task->tk_status,
-			task->tk_rqstp->rq_xprt->servername);
-		task->tk_status = -EIO;
-	}
-}
-
 enum xprt_xid_rb_cmp {
 	XID_RB_EQUAL,
 	XID_RB_LEFT,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8a5e823e0b33..f0b3700cec95 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1217,6 +1217,8 @@ static void xs_reset_transport(struct sock_xprt *transport)

 	trace_rpc_socket_close(xprt, sock);
 	sock_release(sock);
+
+	xprt_disconnect_done(xprt);
 }

 /**
@@ -1237,8 +1239,6 @@ static void xs_close(struct rpc_xprt *xprt)

 	xs_reset_transport(transport);
 	xprt->reestablish_timeout = 0;
-
-	xprt_disconnect_done(xprt);
 }

 static void xs_inject_disconnect(struct rpc_xprt *xprt)
@@ -1489,8 +1489,6 @@ static void xs_tcp_state_change(struct sock *sk)
 					&transport->sock_state))
 			xprt_clear_connecting(xprt);
 		clear_bit(XPRT_CLOSING, &xprt->state);
-		if (sk->sk_err)
-			xprt_wake_pending_tasks(xprt, -sk->sk_err);
 		/* Trigger the socket release */
 		xs_tcp_force_close(xprt);
 	}
@@ -2092,8 +2090,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
 	trace_rpc_socket_connect(xprt, sock, 0);
 	status = 0;
out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }

@@ -2329,8 +2327,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	}
 	status = -EAGAIN;
out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }

diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 291d6bbe85f4..1217c90a363b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -889,7 +889,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
 	struct net *net = sock_net(sk);
 	struct tipc_member *mb = NULL;
 	u32 node, port;
@@ -903,7 +902,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port, blks, &mb));
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port, blks,
+						 &mb));
 	if (unlikely(rc))
 		return rc;

@@ -933,7 +934,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
@@ -950,9 +950,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
 	scope = msg_lookup_scope(hdr);
-	exclude = tipc_group_exclude(grp);

 	while (++lookups < 4) {
+		exclude = tipc_group_exclude(tsk->group);
+
 		first = NULL;

 		/* Look for a non-congested destination member, if any */
@@ -961,7 +962,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 					 &dstcnt, exclude, false))
 				return -EHOSTUNREACH;
 			tipc_dest_pop(&dsts, &node, &port);
-			cong = tipc_group_cong(grp, node, port, blks, &mbr);
+			cong = tipc_group_cong(tsk->group, node, port, blks,
+					       &mbr);
 			if (!cong)
 				break;
 			if (mbr == first)
@@ -980,7 +982,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port,
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port,
 						 blks, &mbr));
 	if (unlikely(rc))
 		return rc;
@@ -1015,8 +1018,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
-	struct tipc_nlist *dsts = tipc_group_dests(grp);
+	struct tipc_nlist *dsts;
 	struct tipc_mc_method *method = &tsk->mc_method;
 	bool ack = method->mandatory && method->rcast;
 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1025,15 +1027,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	struct sk_buff_head pkts;
 	int rc = -EHOSTUNREACH;

-	if (!dsts->local && !dsts->remote)
-		return -EHOSTUNREACH;
-
 	/* Block or return if any destination link or member is congested */
-	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
-				!tipc_group_bc_cong(grp, blks));
+	rc = tipc_wait_for_cond(sock, &timeout,
+				!tsk->cong_link_cnt && tsk->group &&
+				!tipc_group_bc_cong(tsk->group, blks));
 	if (unlikely(rc))
 		return rc;

+	dsts = tipc_group_dests(tsk->group);
+	if (!dsts->local && !dsts->remote)
+		return -EHOSTUNREACH;
+
 	/* Complete message header */
 	if (dest) {
 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1045,7 +1049,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 	msg_set_destport(hdr, 0);
 	msg_set_destnode(hdr, 0);
-	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

 	/* Avoid getting stuck with repeated forced replicasts */
 	msg_set_grp_bc_ack_req(hdr, ack);
@@ -2757,11 +2761,15 @@ void tipc_sk_reinit(struct net *net)
 		rhashtable_walk_start(&iter);

 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
-			spin_lock_bh(&tsk->sk.sk_lock.slock);
+			sock_hold(&tsk->sk);
+			rhashtable_walk_stop(&iter);
+			lock_sock(&tsk->sk);
 			msg = &tsk->phdr;
 			msg_set_prevnode(msg, tipc_own_addr(net));
 			msg_set_orignode(msg, tipc_own_addr(net));
-			spin_unlock_bh(&tsk->sk.sk_lock.slock);
+			release_sock(&tsk->sk);
+			rhashtable_walk_start(&iter);
+			sock_put(&tsk->sk);
 		}

 		rhashtable_walk_stop(&iter);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 10dc59ce9c82..4d85d71f16e2 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
 		}

 		err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
-		if (err) {
-			kfree_skb(_skb);
+		if (err)
 			goto out;
-		}
 	}
 	err = 0;
out:
@@ -681,6 +679,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 		if (err)
 			goto err;

+		if (remote.proto != local.proto) {
+			err = -EINVAL;
+			goto err;
+		}
+
 		/* Checking remote ip address */
 		rmcast = tipc_udp_is_mcast_addr(&remote);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 311cec8e533d..28887cf628b8 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -56,7 +56,7 @@ enum {
 static struct proto *saved_tcpv6_prot;
 static DEFINE_MUTEX(tcpv6_prot_mutex);
 static LIST_HEAD(device_list);
-static DEFINE_MUTEX(device_mutex);
+static DEFINE_SPINLOCK(device_spinlock);
 static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
 static struct proto_ops tls_sw_proto_ops;

@@ -538,11 +538,14 @@ static struct tls_context *create_ctx(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tls_context *ctx;

-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
 	if (!ctx)
 		return NULL;

 	icsk->icsk_ulp_data = ctx;
+	ctx->setsockopt = sk->sk_prot->setsockopt;
+	ctx->getsockopt = sk->sk_prot->getsockopt;
+	ctx->sk_proto_close = sk->sk_prot->close;
 	return ctx;
 }

@@ -552,7 +555,7 @@ static int tls_hw_prot(struct sock *sk)
 	struct tls_device *dev;
 	int rc = 0;

-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_for_each_entry(dev, &device_list, dev_list) {
 		if (dev->feature && dev->feature(dev)) {
 			ctx = create_ctx(sk);
@@ -570,7 +573,7 @@ static int tls_hw_prot(struct sock *sk)
 		}
 	}
out:
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
 	return rc;
 }

@@ -579,12 +582,17 @@ static void tls_hw_unhash(struct sock *sk)
 	struct tls_context *ctx = tls_get_ctx(sk);
 	struct tls_device *dev;

-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_for_each_entry(dev, &device_list, dev_list) {
-		if (dev->unhash)
+		if (dev->unhash) {
+			kref_get(&dev->kref);
+			spin_unlock_bh(&device_spinlock);
 			dev->unhash(dev, sk);
+			kref_put(&dev->kref, dev->release);
+			spin_lock_bh(&device_spinlock);
+		}
 	}
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
 	ctx->unhash(sk);
 }

@@ -595,12 +603,17 @@ static int tls_hw_hash(struct sock *sk)
 	int err;

 	err = ctx->hash(sk);
-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_for_each_entry(dev, &device_list, dev_list) {
-		if (dev->hash)
+		if (dev->hash) {
+			kref_get(&dev->kref);
+			spin_unlock_bh(&device_spinlock);
 			err |= dev->hash(dev, sk);
+			kref_put(&dev->kref, dev->release);
+			spin_lock_bh(&device_spinlock);
+		}
 	}
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);

 	if (err)
 		tls_hw_unhash(sk);
@@ -675,9 +688,6 @@ static int tls_init(struct sock *sk)
 		rc = -ENOMEM;
 		goto out;
 	}
-	ctx->setsockopt = sk->sk_prot->setsockopt;
-	ctx->getsockopt = sk->sk_prot->getsockopt;
-	ctx->sk_proto_close = sk->sk_prot->close;

 	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
 	if (ip_ver == TLSV6 &&
@@ -699,17 +709,17 @@ out:

 void tls_register_device(struct tls_device *device)
 {
-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_add_tail(&device->dev_list, &device_list);
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
 }
 EXPORT_SYMBOL(tls_register_device);

 void tls_unregister_device(struct tls_device *device)
 {
-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_del(&device->dev_list);
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
 }
 EXPORT_SYMBOL(tls_unregister_device);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ab27a2872935..43a1dec08825 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -107,6 +107,7 @@
 #include <linux/mutex.h>
 #include <linux/net.h>
 #include <linux/poll.h>
+#include <linux/random.h>
 #include <linux/skbuff.h>
 #include <linux/smp.h>
 #include <linux/socket.h>
@@ -504,9 +505,13 @@ out:
 static int __vsock_bind_stream(struct vsock_sock *vsk,
 			       struct sockaddr_vm *addr)
 {
-	static u32 port = LAST_RESERVED_PORT + 1;
+	static u32 port = 0;
 	struct sockaddr_vm new_addr;

+	if (!port)
+		port = LAST_RESERVED_PORT + 1 +
+			prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+
 	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

 	if (addr->svm_port == VMADDR_PORT_ANY) {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index cb332adb84cd..c361ce782412 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -264,6 +264,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
 }

 static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+				      struct sockaddr_vm *dst,
+				      enum vmci_transport_packet_type type,
+				      u64 size,
+				      u64 mode,
+				      struct vmci_transport_waiting_info *wait,
+				      u16 proto,
+				      struct vmci_handle handle)
+{
+	struct vmci_transport_packet *pkt;
+	int err;
+
+	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+						mode, wait, proto, handle,
+						true);
+	kfree(pkt);
+
+	return err;
+}
+
+static int
 vmci_transport_send_control_pkt(struct sock *sk,
 				enum vmci_transport_packet_type type,
 				u64 size,
@@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
 				u16 proto,
 				struct vmci_handle handle)
 {
-	struct vmci_transport_packet *pkt;
 	struct vsock_sock *vsk;
-	int err;

 	vsk = vsock_sk(sk);

@@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
 	if (!vsock_addr_bound(&vsk->remote_addr))
 		return -EINVAL;

-	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
-	if (!pkt)
-		return -ENOMEM;
-
-	err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
-						&vsk->remote_addr, type, size,
-						mode, wait, proto, handle,
-						true);
-	kfree(pkt);
-
-	return err;
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+						     &vsk->remote_addr,
+						     type, size, mode,
+						     wait, proto, handle);
 }

 static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
 static int vmci_transport_send_reset(struct sock *sk,
 				     struct vmci_transport_packet *pkt)
 {
+	struct sockaddr_vm *dst_ptr;
+	struct sockaddr_vm dst;
+	struct vsock_sock *vsk;
+
 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
 		return 0;
-	return vmci_transport_send_control_pkt(sk,
-					VMCI_TRANSPORT_PACKET_TYPE_RST,
-					0, 0, NULL, VSOCK_PROTO_INVALID,
-					VMCI_INVALID_HANDLE);
+
+	vsk = vsock_sk(sk);
+
+	if (!vsock_addr_bound(&vsk->local_addr))
+		return -EINVAL;
+
+	if (vsock_addr_bound(&vsk->remote_addr)) {
+		dst_ptr = &vsk->remote_addr;
+	} else {
+		vsock_addr_init(&dst, pkt->dg.src.context,
+				pkt->src_port);
+		dst_ptr = &dst;
+	}
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+					VMCI_TRANSPORT_PACKET_TYPE_RST,
+					0, 0, NULL, VSOCK_PROTO_INVALID,
+					VMCI_INVALID_HANDLE);
 }

 static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 10ec05589795..5e49492d5911 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9152,8 +9152,10 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
 		int r = validate_pae_over_nl80211(rdev, info);

-		if (r < 0)
+		if (r < 0) {
+			kzfree(connkeys);
 			return r;
+		}

 		ibss.control_port_over_nl80211 = true;
 	}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 6bc817359b58..b3b613660d44 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -315,6 +315,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)

 		sp->xvec[sp->len++] = x;

+		skb_dst_force(skb);
+		if (!skb_dst(skb)) {
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+			goto drop;
+		}
+
lock:
 		spin_lock(&x->lock);

@@ -354,7 +360,6 @@ lock:
 		XFRM_SKB_CB(skb)->seq.input.low = seq;
 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

-		skb_dst_force(skb);
 		dev_hold(skb->dev);

 		if (crypto_done)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 757c4d11983b..9333153bafda 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -102,6 +102,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 		skb_dst_force(skb);
 		if (!skb_dst(skb)) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dc4a9f1fb941..23c92891758a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
 	module_put(mode->owner);
 }

+void xfrm_state_free(struct xfrm_state *x)
+{
+	kmem_cache_free(xfrm_state_cache, x);
+}
+EXPORT_SYMBOL(xfrm_state_free);
+
 static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
 	tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 	}
 	xfrm_dev_state_free(x);
 	security_xfrm_state_free(x);
-	kmem_cache_free(xfrm_state_cache, x);
+	xfrm_state_free(x);
 }

 static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 {
 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 	si->sadcnt = net->xfrm.state_num;
-	si->sadhcnt = net->xfrm.state_hmask;
+	si->sadhcnt = net->xfrm.state_hmask + 1;
 	si->sadhmcnt = xfrm_state_hashmax;
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c9a84e22f5d5..277c1c46fe94 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,

 	}

-	kfree(x);
+	xfrm_state_free(x);
 	kfree(xp);

 	return 0;

free_state:
-	kfree(x);
+	xfrm_state_free(x);

nomem:
 	return err;
 }
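[Editor's note] The ipmr.c and ip6mr.c hunks earlier in this diff clamp a user-supplied table index with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively read past vif_table[]. Below is a standalone sketch of the underlying mask trick; it mirrors the generic fallback in include/linux/nospec.h, but the names are illustrative, and the arithmetic right shift on a signed value is a common-compiler assumption rather than a guarantee of the C standard.

	#include <stdio.h>

	#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

	/* ~0UL when index < size, 0 otherwise, computed without a branch
	 * that the CPU could mispredict. */
	static unsigned long index_mask_nospec(unsigned long index,
					       unsigned long size)
	{
		return ~(long)(index | (size - 1UL - index)) >>
		       (BITS_PER_LONG - 1);
	}

	int main(void)
	{
		unsigned long maxvif = 16;
		unsigned long vifi = 20;	/* already rejected by the
						 * ">= maxvif" check, but
						 * speculation may continue */

		/* After masking, an out-of-range index degrades to 0
		 * instead of indexing past the end of the array. */
		vifi &= index_mask_nospec(vifi, maxvif);
		printf("clamped index: %lu\n", vifi);	/* prints 0 */
		return 0;
	}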