author      David S. Miller    2021-03-25 15:31:22 -0700
committer   David S. Miller    2021-03-25 15:31:22 -0700
commit      efd13b71a3fa31413f8d15342e01d44b60b0a432 (patch)
tree        2ed1b299e25538c5a60485a1047507b49d3e0ecf /net
parent      84c7f6c33f42a12eb036ebf0f0e3670799304120 (diff)
parent      002322402dafd846c424ffa9240a937f49b48c42 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/main.c | 1
-rw-r--r--  net/bridge/br_switchdev.c | 2
-rw-r--r--  net/can/isotp.c | 18
-rw-r--r--  net/core/dev.c | 33
-rw-r--r--  net/core/drop_monitor.c | 23
-rw-r--r--  net/core/dst.c | 59
-rw-r--r--  net/core/filter.c | 12
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/sock.c | 44
-rw-r--r--  net/dccp/ipv6.c | 5
-rw-r--r--  net/dsa/dsa2.c | 11
-rw-r--r--  net/ipv4/inet_connection_sock.c | 7
-rw-r--r--  net/ipv4/ipconfig.c | 14
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 16
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 16
-rw-r--r--  net/ipv4/route.c | 45
-rw-r--r--  net/ipv4/tcp_minisocks.c | 7
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 16
-rw-r--r--  net/ipv6/route.c | 36
-rw-r--r--  net/ipv6/tcp_ipv6.c | 5
-rw-r--r--  net/mac80211/aead_api.c | 5
-rw-r--r--  net/mac80211/aes_gmac.c | 5
-rw-r--r--  net/mac80211/cfg.c | 4
-rw-r--r--  net/mac80211/ibss.c | 2
-rw-r--r--  net/mac80211/main.c | 13
-rw-r--r--  net/mac80211/mlme.c | 2
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/mptcp/options.c | 24
-rw-r--r--  net/mptcp/protocol.c | 4
-rw-r--r--  net/mptcp/subflow.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 3
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 22
-rw-r--r--  net/netfilter/x_tables.c | 49
-rw-r--r--  net/openvswitch/conntrack.c | 8
-rw-r--r--  net/openvswitch/conntrack.h | 6
-rw-r--r--  net/openvswitch/flow.c | 4
-rw-r--r--  net/qrtr/qrtr.c | 5
-rw-r--r--  net/sched/act_ct.c | 6
-rw-r--r--  net/sched/cls_api.c | 1
-rw-r--r--  net/sched/cls_flower.c | 2
-rw-r--r--  net/sched/sch_choke.c | 7
-rw-r--r--  net/sched/sch_gred.c | 2
-rw-r--r--  net/sched/sch_htb.c | 19
-rw-r--r--  net/sched/sch_red.c | 7
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/sctp/output.c | 7
-rw-r--r--  net/sctp/outqueue.c | 7
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 11
-rw-r--r--  net/sunrpc/sched.c | 5
-rw-r--r--  net/sunrpc/svc.c | 6
-rw-r--r--  net/sunrpc/svc_xprt.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 6
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 82
-rw-r--r--  net/tipc/node.c | 11
-rw-r--r--  net/vmw_vsock/af_vsock.c | 1
-rw-r--r--  net/wireless/nl80211.c | 12
61 files changed, 454 insertions(+), 296 deletions(-)
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index e48f7ac8a854..3ddd66e4c29e 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -702,7 +702,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
-MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);
MODULE_ALIAS_RTNL_LINK("batadv");
MODULE_ALIAS_GENL_FAMILY(BATADV_NL_NAME);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index b89503832fcc..1e24d9a2c9a7 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -128,6 +128,8 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
{
if (!fdb->dst)
return;
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+ return;
switch (type) {
case RTM_DELNEIGH:
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 3ef7f78e553b..15ea1234d457 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -196,7 +196,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
nskb->dev = dev;
can_skb_set_owner(nskb, sk);
ncf = (struct canfd_frame *)nskb->data;
- skb_put(nskb, so->ll.mtu);
+ skb_put_zero(nskb, so->ll.mtu);
/* create & send flow control reply */
ncf->can_id = so->txid;
@@ -215,8 +215,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
if (ae)
ncf->data[0] = so->opt.ext_address;
- if (so->ll.mtu == CANFD_MTU)
- ncf->flags = so->ll.tx_flags;
+ ncf->flags = so->ll.tx_flags;
can_send_ret = can_send(nskb, 1);
if (can_send_ret)
@@ -780,7 +779,7 @@ isotp_tx_burst:
can_skb_prv(skb)->skbcnt = 0;
cf = (struct canfd_frame *)skb->data;
- skb_put(skb, so->ll.mtu);
+ skb_put_zero(skb, so->ll.mtu);
/* create consecutive frame */
isotp_fill_dataframe(cf, so, ae, 0);
@@ -790,8 +789,7 @@ isotp_tx_burst:
so->tx.sn %= 16;
so->tx.bs++;
- if (so->ll.mtu == CANFD_MTU)
- cf->flags = so->ll.tx_flags;
+ cf->flags = so->ll.tx_flags;
skb->dev = dev;
can_skb_set_owner(skb, sk);
@@ -897,7 +895,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
so->tx.idx = 0;
cf = (struct canfd_frame *)skb->data;
- skb_put(skb, so->ll.mtu);
+ skb_put_zero(skb, so->ll.mtu);
/* check for single frame transmission depending on TX_DL */
if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
@@ -939,8 +937,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
/* send the first or only CAN frame */
- if (so->ll.mtu == CANFD_MTU)
- cf->flags = so->ll.tx_flags;
+ cf->flags = so->ll.tx_flags;
skb->dev = dev;
skb->sk = sk;
@@ -1228,7 +1225,8 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
return -EINVAL;
- if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
+ if (ll.mtu == CAN_MTU &&
+ (ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0))
return -EINVAL;
memcpy(&so->ll, &ll, sizeof(ll));
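
Note: the recurring skb_put() -> skb_put_zero() swap above closes an information leak — the unused padding of a fixed-size CAN(-FD) frame was sent out uninitialized. A minimal sketch of the difference, using the kernel skb API as documented:

	u8 *tail;

	tail = skb_put(nskb, so->ll.mtu);  /* reserves bytes; contents are undefined */
	memset(tail, 0, so->ll.mtu);       /* what skb_put_zero() folds into one call,
	                                    * so padding goes out as zeros, not stale
	                                    * heap memory */
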
diff --git a/net/core/dev.c b/net/core/dev.c
index 4bb6dcdbed8b..48b529d59157 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1230,6 +1230,18 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
return -ENOMEM;
for_each_netdev(net, d) {
+ struct netdev_name_node *name_node;
+ list_for_each_entry(name_node, &d->name_node->list, list) {
+ if (!sscanf(name_node->name, name, &i))
+ continue;
+ if (i < 0 || i >= max_netdevices)
+ continue;
+
+ /* avoid cases where sscanf is not exact inverse of printf */
+ snprintf(buf, IFNAMSIZ, name, i);
+ if (!strncmp(buf, name_node->name, IFNAMSIZ))
+ set_bit(i, inuse);
+ }
if (!sscanf(d->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
@@ -4345,6 +4357,13 @@ static inline void ____napi_schedule(struct softnet_data *sd,
*/
thread = READ_ONCE(napi->thread);
if (thread) {
+ /* Avoid doing set_bit() if the thread is in
+ * INTERRUPTIBLE state, cause napi_thread_wait()
+ * makes sure to proceed with napi polling
+ * if the thread is explicitly woken from here.
+ */
+ if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE)
+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
}
@@ -6534,6 +6553,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
+ NAPIF_STATE_SCHED_THREADED |
NAPIF_STATE_PREFER_BUSY_POLL);
/* If STATE_MISSED was set, leave STATE_SCHED set,
@@ -7017,16 +7037,25 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
static int napi_thread_wait(struct napi_struct *napi)
{
+ bool woken = false;
+
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop() && !napi_disable_pending(napi)) {
- if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+ /* Testing SCHED_THREADED bit here to make sure the current
+ * kthread owns this napi and could poll on this napi.
+ * Testing SCHED bit is not enough because SCHED bit might be
+ * set by some other busy poll thread or by napi_disable().
+ */
+ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
WARN_ON(!list_empty(&napi->poll_list));
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
+ /* woken being true indicates this thread owns this napi. */
+ woken = true;
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
@@ -11412,7 +11441,7 @@ static void __net_exit default_device_exit(struct net *net)
continue;
/* Leave virtual devices for the generic cleanup */
- if (dev->rtnl_link_ops)
+ if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
continue;
/* Push remaining network devices to init_net */
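
Note: the threaded-NAPI hunks above fix an ownership race — a plain wake_up_process() can be missed if the kthread is not yet sleeping, and testing SCHED alone can mistake a busy-poller's bit for ownership. A minimal user-space sketch of the same handshake, with hypothetical names, assuming C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define SCHED_THREADED (1u << 0)
	static _Atomic unsigned int state;

	/* scheduler side (cf. ____napi_schedule): if the kthread is already
	 * in TASK_INTERRUPTIBLE, the explicit wakeup itself conveys
	 * ownership and the bit can be skipped; otherwise the bit records
	 * the hand-off. */
	static void hand_off(bool thread_sleeping)
	{
		if (!thread_sleeping)
			atomic_fetch_or(&state, SCHED_THREADED);
		/* wake_up_process(thread); */
	}

	/* kthread side (cf. napi_thread_wait): poll only once ownership is
	 * proven, either by the bit or by having been explicitly woken. */
	static bool owns_napi(bool *woken)
	{
		if ((atomic_load(&state) & SCHED_THREADED) || *woken)
			return true;	/* safe to poll this napi */
		/* schedule(); ...on return from sleep: */
		*woken = true;		/* an explicit wakeup implies ownership */
		return false;
	}
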
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 1eb02c2236f2..ead2a8aa57b4 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1053,6 +1053,20 @@ static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
return 0;
err_module_put:
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
+ struct sk_buff *skb;
+
+ del_timer_sync(&hw_data->send_timer);
+ cancel_work_sync(&hw_data->dm_alert_work);
+ while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
+ struct devlink_trap_metadata *hw_metadata;
+
+ hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
+ net_dm_hw_metadata_free(hw_metadata);
+ consume_skb(skb);
+ }
+ }
module_put(THIS_MODULE);
return rc;
}
@@ -1134,6 +1148,15 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
err_unregister_trace:
unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
err_module_put:
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
+ struct sk_buff *skb;
+
+ del_timer_sync(&data->send_timer);
+ cancel_work_sync(&data->dm_alert_work);
+ while ((skb = __skb_dequeue(&data->drop_queue)))
+ consume_skb(skb);
+ }
module_put(THIS_MODULE);
return rc;
}
diff --git a/net/core/dst.c b/net/core/dst.c
index 0c01bd8d9d81..fb3bcba87744 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -237,37 +237,62 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
-static struct dst_ops md_dst_ops = {
- .family = AF_UNSPEC,
-};
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
+{
+ return NULL;
+}
-static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
- WARN_ONCE(1, "Attempting to call output on metadata dst\n");
- kfree_skb(skb);
- return 0;
+ return NULL;
}
-static int dst_md_discard(struct sk_buff *skb)
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
- WARN_ONCE(1, "Attempting to call input on metadata dst\n");
- kfree_skb(skb);
- return 0;
+ return NULL;
+}
+
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
+{
+}
+EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
+
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
+
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
+{
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
+EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
+
+static struct dst_ops dst_blackhole_ops = {
+ .family = AF_UNSPEC,
+ .neigh_lookup = dst_blackhole_neigh_lookup,
+ .check = dst_blackhole_check,
+ .cow_metrics = dst_blackhole_cow_metrics,
+ .update_pmtu = dst_blackhole_update_pmtu,
+ .redirect = dst_blackhole_redirect,
+ .mtu = dst_blackhole_mtu,
+};
static void __metadata_dst_init(struct metadata_dst *md_dst,
enum metadata_type type, u8 optslen)
-
{
struct dst_entry *dst;
dst = &md_dst->dst;
- dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+ dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
DST_METADATA | DST_NOCOUNT);
-
- dst->input = dst_md_discard;
- dst->output = dst_md_discard_out;
-
memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
md_dst->type = type;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index b6732000d8a2..f5eeebf6a16f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5600,7 +5600,7 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
return -EINVAL;
- if (unlikely(flags & BPF_MTU_CHK_SEGS && len_diff))
+ if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
return -EINVAL;
dev = __dev_via_ifindex(dev, ifindex);
@@ -5610,7 +5610,11 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
mtu = READ_ONCE(dev->mtu);
dev_len = mtu + dev->hard_header_len;
- skb_len = skb->len + len_diff; /* minus result pass check */
+
+ /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+ skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
+
+ skb_len += len_diff; /* minus result pass check */
if (skb_len <= dev_len) {
ret = BPF_MTU_CHK_RET_SUCCESS;
goto out;
@@ -5655,6 +5659,10 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
/* Add L2-header as dev MTU is L3 size */
dev_len = mtu + dev->hard_header_len;
+ /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+ if (*mtu_len)
+ xdp_len = *mtu_len + dev->hard_header_len;
+
xdp_len += len_diff; /* minus result pass check */
if (xdp_len > dev_len)
ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
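
Note: the filter.c hunks make bpf_check_mtu() honor a caller-supplied *mtu_len as the L3 input length (mirroring fib_lookup) instead of always deriving the length from skb->len. A hedged sketch of a TC program using the helper after this change; the section name and drop policy are illustrative, not part of the patch:

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	SEC("tc")
	int mtu_guard(struct __sk_buff *skb)
	{
		__u32 mtu_len = 0;	/* 0: check skb->len; non-zero: treated
					 * as the L3 length (like iph->tot_len) */

		/* ifindex 0: use skb's device; len_diff 0, no flags */
		if (bpf_check_mtu(skb, 0, &mtu_len, 0, 0) == 0)
			return TC_ACT_OK;	/* fits the device MTU */

		/* on failure the helper wrote the device MTU into mtu_len */
		return TC_ACT_SHOT;
	}

	char _license[] SEC("license") = "GPL";
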
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2ed380d096ce..5985029e43d4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -176,7 +176,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb,
* avoid confusion with packets without such field
*/
if (icmp_has_id(ih->type))
- key_icmp->id = ih->un.echo.id ? : 1;
+ key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
else
key_icmp->id = 0;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 0ed98f20448a..cc31b601ae10 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3440,6 +3440,32 @@ static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
twsk_prot->twsk_slab = NULL;
}
+static int tw_prot_init(const struct proto *prot)
+{
+ struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
+
+ if (!twsk_prot)
+ return 0;
+
+ twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
+ prot->name);
+ if (!twsk_prot->twsk_slab_name)
+ return -ENOMEM;
+
+ twsk_prot->twsk_slab =
+ kmem_cache_create(twsk_prot->twsk_slab_name,
+ twsk_prot->twsk_obj_size, 0,
+ SLAB_ACCOUNT | prot->slab_flags,
+ NULL);
+ if (!twsk_prot->twsk_slab) {
+ pr_crit("%s: Can't create timewait sock SLAB cache!\n",
+ prot->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
if (!rsk_prot)
@@ -3496,22 +3522,8 @@ int proto_register(struct proto *prot, int alloc_slab)
if (req_prot_init(prot))
goto out_free_request_sock_slab;
- if (prot->twsk_prot != NULL) {
- prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
-
- if (prot->twsk_prot->twsk_slab_name == NULL)
- goto out_free_request_sock_slab;
-
- prot->twsk_prot->twsk_slab =
- kmem_cache_create(prot->twsk_prot->twsk_slab_name,
- prot->twsk_prot->twsk_obj_size,
- 0,
- SLAB_ACCOUNT |
- prot->slab_flags,
- NULL);
- if (prot->twsk_prot->twsk_slab == NULL)
- goto out_free_timewait_sock_slab;
- }
+ if (tw_prot_init(prot))
+ goto out_free_timewait_sock_slab;
}
mutex_lock(&proto_list_mutex);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 1f73603913f5..2be5c69824f9 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
return 0; /* discard, don't send a reset here */
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 4d4956ed303b..d142eb2b288b 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -1066,6 +1066,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
{
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst = ds->dst;
+ const struct dsa_device_ops *tag_ops;
enum dsa_tag_protocol tag_protocol;
tag_protocol = dsa_get_tag_protocol(dp, master);
@@ -1080,14 +1081,16 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
* nothing to do here.
*/
} else {
- dst->tag_ops = dsa_tag_driver_get(tag_protocol);
- if (IS_ERR(dst->tag_ops)) {
- if (PTR_ERR(dst->tag_ops) == -ENOPROTOOPT)
+ tag_ops = dsa_tag_driver_get(tag_protocol);
+ if (IS_ERR(tag_ops)) {
+ if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
return -EPROBE_DEFER;
dev_warn(ds->dev, "No tagger for this switch\n");
dp->master = NULL;
- return PTR_ERR(dst->tag_ops);
+ return PTR_ERR(tag_ops);
}
+
+ dst->tag_ops = tag_ops;
}
dp->master = master;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6bd7ca09af03..fd472eae4f5c 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -705,12 +705,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
return found;
}
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
- if (reqsk_queue_unlink(req)) {
+ bool unlinked = reqsk_queue_unlink(req);
+
+ if (unlinked) {
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
reqsk_put(req);
}
+ return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 47db1bfdaaa0..bc2f6ca97152 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -309,7 +309,7 @@ have_carrier:
*/
static void __init ic_close_devs(void)
{
- struct net_device *selected_dev = ic_dev->dev;
+ struct net_device *selected_dev = ic_dev ? ic_dev->dev : NULL;
struct ic_device *d, *next;
struct net_device *dev;
@@ -317,16 +317,18 @@ static void __init ic_close_devs(void)
next = ic_first_dev;
while ((d = next)) {
bool bring_down = (d != ic_dev);
- struct net_device *lower_dev;
+ struct net_device *lower;
struct list_head *iter;
next = d->next;
dev = d->dev;
- netdev_for_each_lower_dev(selected_dev, lower_dev, iter) {
- if (dev == lower_dev) {
- bring_down = false;
- break;
+ if (selected_dev) {
+ netdev_for_each_lower_dev(selected_dev, lower, iter) {
+ if (dev == lower) {
+ bring_down = false;
+ break;
+ }
}
}
if (bring_down) {
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c576a63d09db..d1e04d2b5170 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
- private = rcu_access_pointer(table->private);
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
const struct arpt_entry *e;
struct xt_counters *counters;
- struct xt_table_info *private = xt_table_get_private_protected(table);
+ struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
if (!IS_ERR(t)) {
struct arpt_getinfo info;
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
}
local_bh_disable();
- private = xt_table_get_private_protected(t);
+ private = t->private;
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
void __user *userptr)
{
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
xt_compat_lock(NFPROTO_ARP);
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e8f6f9d86237..f15bc21d7301 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
WARN_ON(!(table->valid_hooks & (1 << hook)));
local_bh_disable();
addend = xt_write_recseq_begin();
- private = rcu_access_pointer(table->private);
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
const struct ipt_entry *e;
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
int ret = 0;
const void *loc_cpu_entry;
@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
t = xt_request_find_table_lock(net, AF_INET, name);
if (!IS_ERR(t)) {
struct ipt_getinfo info;
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
}
local_bh_disable();
- private = xt_table_get_private_protected(t);
+ private = t->private;
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ea916df1bbf5..f6787c55f6ab 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2669,44 +2669,15 @@ out:
return rth;
}
-static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
-{
- return NULL;
-}
-
-static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
-{
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
- return mtu ? : dst->dev->mtu;
-}
-
-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu,
- bool confirm_neigh)
-{
-}
-
-static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb)
-{
-}
-
-static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
- unsigned long old)
-{
- return NULL;
-}
-
static struct dst_ops ipv4_dst_blackhole_ops = {
- .family = AF_INET,
- .check = ipv4_blackhole_dst_check,
- .mtu = ipv4_blackhole_mtu,
- .default_advmss = ipv4_default_advmss,
- .update_pmtu = ipv4_rt_blackhole_update_pmtu,
- .redirect = ipv4_rt_blackhole_redirect,
- .cow_metrics = ipv4_rt_blackhole_cow_metrics,
- .neigh_lookup = ipv4_neigh_lookup,
+ .family = AF_INET,
+ .default_advmss = ipv4_default_advmss,
+ .neigh_lookup = ipv4_neigh_lookup,
+ .check = dst_blackhole_check,
+ .cow_metrics = dst_blackhole_cow_metrics,
+ .update_pmtu = dst_blackhole_update_pmtu,
+ .redirect = dst_blackhole_redirect,
+ .mtu = dst_blackhole_mtu,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0055ae0a3bf8..7513ba45553d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -804,8 +804,11 @@ embryonic_reset:
tcp_reset(sk, skb);
}
if (!fastopen) {
- inet_csk_reqsk_queue_drop(sk, req);
- __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+ bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
+
+ if (unlinked)
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+ *req_stolen = !unlinked;
}
return NULL;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ef9d022e693f..679699e953f1 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -2486,7 +2486,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
const struct net_device *dev;
if (rt->nh)
- fib6_nh = nexthop_fib6_nh(rt->nh);
+ fib6_nh = nexthop_fib6_nh_bh(rt->nh);
seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index e9d2a4a409aa..80256717868e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -245,16 +245,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (ipv6_addr_is_multicast(&hdr->saddr))
goto err;
- /* While RFC4291 is not explicit about v4mapped addresses
- * in IPv6 headers, it seems clear linux dual-stack
- * model can not deal properly with these.
- * Security models could be fooled by ::ffff:127.0.0.1 for example.
- *
- * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
- */
- if (ipv6_addr_v4mapped(&hdr->saddr))
- goto err;
-
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
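
Note: the removal above drops the blanket rejection of v4-mapped source addresses from the generic IPv6 input path; the protection is re-added where it matters, at connection-request time. The identical guard appears in the dccp_v6, tcp_v6 and MPTCP subflow hunks of this merge:

	/* reject ::ffff:a.b.c.d sources before creating a request socket:
	 * a dual-stack listener could otherwise be fooled by an IPv6 SYN
	 * whose source is really an IPv4 address, e.g. ::ffff:127.0.0.1 */
	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;	/* discard, don't send a reset */
	}
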
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0d453fa9e327..2e2119bfcf13 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
- private = rcu_access_pointer(table->private);
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
const struct ip6t_entry *e;
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
int ret = 0;
const void *loc_cpu_entry;
@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
t = xt_request_find_table_lock(net, AF_INET6, name);
if (!IS_ERR(t)) {
struct ip6t_getinfo info;
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR(t)) {
- struct xt_table_info *private = xt_table_get_private_protected(t);
+ struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
}
local_bh_disable();
- private = xt_table_get_private_protected(t);
+ private = t->private;
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
- const struct xt_table_info *private = xt_table_get_private_protected(table);
+ const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
xt_compat_lock(AF_INET6);
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR(t)) {
- const struct xt_table_info *private = xt_table_get_private_protected(t);
+ const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 60058f3dcc48..ebb7519bec2a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -260,34 +260,16 @@ static struct dst_ops ip6_dst_ops_template = {
.confirm_neigh = ip6_confirm_neigh,
};
-static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
-{
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
- return mtu ? : dst->dev->mtu;
-}
-
-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu,
- bool confirm_neigh)
-{
-}
-
-static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb)
-{
-}
-
static struct dst_ops ip6_dst_blackhole_ops = {
- .family = AF_INET6,
- .destroy = ip6_dst_destroy,
- .check = ip6_dst_check,
- .mtu = ip6_blackhole_mtu,
- .default_advmss = ip6_default_advmss,
- .update_pmtu = ip6_rt_blackhole_update_pmtu,
- .redirect = ip6_rt_blackhole_redirect,
- .cow_metrics = dst_cow_metrics_generic,
- .neigh_lookup = ip6_dst_neigh_lookup,
+ .family = AF_INET6,
+ .default_advmss = ip6_default_advmss,
+ .neigh_lookup = ip6_dst_neigh_lookup,
+ .check = ip6_dst_check,
+ .destroy = ip6_dst_destroy,
+ .cow_metrics = dst_cow_metrics_generic,
+ .update_pmtu = dst_blackhole_update_pmtu,
+ .redirect = dst_blackhole_redirect,
+ .mtu = dst_blackhole_mtu,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index bd44ded7e50c..d0f007741e8e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1175,6 +1175,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
return tcp_conn_request(&tcp6_request_sock_ops,
&tcp_request_sock_ipv6_ops, sk, skb);
diff --git a/net/mac80211/aead_api.c b/net/mac80211/aead_api.c
index d7b3d905d535..b00d6f5b33f4 100644
--- a/net/mac80211/aead_api.c
+++ b/net/mac80211/aead_api.c
@@ -23,6 +23,7 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
struct aead_request *aead_req;
int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
u8 *__aad;
+ int ret;
aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
if (!aead_req)
@@ -40,10 +41,10 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
aead_request_set_ad(aead_req, sg[0].length);
- crypto_aead_encrypt(aead_req);
+ ret = crypto_aead_encrypt(aead_req);
kfree_sensitive(aead_req);
- return 0;
+ return ret;
}
int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 6f3b3a0cc10a..512cab073f2e 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -22,6 +22,7 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
struct aead_request *aead_req;
int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
const __le16 *fc;
+ int ret;
if (data_len < GMAC_MIC_LEN)
return -EINVAL;
@@ -59,10 +60,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
aead_request_set_crypt(aead_req, sg, sg, 0, iv);
aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
- crypto_aead_encrypt(aead_req);
+ ret = crypto_aead_encrypt(aead_req);
kfree_sensitive(aead_req);
- return 0;
+ return ret;
}
struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c4c70e30ad7f..68a0de02b561 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2950,14 +2950,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
continue;
for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
- if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+ if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
sdata->rc_has_mcs_mask[i] = true;
break;
}
}
for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
- if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
+ if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
sdata->rc_has_vht_mcs_mask[i] = true;
break;
}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f552f374e97..a7ac53a2f00d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1874,6 +1874,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
/* remove beacon */
kfree(sdata->u.ibss.ie);
+ sdata->u.ibss.ie = NULL;
+ sdata->u.ibss.ie_len = 0;
/* on the next join, re-program HT parameters */
memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4f3f8bb58e76..1b9c82616606 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -973,8 +973,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
continue;
if (!dflt_chandef.chan) {
+ /*
+ * Assign the first enabled channel to dflt_chandef
+ * from the list of channels
+ */
+ for (i = 0; i < sband->n_channels; i++)
+ if (!(sband->channels[i].flags &
+ IEEE80211_CHAN_DISABLED))
+ break;
+ /* if none found then use the first anyway */
+ if (i == sband->n_channels)
+ i = 0;
cfg80211_chandef_create(&dflt_chandef,
- &sband->channels[0],
+ &sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
if (!local->use_chanctx && !local->_oper_chandef.chan) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2e33a1263518..ce4e3855fec1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -5071,7 +5071,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
ies->data, ies->len);
if (he_oper_ie &&
- he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
+ he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3]))
he_oper = (void *)(he_oper_ie + 3);
else
he_oper = NULL;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2f44f4919789..ecad9b10984f 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -805,7 +805,6 @@ minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
static u16
minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
{
- struct minstrel_mcs_group_data *mg;
u8 type = MINSTREL_SAMPLE_TYPE_INC;
int i, index = 0;
u8 group;
@@ -813,7 +812,6 @@ minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
group = mi->sample[type].sample_group;
for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
- mg = &mi->groups[group];
index = minstrel_ht_group_min_rate_offset(mi, group,
fast_rate_dur);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index f080fcf60e45..c0fa526a45b4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
break;
case WLAN_EID_EXT_HE_OPERATION:
if (len >= sizeof(*elems->he_operation) &&
- len == ieee80211_he_oper_size(data) - 1) {
+ len >= ieee80211_he_oper_size(data) - 1) {
if (crc)
*crc = crc32_be(*crc, (void *)elem,
elem->datalen + 2);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 5fabf3e9a38d..2b7eec93c9f5 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -571,15 +571,15 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
}
static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
- struct in_addr *addr)
+ struct in_addr *addr, u16 port)
{
u8 hmac[SHA256_DIGEST_SIZE];
u8 msg[7];
msg[0] = addr_id;
memcpy(&msg[1], &addr->s_addr, 4);
- msg[5] = 0;
- msg[6] = 0;
+ msg[5] = port >> 8;
+ msg[6] = port & 0xFF;
mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
@@ -588,15 +588,15 @@ static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
- struct in6_addr *addr)
+ struct in6_addr *addr, u16 port)
{
u8 hmac[SHA256_DIGEST_SIZE];
u8 msg[19];
msg[0] = addr_id;
memcpy(&msg[1], &addr->s6_addr, 16);
- msg[17] = 0;
- msg[18] = 0;
+ msg[17] = port >> 8;
+ msg[18] = port & 0xFF;
mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
@@ -650,7 +650,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
opts->ahmac = add_addr_generate_hmac(msk->local_key,
msk->remote_key,
opts->addr_id,
- &opts->addr);
+ &opts->addr,
+ opts->port);
}
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -661,7 +662,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
opts->ahmac = add_addr6_generate_hmac(msk->local_key,
msk->remote_key,
opts->addr_id,
- &opts->addr6);
+ &opts->addr6,
+ opts->port);
}
}
#endif
@@ -971,12 +973,14 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
hmac = add_addr_generate_hmac(msk->remote_key,
msk->local_key,
- mp_opt->addr_id, &mp_opt->addr);
+ mp_opt->addr_id, &mp_opt->addr,
+ mp_opt->port);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
else
hmac = add_addr6_generate_hmac(msk->remote_key,
msk->local_key,
- mp_opt->addr_id, &mp_opt->addr6);
+ mp_opt->addr_id, &mp_opt->addr6,
+ mp_opt->port);
#endif
pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
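
Note: the options.c changes widen the ADD_ADDR HMAC to cover the advertised port — the two trailing message bytes, previously hard-coded to zero, now carry the port in network byte order, so a peer can no longer alter the port without invalidating the HMAC. A small sketch of the IPv4 message layout (helper name hypothetical; the kernel hands this buffer to mptcp_crypto_hmac_sha()):

	#include <stdint.h>
	#include <string.h>

	/* 7-byte ADD_ADDR HMAC input: id, IPv4 address, port (big endian) */
	static void build_add_addr_msg(uint8_t msg[7], uint8_t addr_id,
				       const uint8_t addr[4], uint16_t port)
	{
		msg[0] = addr_id;
		memcpy(&msg[1], addr, 4);
		msg[5] = port >> 8;	/* high byte first: network byte order */
		msg[6] = port & 0xff;
	}
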
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 76958570ae7f..1590b9d4cde2 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2968,7 +2968,7 @@ static void mptcp_release_cb(struct sock *sk)
for (;;) {
flags = 0;
if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
- flags |= MPTCP_PUSH_PENDING;
+ flags |= BIT(MPTCP_PUSH_PENDING);
if (!flags)
break;
@@ -2981,7 +2981,7 @@ static void mptcp_release_cb(struct sock *sk)
*/
spin_unlock_bh(&sk->sk_lock.slock);
- if (flags & MPTCP_PUSH_PENDING)
+ if (flags & BIT(MPTCP_PUSH_PENDING))
__mptcp_push_pending(sk, 0);
cond_resched();
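
Note: the protocol.c fix is a classic flag-versus-bit-index confusion — MPTCP_PUSH_PENDING is a bit *number* consumed by test_and_clear_bit(), so ORing it into a plain flags word needs BIT(). A two-line illustration (the value 1 is hypothetical):

	#define BIT(n)	(1UL << (n))
	#define MPTCP_PUSH_PENDING 1		/* bit index, illustrative value */

	unsigned long flags = 0;
	flags |= MPTCP_PUSH_PENDING;		/* bug: sets bit 0 instead */
	flags |= BIT(MPTCP_PUSH_PENDING);	/* fix: sets bit 1, as intended */
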
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 3d47d670e665..d17d39ccdf34 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -477,6 +477,11 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
return tcp_conn_request(&mptcp_subflow_request_sock_ops,
&subflow_request_sock_ipv6_ops, sk, skb);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1469365bac7e..1d519b0e51a5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2962,6 +2962,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
memset(&m, 0xFF, sizeof(m));
memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
m.src.u.all = mask->src.u.all;
+ m.src.l3num = tuple->src.l3num;
m.dst.protonum = tuple->dst.protonum;
nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 5b05487a60d2..db11e403d818 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -218,9 +218,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
- if (state->pf != NFPROTO_IPV4)
- return -NF_ACCEPT;
-
if (!nf_ct_is_confirmed(ct)) {
unsigned int *timeouts = nf_ct_timeout_lookup(ct);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 8fa7bf9d5f3f..1bce1d2805c4 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -512,7 +512,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
{
int err;
- INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+ INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
flow_block_init(&flowtable->flow_block);
init_rwsem(&flowtable->flow_block_lock);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index bd5e8122ea5e..fc2526b8bd55 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -6787,6 +6787,9 @@ static int nft_register_flowtable_net_hooks(struct net *net,
list_for_each_entry(hook, hook_list, list) {
list_for_each_entry(ft, &table->flowtables, list) {
+ if (!nft_is_active_next(net, ft))
+ continue;
+
list_for_each_entry(hook2, &ft->hook_list, list) {
if (hook->ops.dev == hook2->ops.dev &&
hook->ops.pf == hook2->ops.pf) {
@@ -6846,6 +6849,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
struct nft_hook *hook, *next;
struct nft_trans *trans;
bool unregister = false;
+ u32 flags;
int err;
err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
@@ -6860,6 +6864,17 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
}
}
+ if (nla[NFTA_FLOWTABLE_FLAGS]) {
+ flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+ if (flags & ~NFT_FLOWTABLE_MASK)
+ return -EOPNOTSUPP;
+ if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
+ (flags & NFT_FLOWTABLE_HW_OFFLOAD))
+ return -EOPNOTSUPP;
+ } else {
+ flags = flowtable->data.flags;
+ }
+
err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
&flowtable_hook.list, flowtable);
if (err < 0)
@@ -6873,6 +6888,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
goto err_flowtable_update_hook;
}
+ nft_trans_flowtable_flags(trans) = flags;
nft_trans_flowtable(trans) = flowtable;
nft_trans_flowtable_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
@@ -6967,8 +6983,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
if (nla[NFTA_FLOWTABLE_FLAGS]) {
flowtable->data.flags =
ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
- if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
+ if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) {
+ err = -EOPNOTSUPP;
goto err3;
+ }
}
write_pnet(&flowtable->data.net, net);
@@ -8179,6 +8197,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans)) {
+ nft_trans_flowtable(trans)->data.flags =
+ nft_trans_flowtable_flags(trans);
nf_tables_flowtable_notify(&trans->ctx,
nft_trans_flowtable(trans),
&nft_trans_flowtable_hooks(trans),
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bce6ca203d46..6bd31a7a27fc 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1351,14 +1351,6 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
}
EXPORT_SYMBOL(xt_counters_alloc);
-struct xt_table_info
-*xt_table_get_private_protected(const struct xt_table *table)
-{
- return rcu_dereference_protected(table->private,
- mutex_is_locked(&xt[table->af].mutex));
-}
-EXPORT_SYMBOL(xt_table_get_private_protected);
-
struct xt_table_info *
xt_replace_table(struct xt_table *table,
unsigned int num_counters,
@@ -1366,6 +1358,7 @@ xt_replace_table(struct xt_table *table,
int *error)
{
struct xt_table_info *private;
+ unsigned int cpu;
int ret;
ret = xt_jumpstack_alloc(newinfo);
@@ -1375,20 +1368,47 @@ xt_replace_table(struct xt_table *table,
}
/* Do the substitution. */
- private = xt_table_get_private_protected(table);
+ local_bh_disable();
+ private = table->private;
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
+ local_bh_enable();
*error = -EAGAIN;
return NULL;
}
newinfo->initial_entries = private->initial_entries;
+ /*
+ * Ensure contents of newinfo are visible before assigning to
+ * private.
+ */
+ smp_wmb();
+ table->private = newinfo;
+
+ /* make sure all cpus see new ->private value */
+ smp_mb();
- rcu_assign_pointer(table->private, newinfo);
- synchronize_rcu();
+ /*
+ * Even though table entries have now been swapped, other CPU's
+ * may still be using the old entries...
+ */
+ local_bh_enable();
+
+ /* ... so wait for even xt_recseq on all cpus */
+ for_each_possible_cpu(cpu) {
+ seqcount_t *s = &per_cpu(xt_recseq, cpu);
+ u32 seq = raw_read_seqcount(s);
+
+ if (seq & 1) {
+ do {
+ cond_resched();
+ cpu_relax();
+ } while (seq == raw_read_seqcount(s));
+ }
+ }
audit_log_nfcfg(table->name, table->af, private->number,
!private->number ? AUDIT_XT_OP_REGISTER :
@@ -1424,12 +1444,12 @@ struct xt_table *xt_register_table(struct net *net,
}
/* Simplifies replace_table code. */
- rcu_assign_pointer(table->private, bootstrap);
+ table->private = bootstrap;
if (!xt_replace_table(table, 0, newinfo, &ret))
goto unlock;
- private = xt_table_get_private_protected(table);
+ private = table->private;
pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
@@ -1452,8 +1472,7 @@ void *xt_unregister_table(struct xt_table *table)
struct xt_table_info *private;
mutex_lock(&xt[table->af].mutex);
- private = xt_table_get_private_protected(table);
- RCU_INIT_POINTER(table->private, NULL);
+ private = table->private;
list_del(&table->list);
mutex_unlock(&xt[table->af].mutex);
audit_log_nfcfg(table->name, table->af, private->number,
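
Note: the x_tables rework above drops the misuse of RCU primitives on table->private — packet-path readers never take rcu_read_lock(); they bump a per-cpu sequence counter — and restores the original quiescence scheme: publish the new table, then wait until every CPU's xt_recseq is even or has moved on. A condensed user-space sketch of that scheme, assuming C11 atomics:

	#include <stdatomic.h>

	#define NR_CPUS 4
	static _Atomic unsigned int recseq[NR_CPUS];	/* cf. per-cpu xt_recseq */

	/* packet path: sequence is odd while traversing the (possibly old) table */
	static void traverse(int cpu)
	{
		atomic_fetch_add(&recseq[cpu], 1);	/* enter: seq becomes odd */
		/* ... evaluate rules against table->private ... */
		atomic_fetch_add(&recseq[cpu], 1);	/* leave: seq even again */
	}

	/* replace path: new table already published behind a write barrier */
	static void wait_for_old_readers(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			unsigned int seq = atomic_load(&recseq[cpu]);

			if (seq & 1)			/* caught mid-traversal */
				while (atomic_load(&recseq[cpu]) == seq)
					;		/* kernel: cond_resched()/cpu_relax() */
		}
	}
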
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 5eddfe7bd391..71cec03e8612 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -271,9 +271,11 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
/* This is called to initialize CT key fields possibly coming in from the local
* stack.
*/
-void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+void ovs_ct_fill_key(const struct sk_buff *skb,
+ struct sw_flow_key *key,
+ bool post_ct)
{
- ovs_ct_update_key(skb, NULL, key, false, false);
+ ovs_ct_update_key(skb, NULL, key, post_ct, false);
}
int ovs_ct_put_key(const struct sw_flow_key *swkey,
@@ -1332,7 +1334,7 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
if (skb_nfct(skb)) {
nf_conntrack_put(skb_nfct(skb));
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
- ovs_ct_fill_key(skb, key);
+ ovs_ct_fill_key(skb, key, false);
}
return 0;
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
index 59dc32761b91..317e525c8a11 100644
--- a/net/openvswitch/conntrack.h
+++ b/net/openvswitch/conntrack.h
@@ -25,7 +25,8 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
const struct ovs_conntrack_info *);
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key);
-void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key,
+ bool post_ct);
int ovs_ct_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, struct sk_buff *skb);
void ovs_ct_free_action(const struct nlattr *a);
@@ -74,7 +75,8 @@ static inline int ovs_ct_clear(struct sk_buff *skb,
}
static inline void ovs_ct_fill_key(const struct sk_buff *skb,
- struct sw_flow_key *key)
+ struct sw_flow_key *key,
+ bool post_ct)
{
key->ct_state = 0;
key->ct_zone = 0;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c7f34d6a9934..e586424d8b04 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -857,6 +857,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
struct tc_skb_ext *tc_ext;
#endif
+ bool post_ct = false;
int res, err;
/* Extract metadata from packet. */
@@ -895,6 +896,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
tc_ext = skb_ext_find(skb, TC_SKB_EXT);
key->recirc_id = tc_ext ? tc_ext->chain : 0;
OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
+ post_ct = tc_ext ? tc_ext->post_ct : false;
} else {
key->recirc_id = 0;
}
@@ -904,7 +906,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
err = key_extract(skb, key);
if (!err)
- ovs_ct_fill_key(skb, key); /* Must be after key_extract(). */
+ ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
return err;
}
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index edb6ac17ceca..dfc820ee553a 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1058,6 +1058,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
rc = copied;
if (addr) {
+ /* There is an anonymous 2-byte hole after sq_family,
+ * make sure to clear it.
+ */
+ memset(addr, 0, sizeof(*addr));
+
addr->sq_family = AF_QIPCRTR;
addr->sq_node = cb->src_node;
addr->sq_port = cb->src_port;
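
Note: the qrtr fix plugs a kernel infoleak through struct padding — sockaddr_qipcrtr has a 2-byte alignment hole after sq_family, and assigning the named fields one by one leaves stale stack bytes in that hole when the struct is later copied to user space. A sketch of the bug class (layout illustrative; the exact hole depends on the ABI):

	#include <string.h>

	struct addr_like {
		unsigned short family;	/* 2 bytes */
					/* 2-byte padding hole on most ABIs */
		unsigned int node;
		unsigned int port;
	};

	static void fill(struct addr_like *a)
	{
		memset(a, 0, sizeof(*a));	/* clears the hole too */
		a->family = 42;
		a->node = 1;
		a->port = 2;
	}
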
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index f0a0aa125b00..16e888a9601d 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -945,13 +945,14 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&c->tcf_tm);
if (clear) {
+ qdisc_skb_cb(skb)->post_ct = false;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
nf_conntrack_put(&ct->ct_general);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}
- goto out;
+ goto out_clear;
}
family = tcf_ct_skb_nf_family(skb);
@@ -1030,8 +1031,9 @@ out_push:
skb_push_rcsum(skb, nh_ofs);
out:
- tcf_action_update_bstats(&c->common, skb);
qdisc_skb_cb(skb)->post_ct = true;
+out_clear:
+ tcf_action_update_bstats(&c->common, skb);
if (defrag)
qdisc_skb_cb(skb)->pkt_len = skb->len;
return retval;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ca8e177bf31b..d3db70865d66 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1629,6 +1629,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
return TC_ACT_SHOT;
ext->chain = last_executed_chain;
ext->mru = qdisc_skb_cb(skb)->mru;
+ ext->post_ct = qdisc_skb_cb(skb)->post_ct;
}
return ret;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9736df97e04d..d7869a984881 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1451,7 +1451,7 @@ static int fl_set_key_ct(struct nlattr **tb,
&mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
sizeof(key->ct_state));
- err = fl_validate_ct_state(mask->ct_state,
+ err = fl_validate_ct_state(key->ct_state & mask->ct_state,
tb[TCA_FLOWER_KEY_CT_STATE_MASK],
extack);
if (err)
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 50f680f03a54..2adbd945bf15 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -345,6 +345,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
struct sk_buff **old = NULL;
unsigned int mask;
u32 max_P;
+ u8 *stab;
if (opt == NULL)
return -EINVAL;
@@ -361,8 +362,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
-
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
+ stab = nla_data(tb[TCA_CHOKE_STAB]);
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
return -EINVAL;
if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -412,7 +413,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
- nla_data(tb[TCA_CHOKE_STAB]),
+ stab,
max_P);
red_set_vars(&q->vars);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index e0bc77533acc..f4132dc25ac0 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
return -EINVAL;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index dff3adf5a915..62e12cb41a3e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1020,6 +1020,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_glob *gopt;
unsigned int ntx;
+ bool offload;
int err;
qdisc_watchdog_init(&q->watchdog, sch);
@@ -1044,9 +1045,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
if (gopt->version != HTB_VER >> 16)
return -EINVAL;
- q->offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
+ offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
- if (q->offload) {
+ if (offload) {
if (sch->parent != TC_H_ROOT)
return -EOPNOTSUPP;
@@ -1076,7 +1077,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
q->rate2quantum = 1;
q->defcls = gopt->defcls;
- if (!q->offload)
+ if (!offload)
return 0;
for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
@@ -1107,12 +1108,14 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
if (err)
goto err_free_qdiscs;
+ /* Defer this assignment, so that htb_destroy skips offload-related
+ * parts (especially calling ndo_setup_tc) on errors.
+ */
+ q->offload = true;
+
return 0;
err_free_qdiscs:
- /* TC_HTB_CREATE call failed, avoid any further calls to the driver. */
- q->offload = false;
-
for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
ntx++)
qdisc_put(q->direct_qdiscs[ntx]);
@@ -1340,8 +1343,12 @@ htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
{
struct net_device *dev = qdisc_dev(sch);
struct tc_htb_qopt_offload offload_opt;
+ struct htb_sched *q = qdisc_priv(sch);
int err;
+ if (!q->offload)
+ return sch->dev_queue;
+
offload_opt = (struct tc_htb_qopt_offload) {
.command = TC_HTB_LEAF_QUERY_QUEUE,
.classid = TC_H_MIN(tcm->tcm_parent),
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index b4ae34d7aa96..40adf1f07a82 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -242,6 +242,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
unsigned char flags;
int err;
u32 max_P;
+ u8 *stab;
if (tb[TCA_RED_PARMS] == NULL ||
tb[TCA_RED_STAB] == NULL)
@@ -250,7 +251,9 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
ctl = nla_data(tb[TCA_RED_PARMS]);
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
+ stab = nla_data(tb[TCA_RED_STAB]);
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Scell_log, stab))
return -EINVAL;
err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
@@ -288,7 +291,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
red_set_parms(&q->parms,
ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
- nla_data(tb[TCA_RED_STAB]),
+ stab,
max_P);
red_set_vars(&q->vars);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b25e51440623..066754a18569 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
}
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
- ctl_v1->Wlog, ctl_v1->Scell_log))
+ ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
return -EINVAL;
if (ctl_v1 && ctl_v1->qth_min) {
p = kmalloc(sizeof(*p), GFP_KERNEL);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6614c9fdc51e..a6aa17df09ef 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -584,13 +584,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
goto out;
}
- rcu_read_lock();
- if (__sk_dst_get(sk) != tp->dst) {
- dst_hold(tp->dst);
- sk_setup_caps(sk, tp->dst);
- }
- rcu_read_unlock();
-
/* pack up chunks */
pkt_count = sctp_packet_pack(packet, head, gso, gfp);
if (!pkt_count) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 3fd06a27105d..5cb1aa5f067b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1135,6 +1135,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
{
+ struct sock *sk = ctx->asoc->base.sk;
struct list_head *ltransport;
struct sctp_packet *packet;
struct sctp_transport *t;
@@ -1144,6 +1145,12 @@ static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
t = list_entry(ltransport, struct sctp_transport, send_ready);
packet = &t->packet;
if (!sctp_packet_empty(packet)) {
+ rcu_read_lock();
+ if (t->dst && __sk_dst_get(sk) != t->dst) {
+ dst_hold(t->dst);
+ sk_setup_caps(sk, t->dst);
+ }
+ rcu_read_unlock();
error = sctp_packet_transmit(packet, ctx->gfp);
if (error < 0)
ctx->q->asoc->base.sk->sk_err = -error;
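Taken together with the hunk removed from sctp_packet_transmit() above, this moves the sk_setup_caps() call to the one place where the transport about to transmit is unambiguous, and adds a t->dst NULL check the old site lacked. The refcounting rule the block follows: sk_setup_caps() stores the dst in sk->sk_dst_cache, so the caller must donate a reference, hence the dst_hold() under rcu_read_lock(), which pins the dst between the comparison and the hold. The fragment below is the same logic as the hunk; the comments are annotation, not new behavior:

	rcu_read_lock();			/* pins t->dst against release */
	if (t->dst && __sk_dst_get(sk) != t->dst) {
		dst_hold(t->dst);		/* reference donated to the socket */
		sk_setup_caps(sk, t->dst);	/* caches dst, updates GSO/csum caps */
	}
	rcu_read_unlock();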
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index bd4678db9d76..6dff64374bfe 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1825,11 +1825,14 @@ static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
- struct rpc_gss_wire_cred *gc = &gsd->clcred;
+ struct rpc_gss_wire_cred *gc;
struct xdr_buf *resbuf = &rqstp->rq_res;
int stat = -EINVAL;
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ if (!gsd)
+ goto out;
+ gc = &gsd->clcred;
if (gc->gc_proc != RPC_GSS_PROC_DATA)
goto out;
/* Release can be called twice, but we only wrap once. */
@@ -1870,10 +1873,10 @@ out_err:
if (rqstp->rq_cred.cr_group_info)
put_group_info(rqstp->rq_cred.cr_group_info);
rqstp->rq_cred.cr_group_info = NULL;
- if (gsd->rsci)
+ if (gsd && gsd->rsci) {
cache_put(&gsd->rsci->h, sn->rsc_cache);
- gsd->rsci = NULL;
-
+ gsd->rsci = NULL;
+ }
return stat;
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cf702a5f7fe5..39ed0e0afe6d 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -963,8 +963,11 @@ void rpc_execute(struct rpc_task *task)
rpc_set_active(task);
rpc_make_runnable(rpciod_workqueue, task);
- if (!is_async)
+ if (!is_async) {
+ unsigned int pflags = memalloc_nofs_save();
__rpc_execute(task);
+ memalloc_nofs_restore(pflags);
+ }
}
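With this change the synchronous rpc_execute() path runs under the same NOFS allocation scope that rpc_async_schedule() already establishes for workqueue execution: every allocation made while the task runs implicitly loses __GFP_FS, so memory reclaim triggered mid-RPC cannot recurse into the filesystem that issued the RPC. A minimal usage sketch of the scoped API (memalloc_nofs_save/restore are the real interface from <linux/sched/mm.h>; the surrounding function is illustrative):

	#include <linux/sched/mm.h>

	static void run_under_nofs(void)
	{
		unsigned int pflags = memalloc_nofs_save();  /* sets PF_MEMALLOC_NOFS */

		/* allocations here behave as if GFP_NOFS had been passed */
		do_allocating_work();

		memalloc_nofs_restore(pflags);  /* restores the saved flag state */
	}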
static void rpc_async_schedule(struct work_struct *work)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 61fb8a18552c..d76dc9d95d16 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1413,7 +1413,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
sendit:
if (svc_authorise(rqstp))
- goto close;
+ goto close_xprt;
return 1; /* Caller can now send it */
release_dropit:
@@ -1425,6 +1425,8 @@ release_dropit:
return 0;
close:
+ svc_authorise(rqstp);
+close_xprt:
if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
svc_close_xprt(rqstp->rq_xprt);
dprintk("svc: svc_process close\n");
@@ -1433,7 +1435,7 @@ release_dropit:
err_short_len:
svc_printk(rqstp, "short len %zd, dropping request\n",
argv->iov_len);
- goto close;
+ goto close_xprt;
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index dcc50ae54550..3cdd71a8df1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1060,7 +1060,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
struct svc_xprt *xprt;
int ret = 0;
- spin_lock(&serv->sv_lock);
+ spin_lock_bh(&serv->sv_lock);
list_for_each_entry(xprt, xprt_list, xpt_list) {
if (xprt->xpt_net != net)
continue;
@@ -1068,7 +1068,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
}
- spin_unlock(&serv->sv_lock);
+ spin_unlock_bh(&serv->sv_lock);
return ret;
}
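Switching svc_close_list() to the _bh variants suggests sv_lock can also be taken from bottom-half context; if so, a process-context holder must disable BHs, or a softirq arriving on the same CPU could spin forever on a lock its own CPU already owns. The general shape of the rule (sketch, not the file's code):

	/* any lock shared with softirq context must be taken with BHs off */
	spin_lock_bh(&serv->sv_lock);	/* disables bottom halves locally */
	/* ... traverse or modify the protected list ... */
	spin_unlock_bh(&serv->sv_lock);	/* re-enables BHs, runs any pending ones */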
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 4a1edbb4028e..9150df35fb6f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -252,9 +252,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
xprt->timeout = &xprt_rdma_bc_timeout;
xprt_set_bound(xprt);
xprt_set_connected(xprt);
- xprt->bind_timeout = RPCRDMA_BIND_TO;
- xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
- xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
+ xprt->bind_timeout = 0;
+ xprt->reestablish_timeout = 0;
+ xprt->idle_timeout = 0;
xprt->prot = XPRT_TRANSPORT_BC_RDMA;
xprt->ops = &xprt_rdma_bc_procs;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 6d28f23ceb35..7d34290e2ff8 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -266,46 +266,33 @@ void svc_rdma_release_rqst(struct svc_rqst *rqstp)
svc_rdma_recv_ctxt_put(rdma, ctxt);
}
-static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
- unsigned int wanted, bool temp)
+static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
+ struct svc_rdma_recv_ctxt *ctxt)
{
- const struct ib_recv_wr *bad_wr = NULL;
- struct svc_rdma_recv_ctxt *ctxt;
- struct ib_recv_wr *recv_chain;
int ret;
- recv_chain = NULL;
- while (wanted--) {
- ctxt = svc_rdma_recv_ctxt_get(rdma);
- if (!ctxt)
- break;
-
- trace_svcrdma_post_recv(ctxt);
- ctxt->rc_temp = temp;
- ctxt->rc_recv_wr.next = recv_chain;
- recv_chain = &ctxt->rc_recv_wr;
- rdma->sc_pending_recvs++;
- }
- if (!recv_chain)
- return false;
-
- ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
+ trace_svcrdma_post_recv(ctxt);
+ ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
if (ret)
goto err_post;
- return true;
+ return 0;
err_post:
- while (bad_wr) {
- ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
- rc_recv_wr);
- bad_wr = bad_wr->next;
- svc_rdma_recv_ctxt_put(rdma, ctxt);
- }
-
trace_svcrdma_rq_post_err(rdma, ret);
- /* Since we're destroying the xprt, no need to reset
- * sc_pending_recvs. */
- return false;
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
+ return ret;
+}
+
+static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+{
+ struct svc_rdma_recv_ctxt *ctxt;
+
+ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
+ return 0;
+ ctxt = svc_rdma_recv_ctxt_get(rdma);
+ if (!ctxt)
+ return -ENOMEM;
+ return __svc_rdma_post_recv(rdma, ctxt);
}
/**
@@ -316,7 +303,20 @@ err_post:
*/
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
- return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
+ struct svc_rdma_recv_ctxt *ctxt;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < rdma->sc_max_requests; i++) {
+ ctxt = svc_rdma_recv_ctxt_get(rdma);
+ if (!ctxt)
+ return false;
+ ctxt->rc_temp = true;
+ ret = __svc_rdma_post_recv(rdma, ctxt);
+ if (ret)
+ return false;
+ }
+ return true;
}
/**
@@ -324,6 +324,8 @@ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
* @cq: Completion Queue context
* @wc: Work Completion object
*
+ * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
+ * the Receive completion handler could be running.
*/
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
@@ -331,8 +333,6 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_recv_ctxt *ctxt;
- rdma->sc_pending_recvs--;
-
/* WARNING: Only wc->wr_cqe and wc->status are reliable */
ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
@@ -340,6 +340,9 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS)
goto flushed;
+ if (svc_rdma_post_recv(rdma))
+ goto post_err;
+
/* All wc fields are now known to be valid */
ctxt->rc_byte_len = wc->byte_len;
@@ -350,18 +353,11 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
spin_unlock(&rdma->sc_rq_dto_lock);
if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
svc_xprt_enqueue(&rdma->sc_xprt);
-
- if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) &&
- rdma->sc_pending_recvs < rdma->sc_max_requests)
- if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH,
- false))
- goto post_err;
-
return;
flushed:
- svc_rdma_recv_ctxt_put(rdma, ctxt);
post_err:
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
svc_xprt_enqueue(&rdma->sc_xprt);
}
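These recvfrom hunks back out the batched Receive scheme: instead of counting sc_pending_recvs and refilling the queue in RPCRDMA_MAX_RECV_BATCH chunks, each successful completion immediately posts exactly one replacement buffer, so the Receive queue depth is self-maintaining and the pending counter disappears. The two doorbell styles side by side (illustrative fragment; qp, ctxt, chain and bad_wr stand in for the fields used above):

	/* removed scheme: chain several WRs, ring the doorbell once */
	ctxt->rc_recv_wr.next = chain;
	chain = &ctxt->rc_recv_wr;
	ret = ib_post_recv(qp, chain, &bad_wr);	/* bad_wr reports where posting stopped */

	/* restored scheme: one WR, one doorbell, per completion */
	ret = ib_post_recv(qp, &ctxt->rc_recv_wr, NULL);

Fewer doorbells cost less per packet, but the one-for-one scheme makes it much harder for the queue to run dry because a refresh was skipped or miscounted.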
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0daf3be11ed1..61c38eaaa298 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2900,17 +2900,22 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
- struct tipc_aead_key **key)
+ struct tipc_aead_key **pkey)
{
struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+ struct tipc_aead_key *key;
if (!attr)
return -ENODATA;
- *key = (struct tipc_aead_key *)nla_data(attr);
- if (nla_len(attr) < tipc_aead_key_size(*key))
+ if (nla_len(attr) < sizeof(*key))
+ return -EINVAL;
+ key = (struct tipc_aead_key *)nla_data(attr);
+ if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
+ nla_len(attr) < tipc_aead_key_size(key))
return -EINVAL;
+ *pkey = key;
return 0;
}
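The tipc change fixes an order-of-operations bug: tipc_aead_key_size() reads key->keylen, so the old code dereferenced the attribute payload before checking that the payload was even large enough to contain the fixed header. The repaired two-stage check is an instance of the standard idiom for variable-length netlink payloads (an annotated restatement of the hunk above):

	if (nla_len(attr) < sizeof(*key))
		return -EINVAL;		/* too short to read keylen safely */
	key = (struct tipc_aead_key *)nla_data(attr);
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||	/* bound the length field */
	    nla_len(attr) < tipc_aead_key_size(key))	/* header + key must fit */
		return -EINVAL;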
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 5546710d8ac1..bc7fb9bf3351 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -755,6 +755,7 @@ static struct sock *__vsock_create(struct net *net,
vsk->buffer_size = psk->buffer_size;
vsk->buffer_min_size = psk->buffer_min_size;
vsk->buffer_max_size = psk->buffer_max_size;
+ security_sk_clone(parent, sk);
} else {
vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
vsk->owner = get_current_cred();
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 521d36bb0803..034af85f79d8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -70,7 +70,7 @@ __cfg80211_wdev_from_attrs(struct cfg80211_registered_device *rdev,
struct wireless_dev *result = NULL;
bool have_ifidx = attrs[NL80211_ATTR_IFINDEX];
bool have_wdev_id = attrs[NL80211_ATTR_WDEV];
- u64 wdev_id;
+ u64 wdev_id = 0;
int wiphy_idx = -1;
int ifidx = -1;
@@ -14789,6 +14789,7 @@ bad_tid_conf:
#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
NL80211_FLAG_CHECK_NETDEV_UP)
#define NL80211_FLAG_CLEAR_SKB 0x20
+#define NL80211_FLAG_NO_WIPHY_MTX 0x40
static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
@@ -14840,7 +14841,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
info->user_ptr[0] = rdev;
}
- if (rdev) {
+ if (rdev && !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
wiphy_lock(&rdev->wiphy);
/* we keep the mutex locked until post_doit */
__release(&rdev->wiphy.mtx);
@@ -14865,7 +14866,8 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
}
}
- if (info->user_ptr[0]) {
+ if (info->user_ptr[0] &&
+ !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
struct cfg80211_registered_device *rdev = info->user_ptr[0];
/* we kept the mutex locked since pre_doit */
@@ -15329,7 +15331,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_wiphy_netns,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_NO_WIPHY_MTX,
},
{
.cmd = NL80211_CMD_GET_SURVEY,