author     Linus Torvalds   2021-06-30 15:51:09 -0700
committer  Linus Torvalds   2021-06-30 15:51:09 -0700
commit     dbe69e43372212527abf48609aba7fc39a6daa27 (patch)
tree       96cfafdf70f5325ceeac1054daf7deca339c9730 /net/sctp
parent     a6eaf3850cb171c328a8b0db6d3c79286a1eba9d (diff)
parent     b6df00789e2831fff7a2c65aa7164b2a4dcbe599 (diff)
Merge tag 'net-next-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
"Core:
- BPF:
- add syscall program type and libbpf support for generating
instructions and bindings for in-kernel BPF loaders (BPF loaders
for BPF); this is a stepping stone for signed BPF programs
- infrastructure to migrate TCP child sockets from one listener to
another in the same reuseport group/map to improve flexibility
of service hand-off/restart
- add broadcast support to XDP redirect
- allow bypass of the lockless qdisc to improve performance (for
pktgen: +23% with one thread, +44% with 2 threads)
- add a simpler version of "DO_ONCE()" which does not require jump
labels, intended for slow-path usage
- virtio/vsock: introduce SOCK_SEQPACKET support
- add getsockopt() support to retrieve the netns cookie
- ip: treat the lowest address of an IPv4 subnet as an ordinary unicast
address, allowing reclamation of precious IPv4 addresses
- ipv6: use prandom_u32() for ID generation
- ip: add support for more flexible field selection for hashing
across multi-path routes (w/ offload to mlxsw)
- icmp: add support for extended RFC 8335 PROBE (ping)
- seg6: add support for SRv6 End.DT46 behavior
- mptcp:
- DSS checksum support (RFC 8684) to detect middlebox meddling
- support Connection-time 'C' flag
- time stamping support
- sctp: Packetization Layer Path MTU Discovery (RFC 8899); see the usage sketch after this message
- xfrm: speed up state addition with seq set
- WiFi:
- hidden AP discovery on 6 GHz and other HE 6 GHz improvements
- aggregation handling improvements for some drivers
- minstrel improvements for no-ack frames
- deferred rate control for TXQs to improve reaction times
- switch from round robin to virtual time-based airtime scheduler
- add trace points:
- tcp checksum errors
- openvswitch - action execution, upcalls
- socket errors via sk_error_report
Device APIs:
- devlink: add rate API for hierarchical control of max egress rate
of virtual devices (VFs, SFs etc.)
- don't require RCU read lock to be held around BPF hooks in NAPI
context
- page_pool: generic buffer recycling
New hardware/drivers:
- mobile:
- iosm: PCIe Driver for Intel M.2 Modem
- support for Qualcomm MSM8998 (ipa)
- WiFi: Qualcomm QCN9074 and WCN6855 PCI devices
- sparx5: Microchip SparX-5 family of Enterprise Ethernet switches
- Mellanox BlueField Gigabit Ethernet (control NIC of the DPU)
- NXP SJA1110 Automotive Ethernet 10-port switch
- Qualcomm QCA8327 switch support (qca8k)
- Mikrotik 10/25G NIC (atl1c)
Driver changes:
- ACPI support for some MDIO, MAC and PHY devices from Marvell and
NXP (our first foray into MAC/PHY description via ACPI)
- HW timestamping (PTP) support: bnxt_en, ice, sja1105, hns3, tja11xx
- Mellanox/Nvidia NIC (mlx5)
- NIC VF offload of L2 bridging
- support IRQ distribution to Sub-functions
- Marvell (prestera):
- add flower and match all
- devlink trap
- link aggregation
- Netronome (nfp): connection tracking offload
- Intel 1GE (igc): add AF_XDP support
- Marvell DPU (octeontx2): ingress ratelimit offload
- Google vNIC (gve): new ring/descriptor format support
- Qualcomm mobile (rmnet & ipa): inline checksum offload support
- MediaTek WiFi (mt76)
- mt7915 MSI support
- mt7915 Tx status reporting
- mt7915 thermal sensors support
- mt7921 decapsulation offload
- mt7921 enable runtime pm and deep sleep
- Realtek WiFi (rtw88)
- beacon filter support
- Tx antenna path diversity support
- firmware crash information via devcoredump
- Qualcomm WiFi (wcn36xx)
- Wake-on-WLAN support with magic packets and GTK rekeying
- Micrel PHY (ksz886x/ksz8081): add cable test support"
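The SCTP Packetization Layer Path MTU Discovery item called out above is what most of the net/sctp diff below implements. A minimal userspace sketch of turning it on, assuming the uapi names this series adds (struct sctp_probeinterval, SCTP_PLPMTUD_PROBE_INTERVAL, the net.sctp.plpmtud_probe_interval sysctl) are exposed by the installed headers; the 5000 ms interval and the use of SCTP_FUTURE_ASSOC are illustrative choices, not values taken from the patch:

  /* Hedged sketch: enable RFC 8899 PLPMTUD probing on an SCTP socket.
   * The per-netns default can instead be set with:
   *   sysctl -w net.sctp.plpmtud_probe_interval=5000
   */
  #include <string.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <netinet/sctp.h>

  static int enable_plpmtud(int sctp_fd)
  {
          struct sctp_probeinterval pi;

          memset(&pi, 0, sizeof(pi));
          pi.spi_assoc_id = SCTP_FUTURE_ASSOC;  /* apply to future associations */
          /* pi.spi_address left as the wildcard: apply to every peer transport */
          pi.spi_interval = 5000;               /* probe interval in ms; 0 disables probing */

          return setsockopt(sctp_fd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
                            &pi, sizeof(pi));
  }

Per the setsockopt handler in the diff, a non-zero interval below the kernel's minimum is rejected with -EINVAL, and passing a specific peer address in spi_address restricts the setting to that transport.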
* tag 'net-next-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2168 commits)
tcp: change ICSK_CA_PRIV_SIZE definition
tcp_yeah: check struct yeah size at compile time
gve: DQO: Fix off by one in gve_rx_dqo()
stmmac: intel: set PCI_D3hot in suspend
stmmac: intel: Enable PHY WOL option in EHL
net: stmmac: option to enable PHY WOL with PMT enabled
net: say "local" instead of "static" addresses in ndo_dflt_fdb_{add,del}
net: use netdev_info in ndo_dflt_fdb_{add,del}
ptp: Set lookup cookie when creating a PTP PPS source.
net: sock: add trace for socket errors
net: sock: introduce sk_error_report
net: dsa: replay the local bridge FDB entries pointing to the bridge dev too
net: dsa: ensure during dsa_fdb_offload_notify that dev_hold and dev_put are on the same dev
net: dsa: include fdb entries pointing to bridge in the host fdb list
net: dsa: include bridge addresses which are local in the host fdb list
net: dsa: sync static FDB entries on foreign interfaces to hardware
net: dsa: install the host MDB and FDB entries in the master's RX filter
net: dsa: reference count the FDB addresses at the cross-chip notifier level
net: dsa: introduce a separate cross-chip notifier type for host FDBs
net: dsa: reference count the MDB entries at the cross-chip notifier level
...
Diffstat (limited to 'net/sctp')
 net/sctp/associola.c     |   6
 net/sctp/bind_addr.c     |  19
 net/sctp/debug.c         |   1
 net/sctp/input.c         | 144
 net/sctp/ipv6.c          | 121
 net/sctp/output.c        |  33
 net/sctp/outqueue.c      |  13
 net/sctp/protocol.c      |  29
 net/sctp/sm_make_chunk.c |  73
 net/sctp/sm_sideeffect.c |  37
 net/sctp/sm_statefuns.c  |  70
 net/sctp/sm_statetable.c |  43
 net/sctp/socket.c        | 123
 net/sctp/sysctl.c        |  35
 net/sctp/transport.c     | 150
 15 files changed, 728 insertions(+), 169 deletions(-)
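One detail worth noting before the diff: a PLPMTUD probe is a HEARTBEAT chunk with a PAD chunk (RFC 4820) bundled behind it so the packet reaches the current probe size; sctp_packet_bundle_pad() in the net/sctp/output.c hunk below sizes the padding by subtracting the SCTP common header, the HEARTBEAT chunk (header plus heartbeat info) and the PAD chunk header from the probe size. A small sketch of that arithmetic, with the struct sizes as illustrative assumptions rather than values from the kernel headers:

  /* Illustrative header sizes (assumptions, not taken from the kernel): */
  enum {
          SCTP_COMMON_HDR = 12,   /* struct sctphdr */
          SCTP_CHUNK_HDR  =  4,   /* struct sctp_chunkhdr */
          SCTP_HB_INFO    = 52,   /* struct sctp_sender_hb_info (assumed) */
          SCTP_PAD_HDR    =  4,   /* struct sctp_pad_chunk */
  };

  /* Padding Data bytes needed so HEARTBEAT + PAD together fill probe_size,
   * mirroring the overhead computation in sctp_packet_bundle_pad(). */
  static int plpmtud_pad_len(int probe_size)
  {
          int overhead = SCTP_COMMON_HDR + SCTP_CHUNK_HDR +
                         SCTP_HB_INFO + SCTP_PAD_HDR;

          return probe_size - overhead;
  }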
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 336df4b36655..be29da09cc7a 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -98,6 +98,7 @@ static struct sctp_association *sctp_association_init( * sock configured value. */ asoc->hbinterval = msecs_to_jiffies(sp->hbinterval); + asoc->probe_interval = msecs_to_jiffies(sp->probe_interval); asoc->encap_port = sp->encap_port; @@ -625,6 +626,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, * association configured value. */ peer->hbinterval = asoc->hbinterval; + peer->probe_interval = asoc->probe_interval; peer->encap_port = asoc->encap_port; @@ -714,6 +716,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, return NULL; } + sctp_transport_pl_reset(peer); + /* Attach the remote transport to our asoc. */ list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list); asoc->peer.transport_count++; @@ -812,6 +816,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, spc_state = SCTP_ADDR_CONFIRMED; transport->state = SCTP_ACTIVE; + sctp_transport_pl_reset(transport); break; case SCTP_TRANSPORT_DOWN: @@ -821,6 +826,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, */ if (transport->state != SCTP_UNCONFIRMED) { transport->state = SCTP_INACTIVE; + sctp_transport_pl_reset(transport); spc_state = SCTP_ADDR_UNREACHABLE; } else { sctp_transport_dst_release(transport); diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 53e5ed79f63f..59e653b528b1 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -270,22 +270,19 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, rawaddr = (union sctp_addr_param *)raw_addr_list; af = sctp_get_af_specific(param_type2af(param->type)); - if (unlikely(!af)) { + if (unlikely(!af) || + !af->from_addr_param(&addr, rawaddr, htons(port), 0)) { retval = -EINVAL; - sctp_bind_addr_clean(bp); - break; + goto out_err; } - af->from_addr_param(&addr, rawaddr, htons(port), 0); if (sctp_bind_addr_state(bp, &addr) != -1) goto next; retval = sctp_add_bind_addr(bp, &addr, sizeof(addr), SCTP_ADDR_SRC, gfp); - if (retval) { + if (retval) /* Can't finish building the list, clean up. 
*/ - sctp_bind_addr_clean(bp); - break; - } + goto out_err; next: len = ntohs(param->length); @@ -294,6 +291,12 @@ next: } return retval; + +out_err: + if (retval) + sctp_bind_addr_clean(bp); + + return retval; } /******************************************************************** diff --git a/net/sctp/debug.c b/net/sctp/debug.c index c4d9c7feffb9..ccd773e4c371 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -154,6 +154,7 @@ static const char *const sctp_timer_tbl[] = { "TIMEOUT_T5_SHUTDOWN_GUARD", "TIMEOUT_HEARTBEAT", "TIMEOUT_RECONF", + "TIMEOUT_PROBE", "TIMEOUT_SACK", "TIMEOUT_AUTOCLOSE", }; diff --git a/net/sctp/input.c b/net/sctp/input.c index 5ceaf75105ba..eb3c2a34a31c 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -385,7 +385,9 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t, __u32 pmtu) { - if (!t || (t->pathmtu <= pmtu)) + if (!t || + (t->pathmtu <= pmtu && + t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu)) return; if (sock_owned_by_user(sk)) { @@ -554,6 +556,50 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t) sctp_transport_put(t); } +static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb, + __u8 type, __u8 code, __u32 info) +{ + struct sctp_association *asoc = t->asoc; + struct sock *sk = asoc->base.sk; + int err = 0; + + switch (type) { + case ICMP_PARAMETERPROB: + err = EPROTO; + break; + case ICMP_DEST_UNREACH: + if (code > NR_ICMP_UNREACH) + return; + if (code == ICMP_FRAG_NEEDED) { + sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info)); + return; + } + if (code == ICMP_PROT_UNREACH) { + sctp_icmp_proto_unreachable(sk, asoc, t); + return; + } + err = icmp_err_convert[code].errno; + break; + case ICMP_TIME_EXCEEDED: + if (code == ICMP_EXC_FRAGTIME) + return; + + err = EHOSTUNREACH; + break; + case ICMP_REDIRECT: + sctp_icmp_redirect(sk, t, skb); + return; + default: + return; + } + if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) { + sk->sk_err = err; + sk_error_report(sk); + } else { /* Only an error on timeout */ + sk->sk_err_soft = err; + } +} + /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should @@ -572,22 +618,19 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t) int sctp_v4_err(struct sk_buff *skb, __u32 info) { const struct iphdr *iph = (const struct iphdr *)skb->data; - const int ihlen = iph->ihl * 4; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; - struct sock *sk; - struct sctp_association *asoc = NULL; + struct net *net = dev_net(skb->dev); struct sctp_transport *transport; - struct inet_sock *inet; + struct sctp_association *asoc; __u16 saveip, savesctp; - int err; - struct net *net = dev_net(skb->dev); + struct sock *sk; /* Fix up skb to look at the embedded net header. */ saveip = skb->network_header; savesctp = skb->transport_header; skb_reset_network_header(skb); - skb_set_transport_header(skb, ihlen); + skb_set_transport_header(skb, iph->ihl * 4); sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original values. */ skb->network_header = saveip; @@ -596,59 +639,41 @@ int sctp_v4_err(struct sk_buff *skb, __u32 info) __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return -ENOENT; } - /* Warning: The sock lock is held. Remember to call - * sctp_err_finish! 
- */ - switch (type) { - case ICMP_PARAMETERPROB: - err = EPROTO; - break; - case ICMP_DEST_UNREACH: - if (code > NR_ICMP_UNREACH) - goto out_unlock; + sctp_v4_err_handle(transport, skb, type, code, info); + sctp_err_finish(sk, transport); - /* PMTU discovery (RFC1191) */ - if (ICMP_FRAG_NEEDED == code) { - sctp_icmp_frag_needed(sk, asoc, transport, - SCTP_TRUNC4(info)); - goto out_unlock; - } else { - if (ICMP_PROT_UNREACH == code) { - sctp_icmp_proto_unreachable(sk, asoc, - transport); - goto out_unlock; - } - } - err = icmp_err_convert[code].errno; - break; - case ICMP_TIME_EXCEEDED: - /* Ignore any time exceeded errors due to fragment reassembly - * timeouts. - */ - if (ICMP_EXC_FRAGTIME == code) - goto out_unlock; + return 0; +} - err = EHOSTUNREACH; - break; - case ICMP_REDIRECT: - sctp_icmp_redirect(sk, transport, skb); - goto out_unlock; - default: - goto out_unlock; +int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb) +{ + struct net *net = dev_net(skb->dev); + struct sctp_association *asoc; + struct sctp_transport *t; + struct icmphdr *hdr; + __u32 info = 0; + + skb->transport_header += sizeof(struct udphdr); + sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t); + if (!sk) { + __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); + return -ENOENT; } - inet = inet_sk(sk); - if (!sock_owned_by_user(sk) && inet->recverr) { - sk->sk_err = err; - sk->sk_error_report(sk); - } else { /* Only an error on timeout */ - sk->sk_err_soft = err; + skb->transport_header -= sizeof(struct udphdr); + hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr)); + if (hdr->type == ICMP_REDIRECT) { + /* can't be handled without outer iphdr known, leave it to udp_err */ + sctp_err_finish(sk, t); + return 0; } + if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED) + info = ntohs(hdr->un.frag.mtu); + sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info); -out_unlock: - sctp_err_finish(sk, transport); - return 0; + sctp_err_finish(sk, t); + return 1; } /* @@ -1131,7 +1156,8 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, if (!af) continue; - af->from_addr_param(paddr, params.addr, sh->source, 0); + if (!af->from_addr_param(paddr, params.addr, sh->source, 0)) + continue; asoc = __sctp_lookup_association(net, laddr, paddr, transportp); if (asoc) @@ -1167,6 +1193,9 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( union sctp_addr_param *param; union sctp_addr paddr; + if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr)) + return NULL; + /* Skip over the ADDIP header and find the Address parameter */ param = (union sctp_addr_param *)(asconf + 1); @@ -1174,7 +1203,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( if (unlikely(!af)) return NULL; - af->from_addr_param(&paddr, param, peer_port, 0); + if (af->from_addr_param(&paddr, param, peer_port, 0)) + return NULL; return __sctp_lookup_association(net, laddr, &paddr, transportp); } @@ -1246,7 +1276,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net, ch = (struct sctp_chunkhdr *)ch_end; chunk_num++; - } while (ch_end < skb_tail_pointer(skb)); + } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb)); return asoc; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index bd08807c9e44..e48dd909dee5 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -122,54 +122,28 @@ static struct notifier_block sctp_inet6addr_notifier = { .notifier_call = sctp_inet6addr_event, }; -/* ICMP error handler. 
*/ -static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, __be32 info) +static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb, + __u8 type, __u8 code, __u32 info) { - struct inet6_dev *idev; - struct sock *sk; - struct sctp_association *asoc; - struct sctp_transport *transport; + struct sctp_association *asoc = t->asoc; + struct sock *sk = asoc->base.sk; struct ipv6_pinfo *np; - __u16 saveip, savesctp; - int err, ret = 0; - struct net *net = dev_net(skb->dev); - - idev = in6_dev_get(skb->dev); - - /* Fix up skb to look at the embedded net header. */ - saveip = skb->network_header; - savesctp = skb->transport_header; - skb_reset_network_header(skb); - skb_set_transport_header(skb, offset); - sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); - /* Put back, the original pointers. */ - skb->network_header = saveip; - skb->transport_header = savesctp; - if (!sk) { - __ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS); - ret = -ENOENT; - goto out; - } - - /* Warning: The sock lock is held. Remember to call - * sctp_err_finish! - */ + int err = 0; switch (type) { case ICMPV6_PKT_TOOBIG: if (ip6_sk_accept_pmtu(sk)) - sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info)); - goto out_unlock; + sctp_icmp_frag_needed(sk, asoc, t, info); + return; case ICMPV6_PARAMPROB: if (ICMPV6_UNK_NEXTHDR == code) { - sctp_icmp_proto_unreachable(sk, asoc, transport); - goto out_unlock; + sctp_icmp_proto_unreachable(sk, asoc, t); + return; } break; case NDISC_REDIRECT: - sctp_icmp_redirect(sk, transport, skb); - goto out_unlock; + sctp_icmp_redirect(sk, t, skb); + return; default: break; } @@ -178,18 +152,70 @@ static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, icmpv6_err_convert(type, code, &err); if (!sock_owned_by_user(sk) && np->recverr) { sk->sk_err = err; - sk->sk_error_report(sk); - } else { /* Only an error on timeout */ + sk_error_report(sk); + } else { sk->sk_err_soft = err; } +} + +/* ICMP error handler. */ +static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info) +{ + struct net *net = dev_net(skb->dev); + struct sctp_transport *transport; + struct sctp_association *asoc; + __u16 saveip, savesctp; + struct sock *sk; + + /* Fix up skb to look at the embedded net header. */ + saveip = skb->network_header; + savesctp = skb->transport_header; + skb_reset_network_header(skb); + skb_set_transport_header(skb, offset); + sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); + /* Put back, the original pointers. 
*/ + skb->network_header = saveip; + skb->transport_header = savesctp; + if (!sk) { + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); + return -ENOENT; + } -out_unlock: + sctp_v6_err_handle(transport, skb, type, code, ntohl(info)); sctp_err_finish(sk, transport); -out: - if (likely(idev != NULL)) - in6_dev_put(idev); - return ret; + return 0; +} + +int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb) +{ + struct net *net = dev_net(skb->dev); + struct sctp_association *asoc; + struct sctp_transport *t; + struct icmp6hdr *hdr; + __u32 info = 0; + + skb->transport_header += sizeof(struct udphdr); + sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &t); + if (!sk) { + __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); + return -ENOENT; + } + + skb->transport_header -= sizeof(struct udphdr); + hdr = (struct icmp6hdr *)(skb_network_header(skb) - sizeof(struct icmp6hdr)); + if (hdr->icmp6_type == NDISC_REDIRECT) { + /* can't be handled without outer ip6hdr known, leave it to udpv6_err */ + sctp_err_finish(sk, t); + return 0; + } + if (hdr->icmp6_type == ICMPV6_PKT_TOOBIG) + info = ntohl(hdr->icmp6_mtu); + sctp_v6_err_handle(t, skb, hdr->icmp6_type, hdr->icmp6_code, info); + + sctp_err_finish(sk, t); + return 1; } static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t) @@ -551,15 +577,20 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) } /* Initialize a sctp_addr from an address parameter. */ -static void sctp_v6_from_addr_param(union sctp_addr *addr, +static bool sctp_v6_from_addr_param(union sctp_addr *addr, union sctp_addr_param *param, __be16 port, int iif) { + if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param)) + return false; + addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = port; addr->v6.sin6_flowinfo = 0; /* BUG */ addr->v6.sin6_addr = param->v6.addr; addr->v6.sin6_scope_id = iif; + + return true; } /* Initialize an address parameter from a sctp_addr and return the length diff --git a/net/sctp/output.c b/net/sctp/output.c index a6aa17df09ef..9032ce60d50e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -103,7 +103,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, sctp_transport_route(tp, NULL, sp); if (asoc->param_flags & SPP_PMTUD_ENABLE) sctp_assoc_sync_pmtu(asoc); - } else if (!sctp_transport_pmtu_check(tp)) { + } else if (!sctp_transport_pl_enabled(tp) && + !sctp_transport_pmtu_check(tp)) { if (asoc->param_flags & SPP_PMTUD_ENABLE) sctp_assoc_sync_pmtu(asoc); } @@ -211,6 +212,30 @@ enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet, return retval; } +/* Try to bundle a pad chunk into a packet with a heartbeat chunk for PLPMTUTD probe */ +static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk) +{ + struct sctp_transport *t = pkt->transport; + struct sctp_chunk *pad; + int overhead = 0; + + if (!chunk->pmtu_probe) + return SCTP_XMIT_OK; + + /* calculate the Padding Data size for the pad chunk */ + overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); + overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk); + pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead); + if (!pad) + return SCTP_XMIT_DELAY; + + list_add_tail(&pad->list, &pkt->chunk_list); + pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length)); + chunk->transport = t; + + return SCTP_XMIT_OK; +} + /* Try to bundle an auth chunk into the packet. 
*/ static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt, struct sctp_chunk *chunk) @@ -382,6 +407,10 @@ enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet, goto finish; retval = __sctp_packet_append_chunk(packet, chunk); + if (retval != SCTP_XMIT_OK) + goto finish; + + retval = sctp_packet_bundle_pad(packet, chunk); finish: return retval; @@ -553,7 +582,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) sk = chunk->skb->sk; /* check gso */ - if (packet->size > tp->pathmtu && !packet->ipfragok) { + if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) { if (!sk_can_gso(sk)) { pr_err_once("Trying to GSO but underlying device doesn't support it."); goto out; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 5cb1aa5f067b..ff47091c385e 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -769,7 +769,11 @@ static int sctp_packet_singleton(struct sctp_transport *transport, sctp_packet_init(&singleton, transport, sport, dport); sctp_packet_config(&singleton, vtag, 0); - sctp_packet_append_chunk(&singleton, chunk); + if (sctp_packet_append_chunk(&singleton, chunk) != SCTP_XMIT_OK) { + list_del_init(&chunk->list); + sctp_chunk_free(chunk); + return -ENOMEM; + } return sctp_packet_transmit(&singleton, gfp); } @@ -929,8 +933,13 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx) one_packet = 1; fallthrough; - case SCTP_CID_SACK: case SCTP_CID_HEARTBEAT: + if (chunk->pmtu_probe) { + sctp_packet_singleton(ctx->transport, chunk, ctx->gfp); + break; + } + fallthrough; + case SCTP_CID_SACK: case SCTP_CID_SHUTDOWN: case SCTP_CID_ECN_ECNE: case SCTP_CID_ASCONF: diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 6f2bbfeec3a4..3c1fbf38f4f7 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -254,14 +254,19 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) } /* Initialize a sctp_addr from an address parameter. */ -static void sctp_v4_from_addr_param(union sctp_addr *addr, +static bool sctp_v4_from_addr_param(union sctp_addr *addr, union sctp_addr_param *param, __be16 port, int iif) { + if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param)) + return false; + addr->v4.sin_family = AF_INET; addr->v4.sin_port = port; addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); + + return true; } /* Initialize an address parameter from a sctp_addr and return the length @@ -850,23 +855,6 @@ static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb) return 0; } -static int sctp_udp_err_lookup(struct sock *sk, struct sk_buff *skb) -{ - struct sctp_association *asoc; - struct sctp_transport *t; - int family; - - skb->transport_header += sizeof(struct udphdr); - family = (ip_hdr(skb)->version == 4) ? 
AF_INET : AF_INET6; - sk = sctp_err_lookup(dev_net(skb->dev), family, skb, sctp_hdr(skb), - &asoc, &t); - if (!sk) - return -ENOENT; - - sctp_err_finish(sk, t); - return 0; -} - int sctp_udp_sock_start(struct net *net) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; @@ -885,7 +873,7 @@ int sctp_udp_sock_start(struct net *net) tuncfg.encap_type = 1; tuncfg.encap_rcv = sctp_udp_rcv; - tuncfg.encap_err_lookup = sctp_udp_err_lookup; + tuncfg.encap_err_lookup = sctp_udp_v4_err; setup_udp_tunnel_sock(net, sock, &tuncfg); net->sctp.udp4_sock = sock->sk; @@ -907,7 +895,7 @@ int sctp_udp_sock_start(struct net *net) tuncfg.encap_type = 1; tuncfg.encap_rcv = sctp_udp_rcv; - tuncfg.encap_err_lookup = sctp_udp_err_lookup; + tuncfg.encap_err_lookup = sctp_udp_v6_err; setup_udp_tunnel_sock(net, sock, &tuncfg); net->sctp.udp6_sock = sock->sk; #endif @@ -1171,7 +1159,6 @@ static const struct net_protocol sctp_protocol = { .handler = sctp4_rcv, .err_handler = sctp_v4_err, .no_policy = 1, - .netns_ok = 1, .icmp_strict_tag_validation = 1, }; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 5b44d228b6ca..6c08e5048d38 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1160,7 +1160,8 @@ nodata: /* Make a HEARTBEAT chunk. */ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, - const struct sctp_transport *transport) + const struct sctp_transport *transport, + __u32 probe_size) { struct sctp_sender_hb_info hbinfo; struct sctp_chunk *retval; @@ -1176,6 +1177,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, hbinfo.daddr = transport->ipaddr; hbinfo.sent_at = jiffies; hbinfo.hb_nonce = transport->hb_nonce; + hbinfo.probe_size = probe_size; /* Cast away the 'const', as this is just telling the chunk * what transport it belongs to. @@ -1183,6 +1185,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, retval->transport = (struct sctp_transport *) transport; retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo), &hbinfo); + retval->pmtu_probe = !!probe_size; nodata: return retval; @@ -1218,6 +1221,32 @@ nodata: return retval; } +/* RFC4820 3. Padding Chunk (PAD) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0x84 | Flags=0 | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * \ Padding Data / + * / \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len) +{ + struct sctp_chunk *retval; + + retval = sctp_make_control(asoc, SCTP_CID_PAD, 0, len, GFP_ATOMIC); + if (!retval) + return NULL; + + skb_put_zero(retval->skb, len); + retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + len); + retval->chunk_end = skb_tail_pointer(retval->skb); + + return retval; +} + /* Create an Operation Error chunk with the specified space reserved. * This routine can be used for containing multiple causes in the chunk. 
*/ @@ -2166,9 +2195,16 @@ static enum sctp_ierror sctp_verify_param(struct net *net, break; case SCTP_PARAM_SET_PRIMARY: - if (ep->asconf_enable) - break; - goto unhandled; + if (!ep->asconf_enable) + goto unhandled; + + if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) + + sizeof(struct sctp_paramhdr)) { + sctp_process_inv_paramlength(asoc, param.p, + chunk, err_chunk); + retval = SCTP_IERROR_ABORT; + } + break; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ @@ -2346,11 +2382,13 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, /* Process the initialization parameters. */ sctp_walk_params(param, peer_init, init_hdr.params) { - if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || - param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { + if (!src_match && + (param.p->type == SCTP_PARAM_IPV4_ADDRESS || + param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { af = sctp_get_af_specific(param_type2af(param.p->type)); - af->from_addr_param(&addr, param.addr, - chunk->sctp_hdr->source, 0); + if (!af->from_addr_param(&addr, param.addr, + chunk->sctp_hdr->source, 0)) + continue; if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) src_match = 1; } @@ -2531,7 +2569,8 @@ static int sctp_process_param(struct sctp_association *asoc, break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); - af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); + if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0)) + break; scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) @@ -2632,15 +2671,13 @@ do_addr_param: addr_param = param.v + sizeof(struct sctp_addip_param); af = sctp_get_af_specific(param_type2af(addr_param->p.type)); - if (af == NULL) + if (!af) break; - af->from_addr_param(&addr, addr_param, - htons(asoc->peer.port), 0); + if (!af->from_addr_param(&addr, addr_param, + htons(asoc->peer.port), 0)) + break; - /* if the address is invalid, we can't process it. - * XXX: see spec for what to do. - */ if (!af->addr_valid(&addr, NULL, NULL)) break; @@ -3054,7 +3091,8 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; - af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); + if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0)) + return SCTP_ERROR_DNS_FAILED; /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. @@ -3331,7 +3369,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc, /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); - af->from_addr_param(&addr, addr_param, htons(bp->port), 0); + if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0)) + return; switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index ce15d590a615..b3815b568e8e 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -471,6 +471,38 @@ out_unlock: sctp_transport_put(transport); } +/* Handle the timeout of the probe timer. 
*/ +void sctp_generate_probe_event(struct timer_list *t) +{ + struct sctp_transport *transport = from_timer(transport, t, probe_timer); + struct sctp_association *asoc = transport->asoc; + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); + int error = 0; + + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { + pr_debug("%s: sock is busy\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20))) + sctp_transport_hold(transport); + goto out_unlock; + } + + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, + SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE), + asoc->state, asoc->ep, asoc, + transport, GFP_ATOMIC); + + if (error) + sk->sk_err = -error; + +out_unlock: + bh_unlock_sock(sk); + sctp_transport_put(transport); +} + /* Inject a SACK Timeout event into the state machine. */ static void sctp_generate_sack_event(struct timer_list *t) { @@ -1641,6 +1673,11 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, sctp_cmd_hb_timers_stop(commands, asoc); break; + case SCTP_CMD_PROBE_TIMER_UPDATE: + t = cmd->obj.transport; + sctp_transport_reset_probe_timer(t); + break; + case SCTP_CMD_REPORT_ERROR: error = cmd->obj.error; break; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index fd1e319eda00..09a8f23ec709 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -361,7 +361,7 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net, /* If the INIT is coming toward a closing socket, we'll send back * and ABORT. Essentially, this catches the race of INIT being - * backloged to the socket at the same time as the user isses close(). + * backloged to the socket at the same time as the user issues close(). * Since the socket and all its associations are going away, we * can treat this OOTB */ @@ -608,8 +608,8 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_COOKIE_ECHOED)); - /* SCTP-AUTH: genereate the assocition shared keys so that - * we can potentially signe the COOKIE-ECHO. + /* SCTP-AUTH: generate the association shared keys so that + * we can potentially sign the COOKIE-ECHO. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL()); @@ -787,7 +787,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, goto nomem_init; /* SCTP-AUTH: Now that we've populate required fields in - * sctp_process_init, set up the assocaition shared keys as + * sctp_process_init, set up the association shared keys as * necessary so that we can potentially authenticate the ACK */ error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC); @@ -838,7 +838,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, /* Add all the state machine commands now since we've created * everything. This way we don't introduce memory corruptions - * during side-effect processing and correclty count established + * during side-effect processing and correctly count established * associations. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); @@ -923,7 +923,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net, commands); /* Reset init error count upon receipt of COOKIE-ACK, - * to avoid problems with the managemement of this + * to avoid problems with the management of this * counter in stale cookie situations when a transition back * from the COOKIE-ECHOED state to the COOKIE-WAIT * state is performed. 
@@ -1004,7 +1004,7 @@ static enum sctp_disposition sctp_sf_heartbeat( struct sctp_chunk *reply; /* Send a heartbeat to our peer. */ - reply = sctp_make_heartbeat(asoc, transport); + reply = sctp_make_heartbeat(asoc, transport, 0); if (!reply) return SCTP_DISPOSITION_NOMEM; @@ -1095,6 +1095,32 @@ enum sctp_disposition sctp_sf_send_reconf(struct net *net, return SCTP_DISPOSITION_CONSUME; } +/* send hb chunk with padding for PLPMUTD. */ +enum sctp_disposition sctp_sf_send_probe(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) +{ + struct sctp_transport *transport = (struct sctp_transport *)arg; + struct sctp_chunk *reply; + + if (!sctp_transport_pl_enabled(transport)) + return SCTP_DISPOSITION_CONSUME; + + sctp_transport_pl_send(transport); + + reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); + if (!reply) + return SCTP_DISPOSITION_NOMEM; + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE, + SCTP_TRANSPORT(transport)); + + return SCTP_DISPOSITION_CONSUME; +} + /* * Process an heartbeat request. * @@ -1243,6 +1269,18 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net, if (hbinfo->hb_nonce != link->hb_nonce) return SCTP_DISPOSITION_DISCARD; + if (hbinfo->probe_size) { + if (hbinfo->probe_size != link->pl.probe_size || + !sctp_transport_pl_enabled(link)) + return SCTP_DISPOSITION_DISCARD; + + sctp_transport_pl_recv(link); + if (link->pl.state == SCTP_PL_COMPLETE) + return SCTP_DISPOSITION_CONSUME; + + return sctp_sf_send_probe(net, ep, asoc, type, link, commands); + } + max_interval = link->hbinterval + link->rto; /* Check if the timestamp looks valid. */ @@ -2950,7 +2988,7 @@ enum sctp_disposition sctp_sf_do_9_2_reshutack( commands); /* Since we are not going to really process this INIT, there - * is no point in verifying chunk boundries. Just generate + * is no point in verifying chunk boundaries. Just generate * the SHUTDOWN ACK. */ reply = sctp_make_shutdown_ack(asoc, chunk); @@ -3560,7 +3598,7 @@ enum sctp_disposition sctp_sf_do_9_2_final(struct net *net, goto nomem_chunk; /* Do all the commands now (after allocation), so that we - * have consistent state if memory allocation failes + * have consistent state if memory allocation fails */ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); @@ -3747,7 +3785,7 @@ static enum sctp_disposition sctp_sf_shut_8_4_5( return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* We need to discard the rest of the packet to prevent - * potential bomming attacks from additional bundled chunks. + * potential boomming attacks from additional bundled chunks. * This is documented in SCTP Threats ID. */ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); @@ -4257,7 +4295,7 @@ gen_shutdown: } /* - * SCTP-AUTH Section 6.3 Receiving authenticated chukns + * SCTP-AUTH Section 6.3 Receiving authenticated chunks * * The receiver MUST use the HMAC algorithm indicated in the HMAC * Identifier field. If this algorithm was not specified by the @@ -4812,7 +4850,7 @@ static enum sctp_disposition sctp_sf_violation_ctsn( /* Handle protocol violation of an invalid chunk bundling. 
For example, * when we have an association and we receive bundled INIT-ACK, or - * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle" + * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle" * statement from the specs. Additionally, there might be an attacker * on the path and we may not want to continue this communication. */ @@ -5208,7 +5246,7 @@ enum sctp_disposition sctp_sf_cookie_wait_prm_shutdown( * Inputs * (endpoint, asoc) * - * The RFC does not explcitly address this issue, but is the route through the + * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues a shutdown while in COOKIE_ECHOED state. * * Outputs @@ -5932,7 +5970,7 @@ enum sctp_disposition sctp_sf_t1_cookie_timer_expire( /* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN * with the updated last sequential TSN received from its peer. * - * An endpoint should limit the number of retransmissions of the + * An endpoint should limit the number of retransmission of the * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'. * If this threshold is exceeded the endpoint should destroy the TCB and * MUST report the peer endpoint unreachable to the upper layer (and @@ -6010,7 +6048,7 @@ nomem: } /* - * ADDIP Section 4.1 ASCONF CHunk Procedures + * ADDIP Section 4.1 ASCONF Chunk Procedures * If the T4 RTO timer expires the endpoint should do B1 to B5 */ enum sctp_disposition sctp_sf_t4_timer_expire( @@ -6441,7 +6479,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, chunk->ecn_ce_done = 1; if (af->is_ce(sctp_gso_headskb(chunk->skb))) { - /* Do real work as sideffect. */ + /* Do real work as side effect. */ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, SCTP_U32(tsn)); } diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 88ea87f4f0e7..1816a4410b2b 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -527,6 +527,26 @@ auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { }; /*state_fn_t auth_chunk_event_table[][] */ static const struct sctp_sm_table_entry +pad_chunk_event_table[SCTP_STATE_NUM_STATES] = { + /* SCTP_STATE_CLOSED */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_COOKIE_WAIT */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_COOKIE_ECHOED */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_ESTABLISHED */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_SHUTDOWN_PENDING */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_SHUTDOWN_SENT */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_SHUTDOWN_RECEIVED */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), +}; /* chunk pad */ + +static const struct sctp_sm_table_entry chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { /* SCTP_STATE_CLOSED */ TYPE_SCTP_FUNC(sctp_sf_ootb), @@ -947,6 +967,25 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } +#define TYPE_SCTP_EVENT_TIMEOUT_PROBE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_send_probe), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + 
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + static const struct sctp_sm_table_entry timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_EVENT_TIMEOUT_NONE, @@ -958,6 +997,7 @@ timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT, TYPE_SCTP_EVENT_TIMEOUT_RECONF, + TYPE_SCTP_EVENT_TIMEOUT_PROBE, TYPE_SCTP_EVENT_TIMEOUT_SACK, TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; @@ -992,6 +1032,9 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup( case SCTP_CID_AUTH: return &auth_chunk_event_table[0][state]; + + case SCTP_CID_PAD: + return &pad_chunk_event_table[state]; } return &chunk_event_table_unknown[state]; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a79d193ff872..e64e01f61b11 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2496,6 +2496,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, sctp_transport_pmtu(trans, sctp_opt2sk(sp)); sctp_assoc_sync_pmtu(asoc); } + sctp_transport_pl_reset(trans); } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; @@ -4481,6 +4482,61 @@ static int sctp_setsockopt_encap_port(struct sock *sk, return 0; } +static int sctp_setsockopt_probe_interval(struct sock *sk, + struct sctp_probeinterval *params, + unsigned int optlen) +{ + struct sctp_association *asoc; + struct sctp_transport *t; + __u32 probe_interval; + + if (optlen != sizeof(*params)) + return -EINVAL; + + probe_interval = params->spi_interval; + if (probe_interval && probe_interval < SCTP_PROBE_TIMER_MIN) + return -EINVAL; + + /* If an address other than INADDR_ANY is specified, and + * no transport is found, then the request is invalid. + */ + if (!sctp_is_any(sk, (union sctp_addr *)¶ms->spi_address)) { + t = sctp_addr_id2transport(sk, ¶ms->spi_address, + params->spi_assoc_id); + if (!t) + return -EINVAL; + + t->probe_interval = msecs_to_jiffies(probe_interval); + sctp_transport_pl_reset(t); + return 0; + } + + /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the + * socket is a one to many style socket, and an association + * was not found, then the id was invalid. + */ + asoc = sctp_id2assoc(sk, params->spi_assoc_id); + if (!asoc && params->spi_assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + return -EINVAL; + + /* If changes are for association, also apply probe_interval to + * each transport. 
+ */ + if (asoc) { + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { + t->probe_interval = msecs_to_jiffies(probe_interval); + sctp_transport_pl_reset(t); + } + + asoc->probe_interval = msecs_to_jiffies(probe_interval); + return 0; + } + + sctp_sk(sk)->probe_interval = probe_interval; + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4703,6 +4759,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_REMOTE_UDP_ENCAPS_PORT: retval = sctp_setsockopt_encap_port(sk, kopt, optlen); break; + case SCTP_PLPMTUD_PROBE_INTERVAL: + retval = sctp_setsockopt_probe_interval(sk, kopt, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -4989,6 +5048,7 @@ static int sctp_init_sock(struct sock *sk) atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); sp->frag_interleave = 0; + sp->probe_interval = net->sctp.probe_interval; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still @@ -7905,6 +7965,66 @@ out: return 0; } +static int sctp_getsockopt_probe_interval(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_probeinterval params; + struct sctp_association *asoc; + struct sctp_transport *t; + __u32 probe_interval; + + if (len < sizeof(params)) + return -EINVAL; + + len = sizeof(params); + if (copy_from_user(¶ms, optval, len)) + return -EFAULT; + + /* If an address other than INADDR_ANY is specified, and + * no transport is found, then the request is invalid. + */ + if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spi_address)) { + t = sctp_addr_id2transport(sk, ¶ms.spi_address, + params.spi_assoc_id); + if (!t) { + pr_debug("%s: failed no transport\n", __func__); + return -EINVAL; + } + + probe_interval = jiffies_to_msecs(t->probe_interval); + goto out; + } + + /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the + * socket is a one to many style socket, and an association + * was not found, then the id was invalid. 
+ */ + asoc = sctp_id2assoc(sk, params.spi_assoc_id); + if (!asoc && params.spi_assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + pr_debug("%s: failed no association\n", __func__); + return -EINVAL; + } + + if (asoc) { + probe_interval = jiffies_to_msecs(asoc->probe_interval); + goto out; + } + + probe_interval = sctp_sk(sk)->probe_interval; + +out: + params.spi_interval = probe_interval; + if (copy_to_user(optval, ¶ms, len)) + return -EFAULT; + + if (put_user(len, optlen)) + return -EFAULT; + + return 0; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -8128,6 +8248,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_REMOTE_UDP_ENCAPS_PORT: retval = sctp_getsockopt_encap_port(sk, len, optval, optlen); break; + case SCTP_PLPMTUD_PROBE_INTERVAL: + retval = sctp_getsockopt_probe_interval(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 55871b277f47..b46a416787ec 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -55,6 +55,8 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); static int proc_sctp_do_auth(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); +static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos); static struct ctl_table sctp_table[] = { { @@ -294,6 +296,13 @@ static struct ctl_table sctp_net_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "plpmtud_probe_interval", + .data = &init_net.sctp.probe_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_sctp_do_probe_interval, + }, + { .procname = "udp_port", .data = &init_net.sctp.udp_port, .maxlen = sizeof(int), @@ -539,6 +548,32 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, return ret; } +static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + struct ctl_table tbl; + int ret, new_value; + + memset(&tbl, 0, sizeof(struct ctl_table)); + tbl.maxlen = sizeof(unsigned int); + + if (write) + tbl.data = &new_value; + else + tbl.data = &net->sctp.probe_interval; + + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) { + if (new_value && new_value < SCTP_PROBE_TIMER_MIN) + return -EINVAL; + + net->sctp.probe_interval = new_value; + } + + return ret; +} + int sctp_sysctl_net_register(struct net *net) { struct ctl_table *table; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index bf0ac467e757..5f23804f21c7 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -75,6 +75,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net, timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0); timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0); timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0); + timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0); timer_setup(&peer->proto_unreach_timer, sctp_generate_proto_unreach_event, 0); @@ -131,6 +132,9 @@ void sctp_transport_free(struct sctp_transport *transport) if (del_timer(&transport->reconf_timer)) sctp_transport_put(transport); + if (del_timer(&transport->probe_timer)) + sctp_transport_put(transport); + /* Delete the ICMP proto unreachable timer if it's active. 
*/ if (del_timer(&transport->proto_unreach_timer)) sctp_transport_put(transport); @@ -207,6 +211,15 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) sctp_transport_hold(transport); } +void sctp_transport_reset_probe_timer(struct sctp_transport *transport) +{ + if (timer_pending(&transport->probe_timer)) + return; + if (!mod_timer(&transport->probe_timer, + jiffies + transport->probe_interval)) + sctp_transport_hold(transport); +} + /* This transport has been assigned to an association. * Initialize fields from the association or from the sock itself. * Register the reference count in the association. @@ -241,12 +254,143 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = sctp_dst_mtu(transport->dst); else transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; + + sctp_transport_pl_update(transport); +} + +void sctp_transport_pl_send(struct sctp_transport *t) +{ + pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", + __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); + + if (t->pl.probe_count < SCTP_MAX_PROBES) { + t->pl.probe_count++; + return; + } + + if (t->pl.state == SCTP_PL_BASE) { + if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ + t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ + + t->pl.pmtu = SCTP_MIN_PLPMTU; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_assoc_sync_pmtu(t->asoc); + } + } else if (t->pl.state == SCTP_PL_SEARCH) { + if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ + t->pl.state = SCTP_PL_BASE; /* Search -> Base */ + t->pl.probe_size = SCTP_BASE_PLPMTU; + t->pl.probe_high = 0; + + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_assoc_sync_pmtu(t->asoc); + } else { /* Normal probe failure. 
*/ + t->pl.probe_high = t->pl.probe_size; + t->pl.probe_size = t->pl.pmtu; + } + } else if (t->pl.state == SCTP_PL_COMPLETE) { + if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ + t->pl.state = SCTP_PL_BASE; /* Search Complete -> Base */ + t->pl.probe_size = SCTP_BASE_PLPMTU; + + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_assoc_sync_pmtu(t->asoc); + } + } + t->pl.probe_count = 1; +} + +void sctp_transport_pl_recv(struct sctp_transport *t) +{ + pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", + __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); + + t->pl.pmtu = t->pl.probe_size; + t->pl.probe_count = 0; + if (t->pl.state == SCTP_PL_BASE) { + t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */ + t->pl.probe_size += SCTP_PL_BIG_STEP; + } else if (t->pl.state == SCTP_PL_ERROR) { + t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */ + + t->pl.pmtu = t->pl.probe_size; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_assoc_sync_pmtu(t->asoc); + t->pl.probe_size += SCTP_PL_BIG_STEP; + } else if (t->pl.state == SCTP_PL_SEARCH) { + if (!t->pl.probe_high) { + t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, + SCTP_MAX_PLPMTU); + return; + } + t->pl.probe_size += SCTP_PL_MIN_STEP; + if (t->pl.probe_size >= t->pl.probe_high) { + t->pl.probe_high = 0; + t->pl.raise_count = 0; + t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */ + + t->pl.probe_size = t->pl.pmtu; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_assoc_sync_pmtu(t->asoc); + } + } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) { + /* Raise probe_size again after 30 * interval in Search Complete */ + t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ + t->pl.probe_size += SCTP_PL_MIN_STEP; + } +} + +static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) +{ + pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n", + __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu); + + if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size) + return false; + + if (t->pl.state == SCTP_PL_BASE) { + if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) { + t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ + + t->pl.pmtu = SCTP_MIN_PLPMTU; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + } + } else if (t->pl.state == SCTP_PL_SEARCH) { + if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { + t->pl.state = SCTP_PL_BASE; /* Search -> Base */ + t->pl.probe_size = SCTP_BASE_PLPMTU; + t->pl.probe_count = 0; + + t->pl.probe_high = 0; + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) { + t->pl.probe_size = pmtu; + t->pl.probe_count = 0; + + return false; + } + } else if (t->pl.state == SCTP_PL_COMPLETE) { + if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { + t->pl.state = SCTP_PL_BASE; /* Complete -> Base */ + t->pl.probe_size = SCTP_BASE_PLPMTU; + t->pl.probe_count = 0; + + t->pl.probe_high = 0; + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + } + } + + return true; } bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { - struct dst_entry *dst = sctp_transport_dst_check(t); struct sock *sk = t->asoc->base.sk; + struct dst_entry *dst; bool change = true; if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { @@ -257,6 +401,10 @@ bool 
sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) } pmtu = SCTP_TRUNC4(pmtu); + if (sctp_transport_pl_enabled(t)) + return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t)); + + dst = sctp_transport_dst_check(t); if (dst) { struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family); union sctp_addr addr; |