author	Florian Westphal	2021-04-15 16:44:56 -0700
committer	David S. Miller	2021-04-16 15:23:10 -0700
commit	5d0a6bc82d38d773c20b44aa1b9f312c4294b594 (patch)
tree	38a3193aa1434150a94dcb21ab286ea8b1cc00f1
parent	1b3e7ede1365a24db1b4fd837e58a595f52fa4ad (diff)
mptcp: setsockopt: handle receive/send buffer and device bind
As with the previous patch, these values need to be mirrored to all subflows.

Device bind is simpler: it is only done on the initial (listener) sk.

Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
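For illustration, the point of mirroring is that an ordinary setsockopt() caller keeps working unchanged on an MPTCP socket. A minimal userspace sketch follows; the buffer size and the "eth0" interface name are arbitrary examples, not taken from this patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* from <linux/in.h>, kernels >= v5.6 */
#endif

int main(void)
{
	int bufsz = 256 * 1024;	/* arbitrary example size */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0) {
		perror("socket");	/* e.g. kernel without MPTCP support */
		return 1;
	}

	/* With this patch, both buffer sizes propagate to every subflow. */
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bufsz, sizeof(bufsz)) < 0)
		perror("SO_SNDBUF");
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bufsz, sizeof(bufsz)) < 0)
		perror("SO_RCVBUF");

	/* Device bind is applied on the initial (listener) subflow only;
	 * this call needs CAP_NET_RAW.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0")) < 0)
		perror("SO_BINDTODEVICE");

	close(fd);
	return 0;
}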
-rw-r--r--	net/mptcp/sockopt.c	52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 9be4c94ff4d4..bfb9db04d26b 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -85,6 +85,16 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
 	case SO_PRIORITY:
 		ssk->sk_priority = val;
 		break;
+	case SO_SNDBUF:
+	case SO_SNDBUFFORCE:
+		ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+		WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
+		break;
+	case SO_RCVBUF:
+	case SO_RCVBUFFORCE:
+		ssk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+		WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
+		break;
 	}
 
 	subflow->setsockopt_seq = msk->setsockopt_seq;
@@ -123,6 +133,10 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
 		mptcp_sol_socket_sync_intval(msk, optname, val);
 		return 0;
 	case SO_PRIORITY:
+	case SO_SNDBUF:
+	case SO_SNDBUFFORCE:
+	case SO_RCVBUF:
+	case SO_RCVBUFFORCE:
 		return mptcp_sol_socket_intval(msk, optname, val);
 	}
 
@@ -139,6 +153,8 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
 	switch (optname) {
 	case SO_REUSEPORT:
 	case SO_REUSEADDR:
+	case SO_BINDTODEVICE:
+	case SO_BINDTOIFINDEX:
 		lock_sock(sk);
 		ssock = __mptcp_nmpc_socket(msk);
 		if (!ssock) {
@@ -152,11 +168,19 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
 				sk->sk_reuseport = ssock->sk->sk_reuseport;
 			else if (optname == SO_REUSEADDR)
 				sk->sk_reuse = ssock->sk->sk_reuse;
+			else if (optname == SO_BINDTODEVICE)
+				sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
+			else if (optname == SO_BINDTOIFINDEX)
+				sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
 		}
 		release_sock(sk);
 		return ret;
 	case SO_KEEPALIVE:
 	case SO_PRIORITY:
+	case SO_SNDBUF:
+	case SO_SNDBUFFORCE:
+	case SO_RCVBUF:
+	case SO_RCVBUFFORCE:
 		return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen);
 	}
 
@@ -460,6 +484,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
 
 static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
 {
+	static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
 	struct sock *sk = (struct sock *)msk;
 
 	if (ssk->sk_prot->keepalive) {
@@ -470,6 +495,33 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
 	}
 
 	ssk->sk_priority = sk->sk_priority;
+	ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
+	ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
+
+	if (sk->sk_userlocks & tx_rx_locks) {
+		ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
+		if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+			WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
+		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+			WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
+	}
+
+	if (sock_flag(sk, SOCK_LINGER)) {
+		ssk->sk_lingertime = sk->sk_lingertime;
+		sock_set_flag(ssk, SOCK_LINGER);
+	} else {
+		sock_reset_flag(ssk, SOCK_LINGER);
+	}
+
+	if (sk->sk_mark != ssk->sk_mark) {
+		ssk->sk_mark = sk->sk_mark;
+		sk_dst_reset(ssk);
+	}
+
+	sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));
+
+	if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
+		tcp_set_congestion_control(ssk, inet_csk(sk)->icsk_ca_ops->name, false, true);
 }
 
 static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
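The hunks above show only the changed switch bodies. As a rough sketch of how the intval path fans out, assuming the enclosing helper walks the subflow list with mptcp_for_each_subflow() as other helpers in this file do (locking and sequence handling elided; the function name below is hypothetical):

/* Sketch only: how a value plausibly reaches every subflow.
 * Simplified; the real helper also takes the msk and per-subflow
 * socket locks before touching ssk.
 */
static void sync_intval_sketch(struct mptcp_sock *msk, int optname, int val)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		switch (optname) {
		case SO_SNDBUF:
		case SO_SNDBUFFORCE:
			ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
			break;
		case SO_RCVBUF:
		case SO_RCVBUFFORCE:
			ssk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
			break;
		}

		/* mark this subflow as up to date with the msk */
		subflow->setsockopt_seq = msk->setsockopt_seq;
	}
}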