author	Wei Wang	2017-05-05 12:53:23 -0700
committer	David S. Miller	2017-05-08 14:37:07 -0400
commit	1b1fc3fddabfb8739ef2c8f04e05a9858b42c1f7 (patch)
tree	96e7fc35b0e4ab664d4e68a0ed7a68398402090c
parent	82486aa6f1b9bc8145e6d0fa2bc0b44307f3b875 (diff)
tcp: make congestion control optionally skip slow start after idle
Congestion control modules that want full control over congestion control
behavior do not want the cwnd modifications controlled by the
sysctl_tcp_slow_start_after_idle code path. So skip those code paths for
CC modules that use the cong_control() API. As an example, those cwnd
effects are not desired for the BBR congestion control algorithm.

Fixes: c0402760f565 ("tcp: new CC hook to set sending rate with rate_sample in any CA state")
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
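Background for the checks added below: a module opts into the cong_control() API simply by setting the .cong_control member of its struct tcp_congestion_ops (BBR does this), and the new ca_ops->cong_control tests are plain non-NULL checks on that member. The following is a minimal, illustrative module skeleton under that assumption; the mycc_* names are hypothetical and not part of this patch.

/* Sketch of a CC module that takes full control via cong_control().
 * All mycc_* names are illustrative only.
 */
#include <linux/module.h>
#include <net/tcp.h>

static void mycc_main(struct sock *sk, const struct rate_sample *rs)
{
	/* Full cwnd/pacing control lives here; with this patch the generic
	 * slow-start-after-idle logic no longer touches snd_cwnd for
	 * sockets attached to this module.
	 */
}

static u32 mycc_ssthresh(struct sock *sk)
{
	return tcp_sk(sk)->snd_ssthresh;
}

static u32 mycc_undo_cwnd(struct sock *sk)
{
	return tcp_sk(sk)->snd_cwnd;
}

static struct tcp_congestion_ops mycc __read_mostly = {
	.name		= "mycc",
	.owner		= THIS_MODULE,
	.ssthresh	= mycc_ssthresh,
	.undo_cwnd	= mycc_undo_cwnd,
	.cong_control	= mycc_main,	/* non-NULL: triggers the new skips */
};

static int __init mycc_register(void)
{
	return tcp_register_congestion_control(&mycc);
}

static void __exit mycc_unregister(void)
{
	tcp_unregister_congestion_control(&mycc);
}

module_init(mycc_register);
module_exit(mycc_unregister);
MODULE_LICENSE("GPL");

With .cong_control set, tcp_slow_start_after_idle_check() and the application-limited path in tcp_cwnd_validate() (both patched below) return without modifying snd_cwnd for such sockets.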
-rw-r--r--	include/net/tcp.h	4
-rw-r--r--	net/ipv4/tcp_output.c	4
2 files changed, 6 insertions, 2 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8c0e5a901d64..38a7427ae902 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1234,10 +1234,12 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta);
 
 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
 {
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 delta;
 
-	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
+	    ca_ops->cong_control)
 		return;
 	delta = tcp_time_stamp - tp->lsndtime;
 	if (delta > inet_csk(sk)->icsk_rto)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 60111a0fc201..4858e190f6ac 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1514,6 +1514,7 @@ static void tcp_cwnd_application_limited(struct sock *sk)
 
 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 {
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Track the maximum number of outstanding packets in each
@@ -1536,7 +1537,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 		tp->snd_cwnd_used = tp->packets_out;
 
 	if (sysctl_tcp_slow_start_after_idle &&
-	    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
+	    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+	    !ca_ops->cong_control)
 		tcp_cwnd_application_limited(sk);
 
 	/* The following conditions together indicate the starvation
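As a usage note (not part of the patch): a socket ends up on a cong_control-based module such as BBR either system-wide via the net.ipv4.tcp_congestion_control sysctl or per socket with the TCP_CONGESTION socket option. A small userspace sketch of the per-socket case, for illustration only:

/* Userspace sketch: pin a socket to BBR, the cong_control-based module
 * named in the commit message. After this patch, the slow-start-after-idle
 * cwnd reset no longer applies to such a socket even when the sysctl is on.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

int use_bbr(int fd)
{
	const char name[] = "bbr";

	/* TCP_CONGESTION selects the per-socket congestion control module. */
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name));
}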