Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	89
1 file changed, 63 insertions(+), 26 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 76b148bcb0dc..2451aeb5ac23 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -661,6 +661,47 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
return NULL;
}
+static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+ int large_allowed)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 xmit_size_goal, old_size_goal;
+
+ xmit_size_goal = mss_now;
+
+ if (large_allowed && sk_can_gso(sk)) {
+ xmit_size_goal = ((sk->sk_gso_max_size - 1) -
+ inet_csk(sk)->icsk_af_ops->net_header_len -
+ inet_csk(sk)->icsk_ext_hdr_len -
+ tp->tcp_header_len);
+
+ xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
+
+ /* We try hard to avoid divides here */
+ old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+ if (likely(old_size_goal <= xmit_size_goal &&
+ old_size_goal + mss_now > xmit_size_goal)) {
+ xmit_size_goal = old_size_goal;
+ } else {
+ tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+ xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+ }
+ }
+
+ return max(xmit_size_goal, mss_now);
+}
+
+static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+{
+ int mss_now;
+
+ mss_now = tcp_current_mss(sk);
+ *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+
+ return mss_now;
+}
+
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
size_t psize, int flags)
{
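
The new helper above rounds the transmit size goal to a whole number of segments while avoiding an integer divide on the fast path: the previously computed segment count (tp->xmit_size_goal_segs) is reused whenever the fresh byte goal still falls inside [old_goal, old_goal + mss). A minimal userspace sketch of that caching pattern, with illustrative names (cached_segs, size_goal) that are not kernel API:

#include <stdio.h>

/* Illustrative stand-in for tp->xmit_size_goal_segs. */
static unsigned int cached_segs;

/*
 * Round the byte goal down to a whole number of MSS-sized segments,
 * reusing the cached segment count when it still matches so that the
 * common case performs no division.
 */
static unsigned int size_goal(unsigned int goal, unsigned int mss)
{
	unsigned int old = cached_segs * mss;

	if (!(old <= goal && old + mss > goal)) {
		cached_segs = goal / mss;	/* slow path: one divide */
		old = cached_segs * mss;
	}
	return old > mss ? old : mss;		/* never less than one MSS */
}

int main(void)
{
	printf("%u\n", size_goal(64000, 1448));	/* 63712 = 44 * 1448 */
	printf("%u\n", size_goal(64000, 1448));	/* cache hit, no divide */
	return 0;
}

The kernel version additionally bounds the goal via tcp_bound_to_half_wnd() before the caching step, and only grows it beyond one MSS when GSO is usable on the socket.
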
@@ -677,13 +718,12 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
+ goto out_err;
while (psize > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
@@ -761,8 +801,7 @@ wait_for_memory:
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
}
out:
@@ -844,8 +883,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
@@ -854,7 +892,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
+ goto out_err;
while (--iovlen >= 0) {
int seglen = iov->iov_len;
@@ -1007,8 +1045,7 @@ wait_for_memory:
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
- mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
- size_goal = tp->xmit_size_goal;
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
}
}
@@ -1045,8 +1082,7 @@ out_err:
*/
static int tcp_recv_urg(struct sock *sk, long timeo,
- struct msghdr *msg, int len, int flags,
- int *addr_len)
+ struct msghdr *msg, int len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1661,7 +1697,7 @@ out:
return err;
recv_urg:
- err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
+ err = tcp_recv_urg(sk, timeo, msg, len, flags);
goto out;
}
@@ -2478,23 +2514,23 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct tcphdr *th2;
unsigned int thlen;
unsigned int flags;
- unsigned int total;
unsigned int mss = 1;
int flush = 1;
+ int i;
- if (!pskb_may_pull(skb, sizeof(*th)))
+ th = skb_gro_header(skb, sizeof(*th));
+ if (unlikely(!th))
goto out;
- th = tcp_hdr(skb);
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
- if (!pskb_may_pull(skb, thlen))
+ th = skb_gro_header(skb, thlen);
+ if (unlikely(!th))
goto out;
- th = tcp_hdr(skb);
- __skb_pull(skb, thlen);
+ skb_gro_pull(skb, thlen);
flags = tcp_flag_word(th);
@@ -2504,7 +2540,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
th2 = tcp_hdr(p);
- if (th->source != th2->source || th->dest != th2->dest) {
+ if ((th->source ^ th2->source) | (th->dest ^ th2->dest)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
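
The rewritten flow check folds two port comparisons into a single test: x ^ y is zero only when x == y, so OR-ing the two XORs is non-zero exactly when either the source or the destination port differs. A small hedged illustration outside the kernel:

#include <assert.h>
#include <stdint.h>

/*
 * Equivalent of (s1 != s2 || d1 != d2) written without short-circuit
 * branches: each XOR is zero only for equal operands, so the OR of the
 * XORs is non-zero iff at least one pair differs.
 */
static uint32_t ports_differ(uint16_t s1, uint16_t s2,
			     uint16_t d1, uint16_t d2)
{
	return (s1 ^ s2) | (d1 ^ d2);
}

int main(void)
{
	assert(!ports_differ(80, 80, 443, 443));	/* same flow */
	assert(ports_differ(80, 80, 443, 8080));	/* dest differs */
	assert(ports_differ(80, 8080, 443, 443));	/* source differs */
	return 0;
}
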
@@ -2519,14 +2555,15 @@ found:
flush |= flags & TCP_FLAG_CWR;
flush |= (flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
- flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
- flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
+ flush |= (th->ack_seq ^ th2->ack_seq) | (th->window ^ th2->window);
+ for (i = sizeof(*th); !flush && i < thlen; i += 4)
+ flush |= *(u32 *)((u8 *)th + i) ^
+ *(u32 *)((u8 *)th2 + i);
- total = p->len;
mss = skb_shinfo(p)->gso_size;
- flush |= skb->len > mss || skb->len <= 0;
- flush |= ntohl(th2->seq) + total != ntohl(th->seq);
+ flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb);
+ flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
if (flush || skb_gro_receive(head, skb)) {
mss = 1;
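
In the same spirit, the memcmp() over the rest of the TCP header is replaced by a loop that XORs the two headers four bytes at a time and ORs each difference into flush, so any mismatch in the options also forces a flush. A standalone sketch of the word-wise comparison (the kernel loop starts past the fixed header fields, which were already checked, stops once flush is non-zero, and dereferences u32 words directly since TCP headers are aligned; memcpy is used here only to keep the example portable):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/*
 * Accumulate differences between two headers of len bytes (len a
 * multiple of 4, as doff * 4 always is): XOR corresponding 32-bit
 * words and OR the results, so the return value is non-zero iff the
 * headers differ anywhere in the compared range.
 */
static uint32_t headers_differ(const void *h1, const void *h2, size_t len)
{
	uint32_t diff = 0;
	size_t i;

	for (i = 0; i < len; i += 4) {
		uint32_t a, b;

		memcpy(&a, (const uint8_t *)h1 + i, 4);
		memcpy(&b, (const uint8_t *)h2 + i, 4);
		diff |= a ^ b;
	}
	return diff;
}

int main(void)
{
	uint8_t x[20] = { 1, 2, 3, 4 }, y[20] = { 1, 2, 3, 4 };

	assert(!headers_differ(x, y, sizeof(x)));
	y[17] = 0xff;				/* flip one byte */
	assert(headers_differ(x, y, sizeof(x)));
	return 0;
}
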
@@ -2538,7 +2575,7 @@ found:
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
- flush = skb->len < mss;
+ flush = skb_gro_len(skb) < mss;
flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
TCP_FLAG_SYN | TCP_FLAG_FIN);
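
The last hunk switches the final-segment test to skb_gro_len(), the length remaining at the current GRO offset, instead of skb->len. The decision itself is unchanged: a segment shorter than the flow's MSS, or one carrying URG/PSH/RST/SYN/FIN, ends the aggregation. A hedged restatement with illustrative flag constants (not the kernel's network-order TCP_FLAG_* values):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative host-order flag bits for the sketch only. */
#define FL_FIN 0x01
#define FL_SYN 0x02
#define FL_RST 0x04
#define FL_PSH 0x08
#define FL_URG 0x20

/*
 * A packet that is shorter than the MSS or carries control flags must
 * be flushed up the stack instead of being held for further merging.
 */
static bool gro_is_final(unsigned int seg_len, unsigned int mss,
			 uint8_t flags)
{
	return seg_len < mss ||
	       (flags & (FL_URG | FL_PSH | FL_RST | FL_SYN | FL_FIN));
}

int main(void)
{
	assert(!gro_is_final(1448, 1448, 0));		/* full segment, keep merging */
	assert(gro_is_final(512, 1448, 0));		/* short segment ends the batch */
	assert(gro_is_final(1448, 1448, FL_PSH));	/* PSH ends the batch */
	return 0;
}
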