author     Ingo Molnar    2009-03-04 02:29:19 +0100
committer  Ingo Molnar    2009-03-04 02:29:19 +0100
commit     91d75e209bd59695f0708d66964d928d45b3b2f3 (patch)
tree       32cab1359d951e4193bebb181a0f0319824a2b95 /net/ipv4
parent     9976b39b5031bbf76f715893cf080b6a17683881 (diff)
parent     8b0e5860cb099d7958d13b00ffbc35ad02735700 (diff)
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_input.c     9
-rw-r--r--  net/ipv4/tcp_output.c    1
-rw-r--r--  net/ipv4/tcp_scalable.c  2
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
-			   unsigned int pcount, int shifted, int mss)
+			   unsigned int pcount, int shifted, int mss,
+			   int dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, 0, pcount);
+	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	len = skb->len;
 	if (skb_shift(prev, skb, len)) {
 		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
 	}
 
 out:
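
The tcp_input.c hunks above thread the caller's dup_sack flag through tcp_shifted_skb() instead of hard-coding 0, so tcp_sacktag_one() keeps its DSACK information when a SACKed segment is shifted onto the previous skb. Below is a minimal stand-alone sketch of that pattern; the names (sacktag_one, shifted_skb) are simplified stand-ins, not the kernel's actual functions or types.

/* Sketch: forwarding a caller-known flag through an intermediate helper
 * instead of hard-coding it (stand-in names, not kernel code). */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for tcp_sacktag_one(): only the dup_sack argument matters here. */
static void sacktag_one(unsigned int pcount, bool dup_sack)
{
	printf("tagged %u segment(s), dup_sack=%d\n", pcount, dup_sack);
}

/* Stand-in for tcp_shifted_skb(): after the fix it takes dup_sack as a
 * parameter and forwards it rather than passing a literal 0. */
static int shifted_skb(unsigned int pcount, bool dup_sack)
{
	sacktag_one(pcount, dup_sack);	/* was: sacktag_one(pcount, false) */
	return 1;
}

/* Stand-in for tcp_shift_skb_data(), which already knows dup_sack. */
int main(void)
{
	bool dup_sack = true;		/* learned while walking the SACK blocks */

	shifted_skb(2, dup_sack);	/* first shift site: forward the flag */
	shifted_skb(1, false);		/* second site passes an explicit 0 */
	return 0;
}

As in the hunks at lines 1562 and 1581 above, the first call site forwards dup_sack while the second passes a literal 0.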
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		last_lost = tp->snd_una;
 	}
 
-	/* First pass: retransmit lost packets. */
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
 /* Tom Kelly's Scalable TCP
  *
- * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+ * See http://www.deneholme.net/tom/scalable/
  *
  * John Heffner <jheffner@sc.edu>
  */