author		Eric Dumazet	2023-04-28 04:32:31 +0000
committer	Greg Kroah-Hartman	2023-05-17 11:53:32 +0200
commit		7fa93e39fbb0566019c388a8038a4d58552e0910 (patch)
tree		473a71c6153c20df395f8560634475681a3753cd /net
parent		449280afaa053f74678c9dce4e9db723e1f84a32 (diff)
tcp: fix skb_copy_ubufs() vs BIG TCP
[ Upstream commit 7e692df3933628d974acb9f5b334d2b3e885e2a6 ]
David Ahern reported crashes in skb_copy_ubufs() caused by TCP tx zerocopy
using hugepages, with skb lengths bigger than ~68 KB.
skb_copy_ubufs() assumed it could copy all payload using up to
MAX_SKB_FRAGS order-0 pages.
This assumption broke when BIG TCP was able to put up to 512 KB per skb.
We did not hit this bug at Google because we use CONFIG_MAX_SKB_FRAGS=45
and limit gso_max_size to 180000.
A solution is to use higher order pages if needed.
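As a worked illustration of that order computation, below is a small userspace
sketch (not kernel code) mirroring the loop added by this patch. PAGE_SIZE and
MAX_SKB_FRAGS are assumptions for illustration (4 KB pages, the historical
default of 17 frags); it shows that payloads up to ~68 KB still fit in order-0
pages while a 512 KB BIG TCP skb needs order-3 (32 KB) pages.

/*
 * Userspace sketch (not kernel code) of the minimum-order computation
 * the fix introduces.  PAGE_SIZE and MAX_SKB_FRAGS are illustrative
 * assumptions: 4 KB pages and the historical default of 17 frags.
 */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define MAX_SKB_FRAGS 17UL

/* Smallest order such that MAX_SKB_FRAGS pages of that order hold pagelen. */
static unsigned int min_order(unsigned long pagelen)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < pagelen)
		order++;
	return order;
}

int main(void)
{
	unsigned long lens[] = { 64 * 1024, 128 * 1024, 512 * 1024 };

	for (unsigned long i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int order = min_order(lens[i]);

		printf("payload %6lu bytes -> order %u (%lu-byte pages)\n",
		       lens[i], order, PAGE_SIZE << order);
	}
	return 0;
}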
v2: add missing __GFP_COMP, or we leak memory.
Fixes: 7c4e983c4f3c ("net: allow gso_max_size to exceed 65536")
Reported-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/netdev/c70000f6-baa4-4a05-46d0-4b3e0dc1ccc8@gmail.com/T/
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Xin Long <lucien.xin@gmail.com>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Coco Li <lixiaoyan@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c	20
1 file changed, 14 insertions, 6 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 597c1f17d388..ccfd9053754a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1544,7 +1544,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int num_frags = skb_shinfo(skb)->nr_frags;
 	struct page *page, *head = NULL;
-	int i, new_frags;
+	int i, order, psize, new_frags;
 	u32 d_off;
 
 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
@@ -1553,9 +1553,17 @@
 	if (!num_frags)
 		goto release;
 
-	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	/* We might have to allocate high order pages, so compute what minimum
+	 * page order is needed.
+	 */
+	order = 0;
+	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
+		order++;
+	psize = (PAGE_SIZE << order);
+
+	new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
 	for (i = 0; i < new_frags; i++) {
-		page = alloc_page(gfp_mask);
+		page = alloc_pages(gfp_mask | __GFP_COMP, order);
 		if (!page) {
 			while (head) {
 				struct page *next = (struct page *)page_private(head);
@@ -1582,11 +1590,11 @@
 			vaddr = kmap_atomic(p);
 
 			while (done < p_len) {
-				if (d_off == PAGE_SIZE) {
+				if (d_off == psize) {
 					d_off = 0;
 					page = (struct page *)page_private(page);
 				}
-				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
+				copy = min_t(u32, psize - d_off, p_len - done);
 				memcpy(page_address(page) + d_off,
 				       vaddr + p_off + done, copy);
 				done += copy;
@@ -1602,7 +1610,7 @@
 
 	/* skb frags point to kernel buffers */
 	for (i = 0; i < new_frags - 1; i++) {
-		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
+		__skb_fill_page_desc(skb, i, head, 0, psize);
 		head = (struct page *)page_private(head);
 	}
 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
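To see how the new psize-based bookkeeping plays out end to end, the following
is a minimal userspace model (an illustrative sketch under assumed values, not
the kernel code): it streams a payload into a chain of psize-byte buffers the
same way the patched copy loop does, and checks that every destination slot
except the last is filled to psize while the last one holds the remaining
d_off bytes.

/*
 * Userspace model (sketch, not kernel code) of the patched copy loop:
 * data is copied into a chain of psize-byte buffers, advancing to the
 * next buffer when d_off reaches psize.  Page size, frag limit and the
 * payload length are assumptions chosen for illustration.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define MAX_SKB_FRAGS 17UL

int main(void)
{
	unsigned long pagelen = 300 * 1024;	/* example payload length */
	unsigned int order = 0;
	unsigned long psize, new_frags, d_off = 0, done = 0, frag = 0;
	unsigned char *src, **pages;

	/* Same minimum-order and frag-count computation as the patch. */
	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < pagelen)
		order++;
	psize = PAGE_SIZE << order;
	new_frags = (pagelen + psize - 1) >> (PAGE_SHIFT + order);

	src = malloc(pagelen);
	pages = calloc(new_frags, sizeof(*pages));
	for (unsigned long i = 0; i < pagelen; i++)
		src[i] = (unsigned char)i;
	for (unsigned long i = 0; i < new_frags; i++)
		pages[i] = malloc(psize);

	/* Mirror of the inner copy loop: each chunk is bounded by psize - d_off. */
	while (done < pagelen) {
		unsigned long copy;

		if (d_off == psize) {
			d_off = 0;
			frag++;
		}
		copy = psize - d_off;
		if (copy > pagelen - done)
			copy = pagelen - done;
		memcpy(pages[frag] + d_off, src + done, copy);
		done += copy;
		d_off += copy;
	}

	/* All slots except the last are filled to psize; the last holds d_off. */
	assert(frag == new_frags - 1);
	printf("order=%u psize=%lu new_frags=%lu last_frag_len=%lu\n",
	       order, psize, new_frags, d_off);

	for (unsigned long i = 0; i < new_frags; i++)
		free(pages[i]);
	free(pages);
	free(src);
	return 0;
}

Built with a stock C compiler, this prints order=3 psize=32768 new_frags=10
last_frag_len=12288 for the assumed 300 KB payload, i.e. nine full 32 KB
fragments plus a 12 KB tail.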