author	Jason Xing	2023-03-08 10:11:53 +0800
committer	Jakub Kicinski	2023-03-09 23:32:12 -0800
commit	fd9c31f834416e3060061dbcb45ababaa25cdfe2 (patch)
tree	017bbd9b05dd8793eedbe4d206ce12a1ab0090e4
parent	d0928c1c5b3db0cd80e867eb49f171f096f03b8c (diff)
udp: introduce __sk_mem_schedule() usage
Keep the memory accounting schema consistent across different protocols by using __sk_mem_schedule(). This also slightly adjusts how forward-allocated memory is calculated compared to before: with this patch applied, the receive path no longer schedules an extra amount of memory.

Link: https://lore.kernel.org/lkml/20230221110344.82818-1-kerneljasonxing@gmail.com/
Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20230308021153.99777-1-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--	net/ipv4/udp.c	27
1 file changed, 16 insertions(+), 11 deletions(-)
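To illustrate why the new path reserves less memory, below is a minimal user-space sketch (not kernel code): mem_pages() mirrors what sk_mem_pages() does, PAGE_SHIFT and the byte counts are arbitrary assumptions for the example, and the sock fields are replaced by plain integers. The old code rounded the full skb size up to whole pages on top of whatever sk_forward_alloc already covered, while udp_rmem_schedule() only schedules the missing delta, as __sk_mem_schedule() does for other protocols.

	#include <stdio.h>

	/* Simplified model of the page-based memory accounting.
	 * PAGE_SHIFT and the sample values below are assumptions. */
	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1 << PAGE_SHIFT)

	/* Round a byte count up to whole pages, like sk_mem_pages(). */
	static int mem_pages(int amt)
	{
		return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}

	int main(void)
	{
		int forward_alloc = 3000;	/* bytes already charged to the socket */
		int size = 5000;		/* truesize of the incoming skb */

		/* Old path: reserve sk_mem_pages(size) worth of pages,
		 * regardless of how much forward_alloc already covers. */
		int old_reserved = mem_pages(size) << PAGE_SHIFT;

		/* New path: only schedule the shortfall, rounded up to pages,
		 * as udp_rmem_schedule()/__sk_mem_schedule() do. */
		int delta = size - forward_alloc;
		int new_reserved = delta > 0 ? mem_pages(delta) << PAGE_SHIFT : 0;

		printf("old: +%d bytes, new: +%d bytes\n", old_reserved, new_reserved);
		return 0;
	}

With these sample numbers the old path reserves 8192 extra bytes while the new path reserves 4096, which is the "extra amount of memory" the commit message refers to.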
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c605d171eb2d..dc8feb54d835 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1531,10 +1531,21 @@ static void busylock_release(spinlock_t *busy)
spin_unlock(busy);
}
+static int udp_rmem_schedule(struct sock *sk, int size)
+{
+ int delta;
+
+ delta = size - sk->sk_forward_alloc;
+ if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
+ return -ENOBUFS;
+
+ return 0;
+}
+
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff_head *list = &sk->sk_receive_queue;
- int rmem, delta, amt, err = -ENOMEM;
+ int rmem, err = -ENOMEM;
spinlock_t *busy = NULL;
int size;
@@ -1567,16 +1578,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
goto uncharge_drop;
spin_lock(&list->lock);
- if (size >= sk->sk_forward_alloc) {
- amt = sk_mem_pages(size);
- delta = amt << PAGE_SHIFT;
- if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
- err = -ENOBUFS;
- spin_unlock(&list->lock);
- goto uncharge_drop;
- }
-
- sk->sk_forward_alloc += delta;
+ err = udp_rmem_schedule(sk, size);
+ if (err) {
+ spin_unlock(&list->lock);
+ goto uncharge_drop;
}
sk->sk_forward_alloc -= size;