path: root/net/ipv6
author	Eric Dumazet	2023-02-02 09:41:00 +0000
committer	Greg Kroah-Hartman	2023-04-13 16:55:23 +0200
commit	53a0031217c9b8d7a53edfb70d1633a1935c4395 (patch)
tree	acbb4df6550728d2100bd7592e55cb3a331eb1e5 /net/ipv6
parent	73b99db789ef6c18a0fd55a2ac6f7c1e59f06e01 (diff)
raw: use net_hash_mix() in hash function
[ Upstream commit 6579f5bacc2c4cbc5ef6abb45352416939d1f844 ]

Some applications seem to rely on RAW sockets.

If they use private netns, we can avoid piling all RAW
sockets bound to a given protocol into a single bucket.

Also place (struct raw_hashinfo).lock into its own
cache line to limit false sharing.

Alternative would be to have per-netns hashtables,
but this seems too expensive for most netns
where RAW sockets are not used.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Stable-dep-of: 0a78cf7264d2 ("raw: Fix NULL deref in raw_get_next().")
Signed-off-by: Sasha Levin <sashal@kernel.org>
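For context, the raw_hashfunc() helper that the hunks below call is added
outside net/ipv6 (in include/net/raw.h), so it does not appear in this
diffstat. A minimal sketch of its likely shape, assuming RAW_HTABLE_LOG is
the companion constant to RAW_HTABLE_SIZE (i.e. RAW_HTABLE_SIZE ==
1 << RAW_HTABLE_LOG):

/* Sketch of the helper introduced by the upstream commit.
 * net_hash_mix() folds a per-netns random value into the bucket
 * choice, so RAW sockets bound to the same protocol in different
 * netns no longer all pile into bucket (proto & (RAW_HTABLE_SIZE - 1)).
 */
static inline int raw_hashfunc(const struct net *net, u32 proto)
{
	return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
}

Because net_hash_mix() is constant within a netns, lookup and delivery
still hash to the same bucket as bind did; only the mapping from protocol
to bucket becomes netns-dependent.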
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/raw.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8ffeac745656..350fb81eda14 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -152,7 +152,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = saddr + 1;
 
-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
+	hash = raw_hashfunc(net, nexthdr);
 	hlist = &raw_v6_hashinfo.ht[hash];
 	rcu_read_lock();
 	sk_nulls_for_each(sk, hnode, hlist) {
@@ -338,7 +338,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 	struct sock *sk;
 	int hash;
 
-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
+	hash = raw_hashfunc(net, nexthdr);
 	hlist = &raw_v6_hashinfo.ht[hash];
 	rcu_read_lock();
 	sk_nulls_for_each(sk, hnode, hlist) {
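The false-sharing half of the changelog also lands outside this diffstat.
A hedged sketch of the described layout, assuming a ____cacheline_aligned
annotation on the hash table is what moves ->lock into its own cache line:

/* Sketch of the struct layout change described in the changelog.
 * Aligning ht[] to a cache line boundary leaves ->lock alone in its
 * own line, so writers taking the spinlock do not bounce the cache
 * lines that RCU readers traverse while walking the buckets.
 */
struct raw_hashinfo {
	spinlock_t lock;

	struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
};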