author    Eric Dumazet          2019-10-09 15:32:35 -0700
committer Jakub Kicinski        2019-10-09 21:43:00 -0700
commit    eac66402d1c342f07ff38f8d631ff95eb7ad3220 (patch)
tree      d25d4b62d8f9a01ed18b820307bfb0080a26a28f /net/core
parent    8265792bf8871acc2d00fd03883d830e2249d395 (diff)
net: annotate sk->sk_rcvlowat lockless reads
sock_rcvlowat() or int_sk_rcvlowat() might be called without the socket lock,
for example from tcp_poll().

Use READ_ONCE() to document the fact that other cpus might change
sk->sk_rcvlowat under us and avoid KCSAN splats.

Use WRITE_ONCE() on write sides too.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
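The diff below covers only the write sides. The matching read-side change is to
wrap the field access in READ_ONCE(); a minimal sketch of such an accessor,
modelled on the kernel's sock_rcvlowat() helper but simplified rather than
quoted verbatim:

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	/* No socket lock is held here (e.g. when called from tcp_poll()),
	 * so READ_ONCE() documents that another CPU may update
	 * sk->sk_rcvlowat concurrently and keeps KCSAN quiet.
	 */
	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);

	return v ? : 1;
}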
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/filter.c   2
-rw-r--r--   net/core/sock.c     2
2 files changed, 2 insertions, 2 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index ed6563622ce3..a50c0b6846f2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4274,7 +4274,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 		case SO_RCVLOWAT:
 			if (val < 0)
 				val = INT_MAX;
-			sk->sk_rcvlowat = val ? : 1;
+			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 			break;
 		case SO_MARK:
 			if (sk->sk_mark != val) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 1cf06934da50..b7c5c6ea51ba 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -974,7 +974,7 @@ set_rcvbuf:
 		if (sock->ops->set_rcvlowat)
 			ret = sock->ops->set_rcvlowat(sk, val);
 		else
-			sk->sk_rcvlowat = val ? : 1;
+			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 		break;
 	case SO_RCVTIMEO_OLD: