author	Eric Dumazet <edumazet@google.com>	2019-11-05 14:11:54 -0800
committer	David S. Miller <davem@davemloft.net>	2019-11-06 16:14:48 -0800
commit	099ecf59f05b5f30f42ebac0ab8cb94f9b18c90c (patch)
tree	06d62b7b24635bbb78ad38f24933cad260b5c346 /net/sctp
parent	288efe8606b62d0753ba6722b36ef241877251fd (diff)
net: annotate lockless accesses to sk->sk_max_ack_backlog
sk->sk_max_ack_backlog can be read without any lock being held at least in TCP/DCCP cases. We need to use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing and/or potential KCSAN warnings.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
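As background for the annotation this patch applies, below is a minimal, hypothetical userspace sketch of what READ_ONCE()/WRITE_ONCE() boil down to (the kernel's real macros add type/size checking and other machinery). Forcing the access through a volatile pointer keeps the compiler from tearing, fusing, or re-reading the plain load/store, which is the property the commit message relies on for sk->sk_max_ack_backlog. The struct fake_sock type and the helper functions are illustrative stand-ins only, not part of the patch.

#include <stdio.h>

/* Simplified stand-ins for the kernel macros (illustrative only). */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

/* Hypothetical stand-in for the struct sock fields touched by the patch. */
struct fake_sock {
	unsigned int sk_ack_backlog;
	unsigned int sk_max_ack_backlog;
};

/* Writer side, e.g. listen(): publish the new backlog limit with one store. */
static void set_max_backlog(struct fake_sock *sk, unsigned int backlog)
{
	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
}

/* Reader side, e.g. a diag dump: sample the limit without holding the lock. */
static unsigned int get_max_backlog(const struct fake_sock *sk)
{
	return READ_ONCE(sk->sk_max_ack_backlog);
}

int main(void)
{
	struct fake_sock sk = { .sk_ack_backlog = 0, .sk_max_ack_backlog = 0 };

	set_max_backlog(&sk, 128);
	printf("max_ack_backlog = %u\n", get_max_backlog(&sk));
	return 0;
}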
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/diag.c	2
-rw-r--r--	net/sctp/socket.c	4
2 files changed, 3 insertions, 3 deletions
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index f873f15407de..8a15146faaeb 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -426,7 +426,7 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 		r->idiag_wqueue = infox->asoc->sndbuf_used;
 	} else {
 		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
-		r->idiag_wqueue = sk->sk_max_ack_backlog;
+		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
 	}
 	if (infox->sctpinfo)
 		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ffd3262b7a41..53abb97e0061 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8376,7 +8376,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 		}
 	}
 
-	sk->sk_max_ack_backlog = backlog;
+	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 	return sctp_hash_endpoint(ep);
 }
 
@@ -8430,7 +8430,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 
 	/* If we are already listening, just update the backlog */
 	if (sctp_sstate(sk, LISTENING))
-		sk->sk_max_ack_backlog = backlog;
+		WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 	else {
 		err = sctp_listen_start(sk, backlog);
 		if (err)