author	Marcelo Ricardo Leitner	2016-04-13 19:12:29 -0300
committer	David S. Miller	2016-04-15 17:22:20 -0400
commit	311b21774f1389f9c34eac4da90c43c95fc2b62b (patch)
tree	a4efcdfab7d015bd8fbf2d040d8c5b7f2f64fe4d /net/sctp
parent	936d4b41b08f566d0901e81321e4f51ae35c1f45 (diff)
sctp: simplify sk_receive_queue locking
SCTP already serializes access to rcvbuf through its sock lock:
sctp_recvmsg takes it right at the start and releases it at the end,
while the rx path also takes the lock before doing any socket
processing. In sctp_rcv() it checks whether a user is using the socket
and, if so, queues incoming packets to the backlog. The backlog
processing does the same. Even timers perform this check and
re-schedule if a user is using the socket.

Simplifying this allows us to remove sctp_skb_list_tail and get rid of
some expensive locking. The lists it is used on are also manipulated
with functions like __skb_queue_tail and __skb_unlink in the same
context, as in sctp_ulpq_tail_event() and sctp_clear_pd(), and
sctp_close() purges them while holding only the sock lock. Therefore
the locking performed by sctp_skb_list_tail() is not necessary. This
patch removes the function and replaces its calls with
skb_queue_splice_tail_init() instead.

The biggest gain is in sctp_ulpq_tail_event(): events always arrive
there as a list, even when only a single skb is queued, and this was
triggering expensive spin_lock_irqsave/_irqrestore calls for every data
chunk received. As SCTP delivers each data chunk through a
corresponding recvmsg, the smaller the chunks, the more effective this
change is.

Before this patch, with 30-byte chunks:

netperf -t SCTP_STREAM -H 192.168.1.2 -cC -l 60 -- -m 30 \
        -S 400000 400000 -s 400000 400000

on a 10Gbit link with 1500 MTU:

SCTP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.1.1 () port 0 AF_INET
Recv   Send    Send                          Utilization       Service Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local   remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % S      us/KB   us/KB
425984 425984     30    60.00       137.45   7.34     7.36     52.504  52.608

With it:

SCTP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 192.168.1.1 () port 0 AF_INET
Recv   Send    Send                          Utilization       Service Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local   remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % S      us/KB   us/KB
425984 425984     30    60.00       179.10   7.97     6.70     43.740  36.788

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
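For reference, the removed helper is an inline in include/net/sctp/sctp.h
and thus outside the diffstat below. A sketch reconstructed from the
description above (not the verbatim removed source): it spliced one skb
list onto another while taking both per-queue spinlocks, irqsave on the
destination. With the sock lock already serializing every user of these
queues, both spinlocks are pure overhead.

/* Sketch of the removed helper, reconstructed from the commit
 * description; splice under two per-queue spinlocks.
 */
static inline void sctp_skb_list_tail(struct sk_buff_head *list,
				      struct sk_buff_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&head->lock, flags);
	spin_lock(&list->lock);

	skb_queue_splice_tail_init(list, head);

	spin_unlock(&list->lock);
	spin_unlock_irqrestore(&head->lock, flags);
}

After the patch, callers invoke skb_queue_splice_tail_init(list, head)
directly, with no spinlocks at all.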
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/socket.c	4
-rw-r--r--	net/sctp/ulpqueue.c	5
2 files changed, 4 insertions, 5 deletions
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 36697f85ce48..bf265a4bba6e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6766,13 +6766,11 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 	 * However, this function was correct in any case. 8)
 	 */
 	if (flags & MSG_PEEK) {
-		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb)
 			atomic_inc(&skb->users);
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
 	} else {
-		skb = skb_dequeue(&sk->sk_receive_queue);
+		skb = __skb_dequeue(&sk->sk_receive_queue);
 	}
 
 	if (skb)
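The hunk above drops the queue spinlock around the peek/dequeue, which
is safe because every caller of sctp_skb_recv_datagram() already holds
the sock lock. A condensed, illustrative sketch of the reader side (not
verbatim kernel code; error handling elided):

	/* sctp_recvmsg() wraps the whole receive operation in the
	 * sock lock, so the lockless __skb_dequeue() above never
	 * races with another reader or with the rx path, which
	 * backlogs packets while the socket is owned by a user.
	 */
	lock_sock(sk);
	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (skb) {
		/* ... copy the event or data to user space ... */
	}
	release_sock(sk);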
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 72e5b3e41cdd..ec12a8920e5f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -141,7 +141,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 	 */
 	if (!skb_queue_empty(&sp->pd_lobby)) {
 		struct list_head *list;
-		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+		skb_queue_splice_tail_init(&sp->pd_lobby,
+					   &sk->sk_receive_queue);
 		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
 		INIT_LIST_HEAD(list);
 		return 1;
@@ -252,7 +253,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	 * collected on a list.
 	 */
 	if (skb_list)
-		sctp_skb_list_tail(skb_list, queue);
+		skb_queue_splice_tail_init(skb_list, queue);
 	else
 		__skb_queue_tail(queue, skb);
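skb_queue_splice_tail_init() itself is the stock helper from
<linux/skbuff.h>: roughly, it links the source list onto the tail of
the destination and re-initializes the source, touching no locks:

static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *)head);
		head->qlen += list->qlen;	/* account the moved skbs */
		__skb_queue_head_init(list);	/* leave the source empty */
	}
}

Callers are responsible for serialization, which in SCTP's case is the
sock lock described in the commit message.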