author    Jesper Dangaard Brouer  2017-01-09 16:04:14 +0100
committer David S. Miller         2017-01-09 15:49:12 -0500
commit    7ba91ecb16824f74ba4fcbc4e88cd4d24a839b25 (patch)
tree      23bd866126b5115cf4a14541f7700117a38efc29 /net/ipv6
parent    c0303efeab7391ec51c337e0ac5740860ad01fe7 (diff)
net: for rate-limited ICMP replies save one atomic operation
It is possible to avoid the atomic operation in icmp{v6,}_xmit_lock, by
checking the sysctl_icmp_msgs_per_sec ratelimit before these calls, as
pointed out by Eric Dumazet, but the BH disabled state must be correct.

The icmp_global_allow() call states it must be called with BH disabled.
This protection was given by the calls icmp_xmit_lock and
icmpv6_xmit_lock. Thus, split out local_bh_disable/enable from these
functions and maintain it explicitly at callers.

Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
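In effect, the callers of icmpv6_xmit_lock() now hold BH disabled across both
the global rate-limit check and the per-socket trylock, which lets the lock
helpers drop their own local_bh_disable()/spin_unlock_bh() and skip the
per-socket atomic operation entirely when the reply is rate-limited. A
condensed sketch of the resulting flow in icmp6_send(), pieced together from
the hunks below (all other setup and error handling elided):

	/* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
	local_bh_disable();

	/* Check global sysctl_icmp_msgs_per_sec ratelimit before
	 * taking the per-socket lock.
	 */
	if (!icmpv6_global_allow(type))
		goto out_bh_enable;

	sk = icmpv6_xmit_lock(net);	/* plain spin_trylock; BH already off */
	if (!sk)
		goto out_bh_enable;

	/* ... build and transmit the ICMPv6 message ... */

out:
	icmpv6_xmit_unlock(sk);		/* plain spin_unlock */
out_bh_enable:
	local_bh_enable();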
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/icmp.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b26ae8b5c1ce..230b5aac9f03 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -110,19 +110,17 @@ static const struct inet6_protocol icmpv6_protocol = {
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
+/* Called with BH disabled */
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
struct sock *sk;
- local_bh_disable();
-
sk = icmpv6_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path (f.e. SIT or
* ip6ip6 tunnel) signals dst_link_failure() for an
* outgoing ICMP6 packet.
*/
- local_bh_enable();
return NULL;
}
return sk;
@@ -130,7 +128,7 @@ static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
- spin_unlock_bh(&sk->sk_lock.slock);
+ spin_unlock(&sk->sk_lock.slock);
}
/*
@@ -489,6 +487,13 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
return;
}
+ /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
+ local_bh_disable();
+
+ /* Check global sysctl_icmp_msgs_per_sec ratelimit */
+ if (!icmpv6_global_allow(type))
+ goto out_bh_enable;
+
mip6_addr_swap(skb);
memset(&fl6, 0, sizeof(fl6));
@@ -507,10 +512,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
sk = icmpv6_xmit_lock(net);
if (!sk)
- return;
-
- if (!icmpv6_global_allow(type))
- goto out;
+ goto out_bh_enable;
sk->sk_mark = mark;
np = inet6_sk(sk);
@@ -571,6 +573,8 @@ out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
+out_bh_enable:
+ local_bh_enable();
}
/* Slightly more convenient version of icmp6_send.
@@ -684,9 +688,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
fl6.flowi6_uid = sock_net_uid(net, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+ local_bh_disable();
sk = icmpv6_xmit_lock(net);
if (!sk)
- return;
+ goto out_bh_enable;
sk->sk_mark = mark;
np = inet6_sk(sk);
@@ -728,6 +733,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
+out_bh_enable:
+ local_bh_enable();
}
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)