author    Ilya Maximets    2019-07-03 15:09:16 +0300
committer Daniel Borkmann  2019-07-09 01:43:26 +0200
commit    bf0bdd1343efbbf65b4d53aef1fce14acbd79d50
tree      596c5002c0217bb4c9ab8e1ea2fbf49cf0e78f75
parent    bc2d8afecbec33bd4549ce1ef904383fde21d385
xdp: fix race on generic receive path
Unlike driver mode, generic xdp receive could be triggered by
different threads on different CPU cores at the same time, leading
to fill and rx queue breakage. For example, this could happen while
sending packets from two processes to the first interface of a veth
pair while the second end of it is open with an AF_XDP socket.

We need to take a lock for each generic receive to avoid the race.

Fixes: c497176cb2e4 ("xsk: add Rx receive functions and poll support")
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Tested-by: William Tu <u9012063@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
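The race is easiest to see in the fill queue: it is a single-consumer ring, and xskq_peek_addr() and xskq_discard_addr() are separate steps. Two CPUs running the generic receive path for the same socket can both peek the same address before either one discards it, handing one umem chunk to two packets at once. Below is a minimal user-space analogue of the problem and of the fix (an illustration only, not kernel code; the ring and the helper names peek_addr()/discard_addr() are made up for the sketch):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

static uint64_t ring[RING_SIZE];	/* stand-in for the fill queue */
static unsigned int cons;		/* single consumer index, no atomics */
static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of xskq_peek_addr(): read the next entry without consuming it. */
static int peek_addr(uint64_t *addr)
{
	if (cons >= RING_SIZE)
		return 0;
	*addr = ring[cons];
	return 1;
}

/* Analogue of xskq_discard_addr(): consume the entry peeked above. */
static void discard_addr(void)
{
	cons++;
}

static void *generic_rcv(void *arg)
{
	uint64_t addr;

	/*
	 * Without this lock, both threads can peek the same addr before
	 * either one discards it -- the breakage this patch fixes.
	 */
	pthread_mutex_lock(&rx_lock);
	if (peek_addr(&addr)) {
		printf("thread %ld consumed addr %llu\n",
		       (long)(intptr_t)arg, (unsigned long long)addr);
		discard_addr();
	}
	pthread_mutex_unlock(&rx_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	for (unsigned int i = 0; i < RING_SIZE; i++)
		ring[i] = i;

	pthread_create(&t1, NULL, generic_rcv, (void *)(intptr_t)1);
	pthread_create(&t2, NULL, generic_rcv, (void *)(intptr_t)2);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

With the lock held across both steps, peek and discard become one atomic operation per thread, which is exactly what taking xs->rx_lock across xsk_generic_rcv() achieves in the patch below.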
 include/net/xdp_sock.h |  2 ++
 net/xdp/xsk.c          | 31 ++++++++++++++++++++++++++---------
 2 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 057b159ff8b9..de4e3a353df3 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -67,6 +67,8 @@ struct xdp_sock {
* in the SKB destructor callback.
*/
spinlock_t tx_completion_lock;
+ /* Protects generic receive. */
+ spinlock_t rx_lock;
u64 rx_dropped;
};
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 74417a851ed5..0574f008954c 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -129,13 +129,17 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
u64 addr;
int err;
- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
- return -EINVAL;
+ spin_lock_bh(&xs->rx_lock);
+
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
- xs->rx_dropped++;
- return -ENOSPC;
+ err = -ENOSPC;
+ goto out_drop;
}
addr += xs->umem->headroom;
@@ -144,13 +148,21 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
memcpy(buffer, xdp->data_meta, len + metalen);
addr += metalen;
err = xskq_produce_batch_desc(xs->rx, addr, len);
- if (!err) {
- xskq_discard_addr(xs->umem->fq);
- xsk_flush(xs);
- return 0;
- }
+ if (err)
+ goto out_drop;
+
+ xskq_discard_addr(xs->umem->fq);
+ xskq_produce_flush_desc(xs->rx);
+ spin_unlock_bh(&xs->rx_lock);
+
+ xs->sk.sk_data_ready(&xs->sk);
+ return 0;
+
+out_drop:
xs->rx_dropped++;
+out_unlock:
+ spin_unlock_bh(&xs->rx_lock);
return err;
}
@@ -787,6 +799,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs = xdp_sk(sk);
mutex_init(&xs->mutex);
+ spin_lock_init(&xs->rx_lock);
spin_lock_init(&xs->tx_completion_lock);
mutex_lock(&net->xdp.lock);
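Putting the xsk.c hunks together, the receive path after this patch reads as follows (a condensed sketch assembled from the diff above, not a verbatim copy of the upstream file; declarations are abbreviated and the frame copy is elided):

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp->data_end - xdp->data;	/* sketch: declarations abbreviated */
	u64 addr;
	int err;

	/* Serialize concurrent generic receives on this socket. */
	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	/* ... copy the frame into the umem chunk at addr (elided) ... */

	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_discard_addr(xs->umem->fq);
	xskq_produce_flush_desc(xs->rx);
	spin_unlock_bh(&xs->rx_lock);

	/* Wake the socket outside the lock; it does not need rx_lock. */
	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

Note that sk_data_ready() is invoked only after dropping rx_lock: waking the socket does not require the lock, and calling it outside keeps the critical section short.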