-rw-r--r--  include/net/xdp_sock.h |  4 ++--
-rw-r--r--  net/xdp/xdp_umem.c     | 14 +++++++-------
-rw-r--r--  net/xdp/xsk.c          |  8 ++++----
-rw-r--r--  net/xdp/xsk_queue.c    |  4 ++--
-rw-r--r--  net/xdp/xsk_queue.h    |  8 ++++----
5 files changed, 19 insertions, 19 deletions
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index e86ec48ef627..b72f1f4c3b15 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -62,8 +62,8 @@ struct xdp_umem {
 	struct net_device *dev;
 	struct xdp_umem_fq_reuse *fq_reuse;
 	bool zc;
-	spinlock_t xsk_list_lock;
-	struct list_head xsk_list;
+	spinlock_t xsk_tx_list_lock;
+	struct list_head xsk_tx_list;
 };
 
 /* Nodes are linked in the struct xdp_sock map_list field, and used to
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index ed7a6060f73c..7211f4572760 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -30,9 +30,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	if (!xs->tx)
 		return;
 
-	spin_lock_irqsave(&umem->xsk_list_lock, flags);
-	list_add_rcu(&xs->list, &umem->xsk_list);
-	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
+	list_add_rcu(&xs->list, &umem->xsk_tx_list);
+	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
 }
 
 void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
@@ -42,9 +42,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	if (!xs->tx)
 		return;
 
-	spin_lock_irqsave(&umem->xsk_list_lock, flags);
+	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
 	list_del_rcu(&xs->list);
-	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
 }
 
 /* The umem is stored both in the _rx struct and the _tx struct as we do
@@ -395,8 +395,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->pgs = NULL;
 	umem->user = NULL;
 	umem->flags = mr->flags;
-	INIT_LIST_HEAD(&umem->xsk_list);
-	spin_lock_init(&umem->xsk_list_lock);
+	INIT_LIST_HEAD(&umem->xsk_tx_list);
+	spin_lock_init(&umem->xsk_tx_list_lock);
 
 	refcount_set(&umem->users, 1);
 
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f6e6609f70a3..45ffd67b367d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -75,7 +75,7 @@ void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
 		return;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
 	}
 	rcu_read_unlock();
@@ -102,7 +102,7 @@ void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
 		return;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
 	}
 	rcu_read_unlock();
@@ -305,7 +305,7 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 	struct xdp_sock *xs;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
 		__xskq_cons_release(xs->tx);
 		xs->sk.sk_write_space(&xs->sk);
 	}
@@ -318,7 +318,7 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 	struct xdp_sock *xs;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
 		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
 			continue;
 
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index c90e9c1e3c63..57fb81bd593c 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -9,12 +9,12 @@
 
 #include "xsk_queue.h"
 
-void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
+void xskq_set_umem(struct xsk_queue *q, u64 umem_size, u64 chunk_mask)
 {
 	if (!q)
 		return;
 
-	q->size = size;
+	q->umem_size = umem_size;
 	q->chunk_mask = chunk_mask;
 }
 
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index b50bb5c76da5..648733ec24ac 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -30,7 +30,7 @@ struct xdp_umem_ring {
 
 struct xsk_queue {
 	u64 chunk_mask;
-	u64 size;
+	u64 umem_size;
 	u32 ring_mask;
 	u32 nentries;
 	u32 cached_prod;
@@ -123,7 +123,7 @@ static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
 	u64 base_addr = xsk_umem_extract_addr(addr);
 
 	addr = xsk_umem_add_offset_to_addr(addr);
-	if (base_addr >= q->size || addr >= q->size ||
+	if (base_addr >= q->umem_size || addr >= q->umem_size ||
 	    xskq_cons_crosses_non_contig_pg(umem, addr, length)) {
 		q->invalid_descs++;
 		return false;
@@ -134,7 +134,7 @@ static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
 
 static inline bool xskq_cons_is_valid_addr(struct xsk_queue *q, u64 addr)
 {
-	if (addr >= q->size) {
+	if (addr >= q->umem_size) {
 		q->invalid_descs++;
 		return false;
 	}
@@ -379,7 +379,7 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
 	return q ? q->invalid_descs : 0;
 }
 
-void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
+void xskq_set_umem(struct xsk_queue *q, u64 umem_size, u64 chunk_mask);
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
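
For context, the renamed xsk_tx_list/xsk_tx_list_lock fields implement a standard RCU-protected list: writers serialize additions and removals with the spinlock and use the _rcu list helpers, while readers walk the list locklessly inside an RCU read-side section. The fragment below is a condensed, hypothetical kernel-style sketch of that pattern (the demo_* names are illustrative, not part of the patch); it mirrors the calls visible in xdp_add_sk_umem() and the xsk.c iterators above.

/* Hypothetical, condensed sketch of the RCU-protected TX-list pattern
 * touched by this patch; demo_* names are illustrative only.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_umem {
	struct list_head xsk_tx_list;	/* sockets with a TX ring */
	spinlock_t xsk_tx_list_lock;	/* serializes list updates */
};

struct demo_sock {
	struct list_head list;
};

/* Writer side: add/remove under the spinlock, using the _rcu list
 * helpers so concurrent readers always observe a consistent list.
 */
static void demo_add(struct demo_umem *umem, struct demo_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_tx_list);
	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}

/* Reader side: lockless traversal under rcu_read_lock(), as in
 * xsk_umem_consume_tx() and the need_wakeup helpers above.
 */
static void demo_for_each_tx_sock(struct demo_umem *umem)
{
	struct demo_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
		/* per-socket TX processing would go here */
	}
	rcu_read_unlock();
}

The irqsave variant of the spinlock matches what the patched code already uses, so writer updates stay safe regardless of the context they are called from; readers never take the lock at all.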