author	Tushar Vyavahare	2024-04-02 11:45:24 +0000
committer	Daniel Borkmann	2024-04-03 16:00:57 +0200
commit	c3bd015090f24dcd2e839db1401e948ad95ce803 (patch)
tree	778b04e30d8b6a1e4f71f4a5bb13fd811b567dcc /tools
parent	7effe3fdc049a34c56a68671100b5570e53e8f0a (diff)
selftests/xsk: Make batch size variable
Convert the constant BATCH_SIZE into a variable named batch_size to allow dynamic modification at runtime. This is required for the forthcoming changes to support testing different hardware ring sizes.

While running these tests, a bug was identified when the batch size is roughly the same as the NIC ring size. This has now been addressed by Maciej's fix in commit 913eda2b08cc ("i40e: xsk: remove count_mask").

Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20240402114529.545475-3-tushar.vyavahare@intel.com
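For illustration, a minimal sketch of what the conversion enables, with hypothetical names: the stripped-down struct and the cap_batch_to_ring() helper below are not part of this patch; only the batch_size field and DEFAULT_BATCH_SIZE come from it.

	#include <linux/types.h>

	#define DEFAULT_BATCH_SIZE 64	/* default carried over from xskxceiver.h */

	/* Stripped-down stand-in for struct xsk_socket_info from xskxceiver.h. */
	struct xsk_socket_info {
		__u32 batch_size;	/* replaces the compile-time constant BATCH_SIZE */
	};

	/* Hypothetical helper: cap the batch at the HW ring size so a test
	 * never tries to reserve more descriptors than the ring can hold. */
	static void cap_batch_to_ring(struct xsk_socket_info *xsk, __u32 ring_size)
	{
		if (ring_size < xsk->batch_size)
			xsk->batch_size = ring_size;
	}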
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/bpf/xskxceiver.c	20
-rw-r--r--	tools/testing/selftests/bpf/xskxceiver.h	3
2 files changed, 13 insertions, 10 deletions
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index b1102ee13faa..eaa102c8098b 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -239,7 +239,7 @@ static void enable_busy_poll(struct xsk_socket_info *xsk)
 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
 		exit_with_error(errno);
 
-	sock_opt = BATCH_SIZE;
+	sock_opt = xsk->batch_size;
 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
 		exit_with_error(errno);
@@ -439,6 +439,7 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
 	for (j = 0; j < MAX_SOCKETS; j++) {
 		memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
 		ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+		ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
 		if (i == 0)
 			ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
 		else
@@ -1087,7 +1088,7 @@ static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
 		return TEST_CONTINUE;
 	}
 
-	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
+	rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
 	if (!rcvd)
 		return TEST_CONTINUE;
 
@@ -1239,7 +1240,8 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
 
 	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
 	/* pkts_in_flight might be negative if many invalid packets are sent */
-	if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
+	if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
+				    buffer_len)) {
 		ret = kick_tx(xsk);
 		if (ret)
 			return TEST_FAILURE;
@@ -1249,7 +1251,7 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
 	fds.fd = xsk_socket__fd(xsk->xsk);
 	fds.events = POLLOUT;
 
-	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
+	while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
 		if (use_poll) {
 			ret = poll(&fds, 1, POLL_TMOUT);
 			if (timeout) {
@@ -1269,10 +1271,10 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
 			}
 		}
 
-		complete_pkts(xsk, BATCH_SIZE);
+		complete_pkts(xsk, xsk->batch_size);
 	}
 
-	for (i = 0; i < BATCH_SIZE; i++) {
+	for (i = 0; i < xsk->batch_size; i++) {
 		struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
 		u32 nb_frags_left, nb_frags, bytes_written = 0;
 
@@ -1280,9 +1282,9 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
 			break;
 
 		nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
-		if (nb_frags > BATCH_SIZE - i) {
+		if (nb_frags > xsk->batch_size - i) {
 			pkt_stream_cancel(pkt_stream);
-			xsk_ring_prod__cancel(&xsk->tx, BATCH_SIZE - i);
+			xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
 			break;
 		}
 		nb_frags_left = nb_frags;
@@ -1370,7 +1372,7 @@ static int wait_for_tx_completion(struct xsk_socket_info *xsk)
 			return TEST_FAILURE;
 		}
 
-		complete_pkts(xsk, BATCH_SIZE);
+		complete_pkts(xsk, xsk->batch_size);
 	}
 
 	return TEST_PASS;
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index f174df2d693f..425304e52f35 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -44,7 +44,7 @@
 #define MAX_ETH_JUMBO_SIZE 9000
 #define USLEEP_MAX 10000
 #define SOCK_RECONF_CTR 10
-#define BATCH_SIZE 64
+#define DEFAULT_BATCH_SIZE 64
 #define POLL_TMOUT 1000
 #define THREAD_TMOUT 3
 #define DEFAULT_PKT_CNT (4 * 1024)
@@ -91,6 +91,7 @@ struct xsk_socket_info {
 	struct pkt_stream *pkt_stream;
 	u32 outstanding_tx;
 	u32 rxqsize;
+	u32 batch_size;
 	u8 dst_mac[ETH_ALEN];
 	u8 src_mac[ETH_ALEN];
 };
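Note on the default: __test_spec_init() initializes batch_size to DEFAULT_BATCH_SIZE (64, the old BATCH_SIZE value) for every socket, so existing tests run unchanged; only tests that explicitly override xsk->batch_size, such as the forthcoming hardware ring-size tests this series prepares for, will behave differently.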