author     Daniel Borkmann        2019-12-19 22:19:51 +0100
committer  Alexei Starovoitov     2019-12-19 13:39:22 -0800
commit     3123d8018d4686cf193806c4e27a9853550ed895
tree       7995fcdf2db0c9bc3d6e5015b256b71c81537723 /tools/testing
parent     cc52d9140aa920d8d61c7f6de3fff5fea6692ea9
bpf: Add further test_verifier cases for record_func_key
Expand dummy prog generation so that we can easily check return codes, and
add a few more test cases to make sure we keep tracking pruning behavior.
# ./test_verifier
[...]
#1066/p XDP pkt read, pkt_data <= pkt_meta', bad access 1 OK
#1067/p XDP pkt read, pkt_data <= pkt_meta', bad access 2 OK
Summary: 1580 PASSED, 0 SKIPPED, 0 FAILED
Also verified that the JIT dump of the added test cases looks good.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/df7200b6021444fd369376d227de917357285b65.1576789878.git.daniel@iogearbox.net
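
For reference, the new tests rely on the reworked create_prog_array() (see the diff below) populating the test prog array so that every slot returns a distinct code. Here is a minimal sketch of that population, assuming the libbpf wrappers (bpf_create_map(), bpf_map_update_elem()) and the create_prog_dummy_* helpers from test_verifier.c; the function name sketch_prog_array() is made up for illustration, and error handling plus fd bookkeeping are trimmed:

/* Sketch only: mirrors what the reworked create_prog_array() in the
 * patch below sets up for the fixup_prog1 map.
 */
static int sketch_prog_array(enum bpf_prog_type prog_type)
{
        int mfd, fd, key;

        /* 4-slot prog array as used for the fixup_prog1 map */
        mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                             sizeof(int), 4, 0);
        if (mfd < 0)
                return -1;

        /* key 0: dummy prog that just returns 42 */
        fd = create_prog_dummy_simple(prog_type, 42);
        key = 0;
        bpf_map_update_elem(mfd, &key, &fd, BPF_ANY);

        /* key 1: prog that tail-calls key 1 again; once the tail-call
         * limit is exhausted it falls through and returns 41
         */
        fd = create_prog_dummy_loop(prog_type, mfd, 1, 41);
        key = 1;
        bpf_map_update_elem(mfd, &key, &fd, BPF_ANY);

        /* key 2: dummy prog that just returns 24 */
        fd = create_prog_dummy_simple(prog_type, 24);
        key = 2;
        bpf_map_update_elem(mfd, &key, &fd, BPF_ANY);

        /* key 3 stays empty: a tail call to it fails and execution
         * continues in the caller
         */
        return mfd;
}

A test's .retval therefore identifies which slot, if any, the tail call actually hit: 42, 41 or 24 for keys 0-2, or the caller's own return value (1 in the new cases) when the call lands on an empty slot. The fixup_prog2 map is built the same way, except it has 8 slots and the "return 42" prog sits at key 7, leaving key 0 empty.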
Diffstat (limited to 'tools/testing')
-rw-r--r--   tools/testing/selftests/bpf/test_verifier.c          |  43
-rw-r--r--   tools/testing/selftests/bpf/verifier/ref_tracking.c  |   6
-rw-r--r--   tools/testing/selftests/bpf/verifier/runtime_jit.c   | 151
3 files changed, 176 insertions, 24 deletions
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index d27fd929abb9..87eaa49609a0 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -408,10 +408,10 @@ static void update_map(int fd, int index)
 	assert(!bpf_map_update_elem(fd, &index, &value, 0));
 }
 
-static int create_prog_dummy1(enum bpf_prog_type prog_type)
+static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
 {
 	struct bpf_insn prog[] = {
-		BPF_MOV64_IMM(BPF_REG_0, 42),
+		BPF_MOV64_IMM(BPF_REG_0, ret),
 		BPF_EXIT_INSN(),
 	};
 
@@ -419,14 +419,15 @@ static int create_prog_dummy1(enum bpf_prog_type prog_type)
 			     ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
+static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
+				  int idx, int ret)
 {
 	struct bpf_insn prog[] = {
 		BPF_MOV64_IMM(BPF_REG_3, idx),
 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 			     BPF_FUNC_tail_call),
-		BPF_MOV64_IMM(BPF_REG_0, 41),
+		BPF_MOV64_IMM(BPF_REG_0, ret),
 		BPF_EXIT_INSN(),
 	};
 
@@ -435,10 +436,9 @@ static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
 }
 
 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
-			     int p1key)
+			     int p1key, int p2key, int p3key)
 {
-	int p2key = 1;
-	int mfd, p1fd, p2fd;
+	int mfd, p1fd, p2fd, p3fd;
 
 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
 			     sizeof(int), max_elem, 0);
@@ -449,23 +449,24 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
 		return -1;
 	}
 
-	p1fd = create_prog_dummy1(prog_type);
-	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
-	if (p1fd < 0 || p2fd < 0)
-		goto out;
+	p1fd = create_prog_dummy_simple(prog_type, 42);
+	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
+	p3fd = create_prog_dummy_simple(prog_type, 24);
+	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
+		goto err;
 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
-		goto out;
+		goto err;
 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
-		goto out;
+		goto err;
+	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
+err:
+		close(mfd);
+		mfd = -1;
+	}
+	close(p3fd);
 	close(p2fd);
 	close(p1fd);
-
 	return mfd;
-out:
-	close(p2fd);
-	close(p1fd);
-	close(mfd);
-	return -1;
 }
 
 static int create_map_in_map(void)
@@ -684,7 +685,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	}
 
 	if (*fixup_prog1) {
-		map_fds[4] = create_prog_array(prog_type, 4, 0);
+		map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
 		do {
 			prog[*fixup_prog1].imm = map_fds[4];
 			fixup_prog1++;
@@ -692,7 +693,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	}
 
 	if (*fixup_prog2) {
-		map_fds[5] = create_prog_array(prog_type, 8, 7);
+		map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
 		do {
 			prog[*fixup_prog2].imm = map_fds[5];
 			fixup_prog2++;
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index ebcbf154c460..604b46151736 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -455,7 +455,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
 	/* bpf_tail_call() */
-	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_MOV64_IMM(BPF_REG_3, 3),
 	BPF_LD_MAP_FD(BPF_REG_2, 0),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
@@ -478,7 +478,7 @@
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
 	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 	/* bpf_tail_call() */
-	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_MOV64_IMM(BPF_REG_3, 3),
 	BPF_LD_MAP_FD(BPF_REG_2, 0),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
@@ -497,7 +497,7 @@
 	BPF_SK_LOOKUP(sk_lookup_tcp),
 	/* bpf_tail_call() */
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
-	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_MOV64_IMM(BPF_REG_3, 3),
 	BPF_LD_MAP_FD(BPF_REG_2, 0),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c
index a9a8f620e71c..94c399d1faca 100644
--- a/tools/testing/selftests/bpf/verifier/runtime_jit.c
+++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c
@@ -27,6 +27,19 @@
 {
 	"runtime/jit: tail_call within bounds, no prog",
 	.insns = {
+	BPF_MOV64_IMM(BPF_REG_3, 3),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 1 },
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"runtime/jit: tail_call within bounds, key 2",
+	.insns = {
 	BPF_MOV64_IMM(BPF_REG_3, 2),
 	BPF_LD_MAP_FD(BPF_REG_2, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
@@ -35,9 +48,147 @@
 	},
 	.fixup_prog1 = { 1 },
 	.result = ACCEPT,
+	.retval = 24,
+},
+{
+	"runtime/jit: tail_call within bounds, key 2 / key 2, first branch",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 13),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 5, 9 },
+	.result = ACCEPT,
+	.retval = 24,
+},
+{
+	"runtime/jit: tail_call within bounds, key 2 / key 2, second branch",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 14),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 5, 9 },
+	.result = ACCEPT,
+	.retval = 24,
+},
+{
+	"runtime/jit: tail_call within bounds, key 0 / key 2, first branch",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 13),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 5, 9 },
+	.result = ACCEPT,
+	.retval = 24,
+},
+{
+	"runtime/jit: tail_call within bounds, key 0 / key 2, second branch",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 14),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_3, 2),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 5, 9 },
+	.result = ACCEPT,
+	.retval = 42,
+},
+{
+	"runtime/jit: tail_call within bounds, different maps, first branch",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 13),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 5 },
+	.fixup_prog2 = { 9 },
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "tail_call abusing map_ptr",
+	.result = ACCEPT,
 	.retval = 1,
 },
 {
+	"runtime/jit: tail_call within bounds, different maps, second branch",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 14),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, cb[0])),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_LD_MAP_FD(BPF_REG_2, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_prog1 = { 5 },
+	.fixup_prog2 = { 9 },
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "tail_call abusing map_ptr",
+	.result = ACCEPT,
+	.retval = 42,
+},
+{
 	"runtime/jit: tail_call out of bounds",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_3, 256),