author		David S. Miller	2018-12-15 10:58:32 -0800
committer	David S. Miller	2018-12-15 10:58:32 -0800
commit		10589a568f2ec531975504c98c1bed88c233a63d (patch)
tree		f2e986932c85bdbb5c1531fdd890b9f05664cf50 /tools
parent		143ece654f9f5b37bedea252a990be37e48ae3a5 (diff)
parent		7640ead939247e91e84b7ec6ec001f30193cc7df (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:
====================
pull-request: bpf 2018-12-15
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) fix liveness propagation of callee saved registers, from Jakub.
2) fix overflow in bpf_jit_limit knob, from Daniel.
3) bpf_flow_dissector api fix, from Stanislav.
4) bpf_perf_event api fix on powerpc, from Sandipan.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
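
A note on item 3): in the bpf_flow.c hunks below, the selftest's running parse offset moves from flow_keys->nhoff to flow_keys->thoff. Each protocol handler advances thoff past the header it consumed, so the transport offset is already correct when parsing ends, and the per-protocol "keys->thoff = keys->nhoff" copies can be dropped. As a rough illustration, here is a minimal userspace sketch (hypothetical struct and function names, not the kernel code) of the bounds-checked header access that bpf_flow_dissect_get_header() applies to that offset:

	#include <limits.h>
	#include <stddef.h>
	#include <stdint.h>

	struct pkt {
		const uint8_t *data;
		const uint8_t *data_end;
		uint16_t thoff;		/* running offset, like flow_keys->thoff */
	};

	static const void *get_header(const struct pkt *p, size_t hdr_size)
	{
		/* The offset must still fit in a __u16 after adding
		 * hdr_size, mirroring the USHRT_MAX guard below. */
		if (p->thoff > USHRT_MAX - hdr_size)
			return NULL;

		/* Only hand out a pointer when the whole header is in
		 * bounds; the real program instead falls back to
		 * bpf_skb_load_bytes() into a scratch buffer here. */
		if (p->data + p->thoff + hdr_size > p->data_end)
			return NULL;

		return p->data + p->thoff;
	}

After each successfully parsed header the caller advances the offset, e.g. keys->thoff += sizeof(*vlan), exactly as the renamed lines in the diff do.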
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/bpf/bpf_flow.c		| 36
-rw-r--r--	tools/testing/selftests/bpf/test_verifier.c	| 38
2 files changed, 50 insertions, 24 deletions
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c
index 107350a7821d..df9d32fd2055 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/bpf_flow.c
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
 {
 	void *data_end = (void *)(long)skb->data_end;
 	void *data = (void *)(long)skb->data;
-	__u16 nhoff = skb->flow_keys->nhoff;
+	__u16 thoff = skb->flow_keys->thoff;
 	__u8 *hdr;
 
 	/* Verifies this variable offset does not overflow */
-	if (nhoff > (USHRT_MAX - hdr_size))
+	if (thoff > (USHRT_MAX - hdr_size))
 		return NULL;
 
-	hdr = data + nhoff;
+	hdr = data + thoff;
 	if (hdr + hdr_size <= data_end)
 		return hdr;
 
-	if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+	if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
 		return NULL;
 
 	return buffer;
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			/* Only inspect standard GRE packets with version 0 */
 			return BPF_OK;
 
-		keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+		keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
 		if (GRE_IS_CSUM(gre->flags))
-			keys->nhoff += 4; /* Step over chksum and Padding */
+			keys->thoff += 4; /* Step over chksum and Padding */
 		if (GRE_IS_KEY(gre->flags))
-			keys->nhoff += 4; /* Step over key */
+			keys->thoff += 4; /* Step over key */
 		if (GRE_IS_SEQ(gre->flags))
-			keys->nhoff += 4; /* Step over sequence number */
+			keys->thoff += 4; /* Step over sequence number */
 
 		keys->is_encap = true;
 
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			if (!eth)
 				return BPF_DROP;
 
-			keys->nhoff += sizeof(*eth);
+			keys->thoff += sizeof(*eth);
 
 			return parse_eth_proto(skb, eth->h_proto);
 		} else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 		if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
 			return BPF_DROP;
 
-		keys->thoff = keys->nhoff;
 		keys->sport = tcp->source;
 		keys->dport = tcp->dest;
 		return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 		if (!udp)
 			return BPF_DROP;
 
-		keys->thoff = keys->nhoff;
 		keys->sport = udp->source;
 		keys->dport = udp->dest;
 		return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
 	keys->ipv4_src = iph->saddr;
 	keys->ipv4_dst = iph->daddr;
 
-	keys->nhoff += iph->ihl << 2;
-	if (data + keys->nhoff > data_end)
+	keys->thoff += iph->ihl << 2;
+	if (data + keys->thoff > data_end)
 		return BPF_DROP;
 
 	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
 	keys->addr_proto = ETH_P_IPV6;
 	memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
 
-	keys->nhoff += sizeof(struct ipv6hdr);
+	keys->thoff += sizeof(struct ipv6hdr);
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
 	/* hlen is in 8-octets and does not include the first 8 bytes
 	 * of the header
 	 */
-	skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+	skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 	if (!fragh)
 		return BPF_DROP;
 
-	keys->nhoff += sizeof(*fragh);
+	keys->thoff += sizeof(*fragh);
 	keys->is_frag = true;
 	if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
 		keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
 	__be16 proto;
 
 	/* Peek back to see if single or double-tagging */
-	if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+	if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
 			       sizeof(proto)))
 		return BPF_DROP;
 
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
 		if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
 			return BPF_DROP;
 
-		keys->nhoff += sizeof(*vlan);
+		keys->thoff += sizeof(*vlan);
 	}
 
 	vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 	if (!vlan)
 		return BPF_DROP;
 
-	keys->nhoff += sizeof(*vlan);
+	keys->thoff += sizeof(*vlan);
 
 	/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
 	if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
 	    vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index df6f751cc1e8..f8eac4a544f4 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -13915,6 +13915,34 @@ static struct bpf_test tests[] = {
 		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
+	{
+		"calls: cross frame pruning",
+		.insns = {
+			/* r8 = !!random();
+			 * call pruner()
+			 * if (r8)
+			 *     do something bad;
+			 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_get_prandom_u32),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+		.result_unpriv = REJECT,
+		.errstr = "!read_ok",
+		.result = REJECT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -13940,7 +13968,7 @@ static int create_map(uint32_t type, uint32_t size_key,
 	return fd;
 }
 
-static int create_prog_dummy1(enum bpf_map_type prog_type)
+static int create_prog_dummy1(enum bpf_prog_type prog_type)
 {
 	struct bpf_insn prog[] = {
 		BPF_MOV64_IMM(BPF_REG_0, 42),
@@ -13951,7 +13979,7 @@ static int create_prog_dummy1(enum bpf_map_type prog_type)
 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
+static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
 {
 	struct bpf_insn prog[] = {
 		BPF_MOV64_IMM(BPF_REG_3, idx),
@@ -13966,7 +13994,7 @@ static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
+static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
 			     int p1key)
 {
 	int p2key = 1;
@@ -14037,7 +14065,7 @@ static int create_cgroup_storage(bool percpu)
 
 static char bpf_vlog[UINT_MAX >> 8];
 
-static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
+static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 			  struct bpf_insn *prog, int *map_fds)
 {
 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
@@ -14166,7 +14194,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
 		do {
 			prog[*fixup_map_stacktrace].imm = map_fds[12];
 			fixup_map_stacktrace++;
-		} while (fixup_map_stacktrace);
+		} while (*fixup_map_stacktrace);
 	}
 }
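
The new "calls: cross frame pruning" test exercises the liveness fix from item 1). A loose userspace simulation of its control flow (my sketch, not part of the commit) shows what the verifier must catch: the unsafe load is only reachable when the callee-saved r8 is 1, so the r8 == 0 and r8 == 1 paths must not be treated as equivalent at the pruning point inside the callee.

	#include <stdio.h>
	#include <stdlib.h>

	/* Trivial callee, standing in for the test's "pruner" function:
	 * both caller paths reach the same instructions here, which is
	 * where verifier state pruning gets its chance. */
	static long pruner(long r1)
	{
		return r1;
	}

	int main(void)
	{
		long r8 = rand() ? 1 : 0;	/* r8 = !!random() */
		long r1 = r8;

		pruner(r1);

		if (r8 == 1) {
			/* In the BPF test this is BPF_LDX_MEM(BPF_B, r9, r1, 0):
			 * r1 holds the scalar 1, not a pointer, so the
			 * verifier must reject the program with "!read_ok". */
			printf("unsafe load would execute here (r1 = %ld)\n", r1);
		}
		return 0;
	}

With the bug, a walk that explored the r8 == 0 path first could prune the r8 == 1 path at the call, because reads of the caller's saved registers after the call were not propagated as liveness across the frame boundary.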
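
The final test_verifier.c hunk fixes a loop condition that tested the fixup array pointer itself, which is always non-zero, instead of the instruction index it points at. A standalone sketch (hypothetical names, same convention) of the zero-terminated fixup list that loop walks:

	#include <stdio.h>

	struct insn {
		int imm;
	};

	/* Patch the imm field of each listed instruction with the map fd.
	 * The index list ends with a 0 sentinel; the caller has already
	 * checked that the first entry is non-zero, hence do/while. */
	static void patch_fixups(struct insn *prog, const int *fixup, int map_fd)
	{
		do {
			prog[*fixup].imm = map_fd;
			fixup++;
		} while (*fixup);	/* the buggy code tested fixup, not *fixup */
	}

	int main(void)
	{
		struct insn prog[8] = { { 0 } };
		int fixup[] = { 2, 5, 0 };	/* patch instructions 2 and 5 */

		patch_fixups(prog, fixup, 42);
		printf("%d %d\n", prog[2].imm, prog[5].imm);	/* prints: 42 42 */
		return 0;
	}

Testing the pointer never stops the walk at the sentinel, so the old loop kept patching prog[] entries at whatever indices happened to follow the array.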