author    David S. Miller <davem@davemloft.net>  2020-01-23 08:10:16 +0100
committer David S. Miller <davem@davemloft.net>  2020-01-23 08:10:16 +0100
commit    954b3c4397792c8614aa4aaf25030ae87ece8307 (patch)
tree      b2648c3d97fe2332863d7baac069c23cab0addc4 /tools/lib
parent    c5d19a6ecfce72d0352191d75f03eea4748a8c45 (diff)
parent    85cc12f85138f2ce3edf24833edd2179690306db (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-01-22

The following pull-request contains BPF updates for your *net-next* tree.

We've added 92 non-merge commits during the last 16 day(s) which contain
a total of 320 files changed, 7532 insertions(+), 1448 deletions(-).

The main changes are:

1) function by function verification and program extensions from Alexei.

2) massive cleanup of selftests/bpf from Toke and Andrii.

3) batched bpf map operations from Brian and Yonghong.

4) tcp congestion control in bpf from Martin.

5) bulking for non-map xdp_redirect from Toke.

6) bpf_send_signal_thread helper from Yonghong.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/Makefile            11
-rw-r--r--  tools/lib/bpf/bpf.c               72
-rw-r--r--  tools/lib/bpf/bpf.h               27
-rw-r--r--  tools/lib/bpf/bpf_prog_linfo.c     3
-rw-r--r--  tools/lib/bpf/btf.c              105
-rw-r--r--  tools/lib/bpf/btf.h                2
-rw-r--r--  tools/lib/bpf/btf_dump.c           4
-rw-r--r--  tools/lib/bpf/hashmap.c            3
-rw-r--r--  tools/lib/bpf/libbpf.c           952
-rw-r--r--  tools/lib/bpf/libbpf.h             8
-rw-r--r--  tools/lib/bpf/libbpf.map          11
-rw-r--r--  tools/lib/bpf/libbpf_errno.c       3
-rw-r--r--  tools/lib/bpf/libbpf_probes.c     27
-rw-r--r--  tools/lib/bpf/netlink.c            3
-rw-r--r--  tools/lib/bpf/nlattr.c             3
-rw-r--r--  tools/lib/bpf/str_error.c          3
-rw-r--r--  tools/lib/bpf/xsk.c                3
17 files changed, 1064 insertions, 176 deletions
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index d87830e7ea63..aee7f1a83c77 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -183,7 +183,7 @@ $(BPF_IN_STATIC): force elfdep zdep bpfdep $(BPF_HELPER_DEFS)
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
- $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \
+ $(QUIET_GEN)$(srctree)/scripts/bpf_helpers_doc.py --header \
--file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS)
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
@@ -273,10 +273,11 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
- *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
- *.pc LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
- $(SHARED_OBJDIR) $(STATIC_OBJDIR)
+ $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
+ *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
+ $(SHARED_OBJDIR) $(STATIC_OBJDIR) \
+ $(addprefix $(OUTPUT), \
+ *.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc)
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index a787d53699c8..c6dafe563176 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -32,6 +32,9 @@
#include "libbpf.h"
#include "libbpf_internal.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
/*
* When building perf, unistd.h is overridden. __NR_bpf is
* required to be defined explicitly.
@@ -95,7 +98,11 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
attr.btf_key_type_id = create_attr->btf_key_type_id;
attr.btf_value_type_id = create_attr->btf_value_type_id;
attr.map_ifindex = create_attr->map_ifindex;
- attr.inner_map_fd = create_attr->inner_map_fd;
+ if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
+ attr.btf_vmlinux_value_type_id =
+ create_attr->btf_vmlinux_value_type_id;
+ else
+ attr.inner_map_fd = create_attr->inner_map_fd;
return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
@@ -228,7 +235,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
- if (attr.prog_type == BPF_PROG_TYPE_TRACING) {
+ if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS) {
+ attr.attach_btf_id = load_attr->attach_btf_id;
+ } else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
+ attr.prog_type == BPF_PROG_TYPE_EXT) {
attr.attach_btf_id = load_attr->attach_btf_id;
attr.attach_prog_fd = load_attr->attach_prog_fd;
} else {
@@ -443,6 +453,64 @@ int bpf_map_freeze(int fd)
return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
}
+static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
+ void *out_batch, void *keys, void *values,
+ __u32 *count,
+ const struct bpf_map_batch_opts *opts)
+{
+ union bpf_attr attr;
+ int ret;
+
+ if (!OPTS_VALID(opts, bpf_map_batch_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.batch.map_fd = fd;
+ attr.batch.in_batch = ptr_to_u64(in_batch);
+ attr.batch.out_batch = ptr_to_u64(out_batch);
+ attr.batch.keys = ptr_to_u64(keys);
+ attr.batch.values = ptr_to_u64(values);
+ attr.batch.count = *count;
+ attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
+ attr.batch.flags = OPTS_GET(opts, flags, 0);
+
+ ret = sys_bpf(cmd, &attr, sizeof(attr));
+ *count = attr.batch.count;
+
+ return ret;
+}
+
+int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
+ const struct bpf_map_batch_opts *opts)
+{
+ return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
+ NULL, keys, NULL, count, opts);
+}
+
+int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
+ void *values, __u32 *count,
+ const struct bpf_map_batch_opts *opts)
+{
+ return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
+ out_batch, keys, values, count, opts);
+}
+
+int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
+ void *keys, void *values, __u32 *count,
+ const struct bpf_map_batch_opts *opts)
+{
+ return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
+ fd, in_batch, out_batch, keys, values,
+ count, opts);
+}
+
+int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
+ const struct bpf_map_batch_opts *opts)
+{
+ return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
+ keys, values, count, opts);
+}
+
int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr;
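The batch helpers above share a cursor protocol: start with in_batch == NULL, feed the returned out_batch back as the next in_batch, and treat errno == ENOENT as end of iteration; *count is the buffer capacity on entry and the number of elements processed on return. A usage sketch, not part of this patch, assuming map_fd refers to a BPF_MAP_TYPE_HASH with __u32 keys and __u64 values:

#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>

static int dump_all(int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);
	__u32 out_batch, count, i;
	__u32 keys[64];
	__u64 vals[64];
	void *in = NULL;	/* NULL cursor starts the iteration */
	int err;

	do {
		count = 64;	/* in: capacity, out: entries returned */
		err = bpf_map_lookup_batch(map_fd, in, &out_batch,
					   keys, vals, &count, &opts);
		if (err < 0 && errno != ENOENT)
			return -errno;
		for (i = 0; i < count; i++)
			printf("%u -> %llu\n", keys[i],
			       (unsigned long long)vals[i]);
		in = &out_batch;	/* resume where the kernel stopped */
	} while (!err);	/* -1/ENOENT marks the final, possibly partial batch */

	return 0;
}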
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index f0ab8519986e..b976e77316cc 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -46,7 +46,10 @@ struct bpf_create_map_attr {
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 map_ifindex;
- __u32 inner_map_fd;
+ union {
+ __u32 inner_map_fd;
+ __u32 btf_vmlinux_value_type_id;
+ };
};
LIBBPF_API int
@@ -124,6 +127,28 @@ LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);
+
+struct bpf_map_batch_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u64 elem_flags;
+ __u64 flags;
+};
+#define bpf_map_batch_opts__last_field flags
+
+LIBBPF_API int bpf_map_delete_batch(int fd, void *keys,
+ __u32 *count,
+ const struct bpf_map_batch_opts *opts);
+LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
+ void *keys, void *values, __u32 *count,
+ const struct bpf_map_batch_opts *opts);
+LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
+ void *out_batch, void *keys,
+ void *values, __u32 *count,
+ const struct bpf_map_batch_opts *opts);
+LIBBPF_API int bpf_map_update_batch(int fd, void *keys, void *values,
+ __u32 *count,
+ const struct bpf_map_batch_opts *opts);
+
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
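On the update side, keys and values are parallel arrays, and a failed call still reports through *count how many elements were processed before the error. A hedged sketch, again not from this patch:

#include <errno.h>
#include <bpf/bpf.h>

static int fill_map(int map_fd, __u32 *keys, __u64 *vals, __u32 n)
{
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = BPF_ANY,	/* create or overwrite */
	);
	__u32 count = n;

	if (bpf_map_update_batch(map_fd, keys, vals, &count, &opts))
		return -errno;	/* "count" holds how many updates succeeded */
	return 0;
}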
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index 3ed1a27b5f7c..bafca49cb1e6 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -8,6 +8,9 @@
#include "libbpf.h"
#include "libbpf_internal.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
struct bpf_prog_linfo {
void *raw_linfo;
void *raw_jited_linfo;
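The poison pragma, repeated across these files, turns any later use of a kernel-only typedef into a hard compile error while leaving the UAPI __u32-style types intact. A minimal illustration, not from the tree:

#include <linux/types.h>

#pragma GCC poison u32

/* u32 bad;	-- error: attempt to use poisoned "u32" */
__u32 ok;	/* UAPI typedef, unaffected */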
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 5f04f56e1eb6..3d1c25fc97ae 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -8,6 +8,10 @@
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
+#include <sys/utsname.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
@@ -17,8 +21,11 @@
#include "libbpf_internal.h"
#include "hashmap.h"
-#define BTF_MAX_NR_TYPES 0x7fffffff
-#define BTF_MAX_STR_OFFSET 0x7fffffff
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+#define BTF_MAX_NR_TYPES 0x7fffffffU
+#define BTF_MAX_STR_OFFSET 0x7fffffffU
static struct btf_type btf_void;
@@ -50,7 +57,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
if (btf->types_size == BTF_MAX_NR_TYPES)
return -E2BIG;
- expand_by = max(btf->types_size >> 2, 16);
+ expand_by = max(btf->types_size >> 2, 16U);
new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
new_types = realloc(btf->types, sizeof(*new_types) * new_size);
@@ -286,7 +293,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
- return min(sizeof(void *), t->size);
+ return min(sizeof(void *), (size_t)t->size);
case BTF_KIND_PTR:
return sizeof(void *);
case BTF_KIND_TYPEDEF:
@@ -1398,7 +1405,7 @@ static int btf_dedup_hypot_map_add(struct btf_dedup *d,
if (d->hypot_cnt == d->hypot_cap) {
__u32 *new_list;
- d->hypot_cap += max(16, d->hypot_cap / 2);
+ d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
if (!new_list)
return -ENOMEM;
@@ -1694,7 +1701,7 @@ static int btf_dedup_strings(struct btf_dedup *d)
if (strs.cnt + 1 > strs.cap) {
struct btf_str_ptr *new_ptrs;
- strs.cap += max(strs.cnt / 2, 16);
+ strs.cap += max(strs.cnt / 2, 16U);
new_ptrs = realloc(strs.ptrs,
sizeof(strs.ptrs[0]) * strs.cap);
if (!new_ptrs) {
@@ -2928,3 +2935,89 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
}
return 0;
}
+
+static struct btf *btf_load_raw(const char *path)
+{
+ struct btf *btf;
+ size_t read_cnt;
+ struct stat st;
+ void *data;
+ FILE *f;
+
+ if (stat(path, &st))
+ return ERR_PTR(-errno);
+
+ data = malloc(st.st_size);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ f = fopen(path, "rb");
+ if (!f) {
+ btf = ERR_PTR(-errno);
+ goto cleanup;
+ }
+
+ read_cnt = fread(data, 1, st.st_size, f);
+ fclose(f);
+ if (read_cnt < st.st_size) {
+ btf = ERR_PTR(-EBADF);
+ goto cleanup;
+ }
+
+ btf = btf__new(data, read_cnt);
+
+cleanup:
+ free(data);
+ return btf;
+}
+
+/*
+ * Probe few well-known locations for vmlinux kernel image and try to load BTF
+ * data out of it to use for target BTF.
+ */
+struct btf *libbpf_find_kernel_btf(void)
+{
+ struct {
+ const char *path_fmt;
+ bool raw_btf;
+ } locations[] = {
+ /* try canonical vmlinux BTF through sysfs first */
+ { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
+ /* fall back to trying to find vmlinux ELF on disk otherwise */
+ { "/boot/vmlinux-%1$s" },
+ { "/lib/modules/%1$s/vmlinux-%1$s" },
+ { "/lib/modules/%1$s/build/vmlinux" },
+ { "/usr/lib/modules/%1$s/kernel/vmlinux" },
+ { "/usr/lib/debug/boot/vmlinux-%1$s" },
+ { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
+ { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
+ };
+ char path[PATH_MAX + 1];
+ struct utsname buf;
+ struct btf *btf;
+ int i;
+
+ uname(&buf);
+
+ for (i = 0; i < ARRAY_SIZE(locations); i++) {
+ snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
+
+ if (access(path, R_OK))
+ continue;
+
+ if (locations[i].raw_btf)
+ btf = btf_load_raw(path);
+ else
+ btf = btf__parse_elf(path, NULL);
+
+ pr_debug("loading kernel BTF '%s': %ld\n",
+ path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
+ if (IS_ERR(btf))
+ continue;
+
+ return btf;
+ }
+
+ pr_warn("failed to find valid kernel BTF\n");
+ return ERR_PTR(-ESRCH);
+}
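The new helper returns either a valid struct btf * or an ERR_PTR-encoded negative errno, so callers must check with IS_ERR() before use. A hedged usage sketch (the looked-up type name is only an example):

#include <linux/err.h>	/* tools/ copy of IS_ERR()/PTR_ERR() */
#include <linux/btf.h>
#include <bpf/btf.h>

static int find_kernel_struct_id(const char *name)
{
	struct btf *vmlinux_btf;
	int id;

	vmlinux_btf = libbpf_find_kernel_btf();
	if (IS_ERR(vmlinux_btf))
		return PTR_ERR(vmlinux_btf);	/* e.g. -ESRCH: no BTF found */

	id = btf__find_by_name_kind(vmlinux_btf, name, BTF_KIND_STRUCT);
	btf__free(vmlinux_btf);
	return id;	/* negative if the type is not found */
}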
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 8d73f7f5551f..70c1b7ec2bd0 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -102,6 +102,8 @@ LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
+
struct btf_dedup_opts {
unsigned int dedup_table_size;
bool dont_resolve_fwds;
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index e95f7710f210..bd09ed1710f1 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -18,6 +18,9 @@
#include "libbpf.h"
#include "libbpf_internal.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
@@ -139,6 +142,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
if (IS_ERR(d->type_names)) {
err = PTR_ERR(d->type_names);
d->type_names = NULL;
+ goto err;
}
d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->ident_names)) {
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
index 6122272943e6..54c30c802070 100644
--- a/tools/lib/bpf/hashmap.c
+++ b/tools/lib/bpf/hashmap.c
@@ -12,6 +12,9 @@
#include <linux/err.h>
#include "hashmap.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7513165b104f..ae34b681ae82 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -55,6 +55,9 @@
#include "libbpf_internal.h"
#include "hashmap.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
#ifndef EM_BPF
#define EM_BPF 247
#endif
@@ -70,6 +73,12 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
+static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
+static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
+ int idx);
+static const struct btf_type *
+skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
{
@@ -166,6 +175,8 @@ struct bpf_capabilities {
__u32 btf_datasec:1;
/* BPF_F_MMAPABLE is supported for arrays */
__u32 array_mmap:1;
+ /* BTF_FUNC_GLOBAL is supported */
+ __u32 btf_func_global:1;
};
enum reloc_type {
@@ -229,10 +240,32 @@ struct bpf_program {
__u32 prog_flags;
};
+struct bpf_struct_ops {
+ const char *tname;
+ const struct btf_type *type;
+ struct bpf_program **progs;
+ __u32 *kern_func_off;
+ /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
+ void *data;
+ /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
+ * btf_vmlinux's format.
+ * struct bpf_struct_ops_tcp_congestion_ops {
+ * [... some other kernel fields ...]
+ * struct tcp_congestion_ops data;
+ * }
+ * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
+ * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
+ * from "data".
+ */
+ void *kern_vdata;
+ __u32 type_id;
+};
+
#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
+#define STRUCT_OPS_SEC ".struct_ops"
enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -259,10 +292,12 @@ struct bpf_map {
struct bpf_map_def def;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
+ __u32 btf_vmlinux_value_type_id;
void *priv;
bpf_map_clear_priv_t clear_priv;
enum libbpf_map_type libbpf_type;
void *mmaped;
+ struct bpf_struct_ops *st_ops;
char *pin_path;
bool pinned;
bool reused;
@@ -326,6 +361,7 @@ struct bpf_object {
Elf_Data *data;
Elf_Data *rodata;
Elf_Data *bss;
+ Elf_Data *st_ops_data;
size_t strtabidx;
struct {
GElf_Shdr shdr;
@@ -339,6 +375,7 @@ struct bpf_object {
int data_shndx;
int rodata_shndx;
int bss_shndx;
+ int st_ops_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
@@ -348,6 +385,10 @@ struct bpf_object {
struct list_head list;
struct btf *btf;
+ /* Parse and load BTF vmlinux if any of the programs in the object need
+ * it at load time.
+ */
+ struct btf *btf_vmlinux;
struct btf_ext *btf_ext;
void *priv;
@@ -566,6 +607,348 @@ static __u32 get_kernel_version(void)
return KERNEL_VERSION(major, minor, patch);
}
+static const struct btf_member *
+find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
+{
+ struct btf_member *m;
+ int i;
+
+ for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
+ if (btf_member_bit_offset(t, i) == bit_offset)
+ return m;
+ }
+
+ return NULL;
+}
+
+static const struct btf_member *
+find_member_by_name(const struct btf *btf, const struct btf_type *t,
+ const char *name)
+{
+ struct btf_member *m;
+ int i;
+
+ for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
+ if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
+ return m;
+ }
+
+ return NULL;
+}
+
+#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
+static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
+ const char *name, __u32 kind);
+
+static int
+find_struct_ops_kern_types(const struct btf *btf, const char *tname,
+ const struct btf_type **type, __u32 *type_id,
+ const struct btf_type **vtype, __u32 *vtype_id,
+ const struct btf_member **data_member)
+{
+ const struct btf_type *kern_type, *kern_vtype;
+ const struct btf_member *kern_data_member;
+ __s32 kern_vtype_id, kern_type_id;
+ __u32 i;
+
+ kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ if (kern_type_id < 0) {
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
+ tname);
+ return kern_type_id;
+ }
+ kern_type = btf__type_by_id(btf, kern_type_id);
+
+ /* Find the corresponding "map_value" type that will be used
+ * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
+ * find "struct bpf_struct_ops_tcp_congestion_ops" from the
+ * btf_vmlinux.
+ */
+ kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
+ tname, BTF_KIND_STRUCT);
+ if (kern_vtype_id < 0) {
+ pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
+ STRUCT_OPS_VALUE_PREFIX, tname);
+ return kern_vtype_id;
+ }
+ kern_vtype = btf__type_by_id(btf, kern_vtype_id);
+
+ /* Find "struct tcp_congestion_ops" from
+ * struct bpf_struct_ops_tcp_congestion_ops {
+ * [ ... ]
+ * struct tcp_congestion_ops data;
+ * }
+ */
+ kern_data_member = btf_members(kern_vtype);
+ for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
+ if (kern_data_member->type == kern_type_id)
+ break;
+ }
+ if (i == btf_vlen(kern_vtype)) {
+ pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
+ tname, STRUCT_OPS_VALUE_PREFIX, tname);
+ return -EINVAL;
+ }
+
+ *type = kern_type;
+ *type_id = kern_type_id;
+ *vtype = kern_vtype;
+ *vtype_id = kern_vtype_id;
+ *data_member = kern_data_member;
+
+ return 0;
+}
+
+static bool bpf_map__is_struct_ops(const struct bpf_map *map)
+{
+ return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
+}
+
+/* Init the map's fields that depend on kern_btf */
+static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf *kern_btf)
+{
+ const struct btf_member *member, *kern_member, *kern_data_member;
+ const struct btf_type *type, *kern_type, *kern_vtype;
+ __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
+ struct bpf_struct_ops *st_ops;
+ void *data, *kern_data;
+ const char *tname;
+ int err;
+
+ st_ops = map->st_ops;
+ type = st_ops->type;
+ tname = st_ops->tname;
+ err = find_struct_ops_kern_types(kern_btf, tname,
+ &kern_type, &kern_type_id,
+ &kern_vtype, &kern_vtype_id,
+ &kern_data_member);
+ if (err)
+ return err;
+
+ pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
+ map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
+
+ map->def.value_size = kern_vtype->size;
+ map->btf_vmlinux_value_type_id = kern_vtype_id;
+
+ st_ops->kern_vdata = calloc(1, kern_vtype->size);
+ if (!st_ops->kern_vdata)
+ return -ENOMEM;
+
+ data = st_ops->data;
+ kern_data_off = kern_data_member->offset / 8;
+ kern_data = st_ops->kern_vdata + kern_data_off;
+
+ member = btf_members(type);
+ for (i = 0; i < btf_vlen(type); i++, member++) {
+ const struct btf_type *mtype, *kern_mtype;
+ __u32 mtype_id, kern_mtype_id;
+ void *mdata, *kern_mdata;
+ __s64 msize, kern_msize;
+ __u32 moff, kern_moff;
+ __u32 kern_member_idx;
+ const char *mname;
+
+ mname = btf__name_by_offset(btf, member->name_off);
+ kern_member = find_member_by_name(kern_btf, kern_type, mname);
+ if (!kern_member) {
+ pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
+ map->name, mname);
+ return -ENOTSUP;
+ }
+
+ kern_member_idx = kern_member - btf_members(kern_type);
+ if (btf_member_bitfield_size(type, i) ||
+ btf_member_bitfield_size(kern_type, kern_member_idx)) {
+ pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
+ map->name, mname);
+ return -ENOTSUP;
+ }
+
+ moff = member->offset / 8;
+ kern_moff = kern_member->offset / 8;
+
+ mdata = data + moff;
+ kern_mdata = kern_data + kern_moff;
+
+ mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
+ kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
+ &kern_mtype_id);
+ if (BTF_INFO_KIND(mtype->info) !=
+ BTF_INFO_KIND(kern_mtype->info)) {
+ pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
+ map->name, mname, BTF_INFO_KIND(mtype->info),
+ BTF_INFO_KIND(kern_mtype->info));
+ return -ENOTSUP;
+ }
+
+ if (btf_is_ptr(mtype)) {
+ struct bpf_program *prog;
+
+ mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
+ kern_mtype = skip_mods_and_typedefs(kern_btf,
+ kern_mtype->type,
+ &kern_mtype_id);
+ if (!btf_is_func_proto(mtype) ||
+ !btf_is_func_proto(kern_mtype)) {
+ pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
+ map->name, mname);
+ return -ENOTSUP;
+ }
+
+ prog = st_ops->progs[i];
+ if (!prog) {
+ pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
+ map->name, mname);
+ continue;
+ }
+
+ prog->attach_btf_id = kern_type_id;
+ prog->expected_attach_type = kern_member_idx;
+
+ st_ops->kern_func_off[i] = kern_data_off + kern_moff;
+
+ pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
+ map->name, mname, prog->name, moff,
+ kern_moff);
+
+ continue;
+ }
+
+ msize = btf__resolve_size(btf, mtype_id);
+ kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
+ if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
+ pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
+ map->name, mname, (ssize_t)msize,
+ (ssize_t)kern_msize);
+ return -ENOTSUP;
+ }
+
+ pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
+ map->name, mname, (unsigned int)msize,
+ moff, kern_moff);
+ memcpy(kern_mdata, mdata, msize);
+ }
+
+ return 0;
+}
+
+static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ size_t i;
+ int err;
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
+
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ err = bpf_map__init_kern_struct_ops(map, obj->btf,
+ obj->btf_vmlinux);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
+{
+ const struct btf_type *type, *datasec;
+ const struct btf_var_secinfo *vsi;
+ struct bpf_struct_ops *st_ops;
+ const char *tname, *var_name;
+ __s32 type_id, datasec_id;
+ const struct btf *btf;
+ struct bpf_map *map;
+ __u32 i;
+
+ if (obj->efile.st_ops_shndx == -1)
+ return 0;
+
+ btf = obj->btf;
+ datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
+ BTF_KIND_DATASEC);
+ if (datasec_id < 0) {
+ pr_warn("struct_ops init: DATASEC %s not found\n",
+ STRUCT_OPS_SEC);
+ return -EINVAL;
+ }
+
+ datasec = btf__type_by_id(btf, datasec_id);
+ vsi = btf_var_secinfos(datasec);
+ for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
+ type = btf__type_by_id(obj->btf, vsi->type);
+ var_name = btf__name_by_offset(obj->btf, type->name_off);
+
+ type_id = btf__resolve_type(obj->btf, vsi->type);
+ if (type_id < 0) {
+ pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
+ vsi->type, STRUCT_OPS_SEC);
+ return -EINVAL;
+ }
+
+ type = btf__type_by_id(obj->btf, type_id);
+ tname = btf__name_by_offset(obj->btf, type->name_off);
+ if (!tname[0]) {
+ pr_warn("struct_ops init: anonymous type is not supported\n");
+ return -ENOTSUP;
+ }
+ if (!btf_is_struct(type)) {
+ pr_warn("struct_ops init: %s is not a struct\n", tname);
+ return -EINVAL;
+ }
+
+ map = bpf_object__add_map(obj);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ map->sec_idx = obj->efile.st_ops_shndx;
+ map->sec_offset = vsi->offset;
+ map->name = strdup(var_name);
+ if (!map->name)
+ return -ENOMEM;
+
+ map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
+ map->def.key_size = sizeof(int);
+ map->def.value_size = type->size;
+ map->def.max_entries = 1;
+
+ map->st_ops = calloc(1, sizeof(*map->st_ops));
+ if (!map->st_ops)
+ return -ENOMEM;
+ st_ops = map->st_ops;
+ st_ops->data = malloc(type->size);
+ st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
+ st_ops->kern_func_off = malloc(btf_vlen(type) *
+ sizeof(*st_ops->kern_func_off));
+ if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
+ return -ENOMEM;
+
+ if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
+ pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
+ var_name, STRUCT_OPS_SEC);
+ return -EINVAL;
+ }
+
+ memcpy(st_ops->data,
+ obj->efile.st_ops_data->d_buf + vsi->offset,
+ type->size);
+ st_ops->tname = tname;
+ st_ops->type = type;
+ st_ops->type_id = type_id;
+
+ pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
+ tname, type_id, var_name, vsi->offset);
+ }
+
+ return 0;
+}
+
static struct bpf_object *bpf_object__new(const char *path,
const void *obj_buf,
size_t obj_buf_sz,
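bpf_object__init_struct_ops_maps() above consumes the BPF-side ".struct_ops" ELF section: each global variable there becomes one BPF_MAP_TYPE_STRUCT_OPS map, and its function-pointer members are wired to programs by the relocation pass added further below. A hedged sketch of the BPF object side (member set abbreviated; the dctcp naming and vmlinux.h header are assumptions, not part of this patch):

/* bpf_dctcp.c -- BPF object side (sketch) */
#include "vmlinux.h"		/* assumed: bpftool-generated kernel types */
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/dctcp_init")
void dctcp_init(struct sock *sk)
{
	/* congestion-control init logic would go here */
}

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init	= (void *)dctcp_init,
	.name	= "bpf_dctcp",
};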
@@ -607,6 +990,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.data_shndx = -1;
obj->efile.rodata_shndx = -1;
obj->efile.bss_shndx = -1;
+ obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
obj->kern_version = get_kernel_version();
@@ -630,6 +1014,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.data = NULL;
obj->efile.rodata = NULL;
obj->efile.bss = NULL;
+ obj->efile.st_ops_data = NULL;
zfree(&obj->efile.reloc_sects);
obj->efile.nr_reloc_sects = 0;
@@ -735,16 +1120,6 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
return 0;
}
-static int compare_bpf_map(const void *_a, const void *_b)
-{
- const struct bpf_map *a = _a;
- const struct bpf_map *b = _b;
-
- if (a->sec_idx != b->sec_idx)
- return a->sec_idx - b->sec_idx;
- return a->sec_offset - b->sec_offset;
-}
-
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
@@ -815,6 +1190,9 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
} else if (!strcmp(name, RODATA_SEC)) {
if (obj->efile.rodata)
*size = obj->efile.rodata->d_size;
+ } else if (!strcmp(name, STRUCT_OPS_SEC)) {
+ if (obj->efile.st_ops_data)
+ *size = obj->efile.st_ops_data->d_size;
} else {
ret = bpf_object_search_section_size(obj, name, &d_size);
if (!ret)
@@ -898,7 +1276,7 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map)
long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
- map_sz = roundup(map->def.value_size, 8) * map->def.max_entries;
+ map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
map_sz = roundup(map_sz, page_sz);
return map_sz;
}
@@ -1440,6 +1818,20 @@ skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
return t;
}
+static const struct btf_type *
+resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
+{
+ const struct btf_type *t;
+
+ t = skip_mods_and_typedefs(btf, id, NULL);
+ if (!btf_is_ptr(t))
+ return NULL;
+
+ t = skip_mods_and_typedefs(btf, t->type, res_id);
+
+ return btf_is_func_proto(t) ? t : NULL;
+}
+
/*
* Fetch integer attribute of BTF map definition. Such attributes are
* represented using a pointer to an array, in which dimensionality of array
@@ -1787,13 +2179,10 @@ static int bpf_object__init_maps(struct bpf_object *obj,
err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
err = err ?: bpf_object__init_global_data_maps(obj);
err = err ?: bpf_object__init_kconfig_map(obj);
+ err = err ?: bpf_object__init_struct_ops_maps(obj);
if (err)
return err;
- if (obj->nr_maps) {
- qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
- compare_bpf_map);
- }
return 0;
}
@@ -1817,13 +2206,14 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
+ bool has_func_global = obj->caps.btf_func_global;
bool has_datasec = obj->caps.btf_datasec;
bool has_func = obj->caps.btf_func;
struct btf *btf = obj->btf;
struct btf_type *t;
int i, j, vlen;
- if (!obj->btf || (has_func && has_datasec))
+ if (!obj->btf || (has_func && has_datasec && has_func_global))
return;
for (i = 1; i <= btf__get_nr_types(btf); i++) {
@@ -1871,6 +2261,9 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
} else if (!has_func && btf_is_func(t)) {
/* replace FUNC with TYPEDEF */
t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
+ } else if (!has_func_global && btf_is_func(t)) {
+ /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
+ t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
}
}
}
@@ -1889,23 +2282,26 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
- obj->nr_extern > 0;
+ obj->efile.st_ops_shndx >= 0 ||
+ obj->nr_extern > 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
Elf_Data *btf_data,
Elf_Data *btf_ext_data)
{
- bool btf_required = bpf_object__is_btf_mandatory(obj);
- int err = 0;
+ int err = -ENOENT;
if (btf_data) {
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
if (IS_ERR(obj->btf)) {
+ err = PTR_ERR(obj->btf);
+ obj->btf = NULL;
pr_warn("Error loading ELF section %s: %d.\n",
BTF_ELF_SEC, err);
goto out;
}
+ err = 0;
}
if (btf_ext_data) {
if (!obj->btf) {
@@ -1923,18 +2319,9 @@ static int bpf_object__init_btf(struct bpf_object *obj,
}
}
out:
- if (err || IS_ERR(obj->btf)) {
- if (btf_required)
- err = err ? : PTR_ERR(obj->btf);
- else
- err = 0;
- if (!IS_ERR_OR_NULL(obj->btf))
- btf__free(obj->btf);
- obj->btf = NULL;
- }
- if (btf_required && !obj->btf) {
+ if (err && bpf_object__is_btf_mandatory(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
- return err == 0 ? -ENOENT : err;
+ return err;
}
return 0;
}
@@ -1963,6 +2350,41 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
return 0;
}
+static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
+{
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
+ return true;
+
+ /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
+ * also need vmlinux BTF
+ */
+ if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
+ return true;
+
+ return false;
+}
+
+static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
+{
+ struct bpf_program *prog;
+ int err;
+
+ bpf_object__for_each_program(prog, obj) {
+ if (libbpf_prog_needs_vmlinux_btf(prog)) {
+ obj->btf_vmlinux = libbpf_find_kernel_btf();
+ if (IS_ERR(obj->btf_vmlinux)) {
+ err = PTR_ERR(obj->btf_vmlinux);
+ pr_warn("Error loading vmlinux BTF: %d\n", err);
+ obj->btf_vmlinux = NULL;
+ return err;
+ }
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
int err = 0;
@@ -2088,6 +2510,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, RODATA_SEC) == 0) {
obj->efile.rodata = data;
obj->efile.rodata_shndx = idx;
+ } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
+ obj->efile.st_ops_data = data;
+ obj->efile.st_ops_shndx = idx;
} else {
pr_debug("skip section(%d) %s\n", idx, name);
}
@@ -2097,7 +2522,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
int sec = sh.sh_info; /* points to other section */
/* Only do relo for section with exec instructions */
- if (!section_have_execinstr(obj, sec)) {
+ if (!section_have_execinstr(obj, sec) &&
+ strcmp(name, ".rel" STRUCT_OPS_SEC)) {
pr_debug("skip relo %s(%d) for section(%d)\n",
name, idx, sec);
continue;
@@ -2599,8 +3025,12 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
__u32 key_type_id = 0, value_type_id = 0;
int ret;
- /* if it's BTF-defined map, we don't need to search for type IDs */
- if (map->sec_idx == obj->efile.btf_maps_shndx)
+ /* if it's BTF-defined map, we don't need to search for type IDs.
+ * For struct_ops map, it does not need btf_key_type_id and
+ * btf_value_type_id.
+ */
+ if (map->sec_idx == obj->efile.btf_maps_shndx ||
+ bpf_map__is_struct_ops(map))
return 0;
if (!bpf_map__is_internal(map)) {
@@ -2804,6 +3234,32 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
return 0;
}
+static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* static void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
+ };
+ int btf_fd;
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs));
+ if (btf_fd >= 0) {
+ obj->caps.btf_func_global = 1;
+ close(btf_fd);
+ return 1;
+ }
+
+ return 0;
+}
+
static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
static const char strs[] = "\0x\0.data";
@@ -2859,6 +3315,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
bpf_object__probe_name,
bpf_object__probe_global_data,
bpf_object__probe_btf_func,
+ bpf_object__probe_btf_func_global,
bpf_object__probe_btf_datasec,
bpf_object__probe_array_mmap,
};
@@ -3025,6 +3482,9 @@ bpf_object__create_maps(struct bpf_object *obj)
if (bpf_map_type__is_map_in_map(def->type) &&
map->inner_map_fd >= 0)
create_attr.inner_map_fd = map->inner_map_fd;
+ if (bpf_map__is_struct_ops(map))
+ create_attr.btf_vmlinux_value_type_id =
+ map->btf_vmlinux_value_type_id;
if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
create_attr.btf_fd = btf__fd(obj->btf);
@@ -3860,92 +4320,6 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
return 0;
}
-static struct btf *btf_load_raw(const char *path)
-{
- struct btf *btf;
- size_t read_cnt;
- struct stat st;
- void *data;
- FILE *f;
-
- if (stat(path, &st))
- return ERR_PTR(-errno);
-
- data = malloc(st.st_size);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- f = fopen(path, "rb");
- if (!f) {
- btf = ERR_PTR(-errno);
- goto cleanup;
- }
-
- read_cnt = fread(data, 1, st.st_size, f);
- fclose(f);
- if (read_cnt < st.st_size) {
- btf = ERR_PTR(-EBADF);
- goto cleanup;
- }
-
- btf = btf__new(data, read_cnt);
-
-cleanup:
- free(data);
- return btf;
-}
-
-/*
- * Probe few well-known locations for vmlinux kernel image and try to load BTF
- * data out of it to use for target BTF.
- */
-static struct btf *bpf_core_find_kernel_btf(void)
-{
- struct {
- const char *path_fmt;
- bool raw_btf;
- } locations[] = {
- /* try canonical vmlinux BTF through sysfs first */
- { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
- /* fall back to trying to find vmlinux ELF on disk otherwise */
- { "/boot/vmlinux-%1$s" },
- { "/lib/modules/%1$s/vmlinux-%1$s" },
- { "/lib/modules/%1$s/build/vmlinux" },
- { "/usr/lib/modules/%1$s/kernel/vmlinux" },
- { "/usr/lib/debug/boot/vmlinux-%1$s" },
- { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
- { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
- };
- char path[PATH_MAX + 1];
- struct utsname buf;
- struct btf *btf;
- int i;
-
- uname(&buf);
-
- for (i = 0; i < ARRAY_SIZE(locations); i++) {
- snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
-
- if (access(path, R_OK))
- continue;
-
- if (locations[i].raw_btf)
- btf = btf_load_raw(path);
- else
- btf = btf__parse_elf(path, NULL);
-
- pr_debug("loading kernel BTF '%s': %ld\n",
- path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
- if (IS_ERR(btf))
- continue;
-
- return btf;
- }
-
- pr_warn("failed to find valid kernel BTF\n");
- return ERR_PTR(-ESRCH);
-}
-
/* Output spec definition in the format:
* [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
* where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
@@ -4180,7 +4554,7 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
if (targ_btf_path)
targ_btf = btf__parse_elf(targ_btf_path, NULL);
else
- targ_btf = bpf_core_find_kernel_btf();
+ targ_btf = libbpf_find_kernel_btf();
if (IS_ERR(targ_btf)) {
pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
return PTR_ERR(targ_btf);
@@ -4252,13 +4626,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
size_t new_cnt;
int err;
- if (prog->idx == obj->efile.text_shndx) {
- pr_warn("relo in .text insn %d into off %d (insn #%d)\n",
- relo->insn_idx, relo->sym_off, relo->sym_off / 8);
- return -LIBBPF_ERRNO__RELOC;
- }
-
- if (prog->main_prog_cnt == 0) {
+ if (prog->idx != obj->efile.text_shndx && prog->main_prog_cnt == 0) {
text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
if (!text) {
pr_warn("no .text section found yet relo into text exist\n");
@@ -4288,6 +4656,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
text->insns_cnt, text->section_name,
prog->section_name);
}
+
insn = &prog->insns[relo->insn_idx];
insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
return 0;
@@ -4367,8 +4736,28 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
+ /* ensure .text is relocated first, as it's going to be copied as-is
+ * later for sub-program calls
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ if (prog->idx != obj->efile.text_shndx)
+ continue;
+
+ err = bpf_program__relocate(prog, obj);
+ if (err) {
+ pr_warn("failed to relocate '%s'\n", prog->section_name);
+ return err;
+ }
+ break;
+ }
+ /* now relocate everything but .text, which by now is relocated
+ * properly, so we can copy raw sub-program instructions as is safely
+ */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
+ if (prog->idx == obj->efile.text_shndx)
+ continue;
err = bpf_program__relocate(prog, obj);
if (err) {
@@ -4379,6 +4768,10 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return 0;
}
+static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
+ GElf_Shdr *shdr,
+ Elf_Data *data);
+
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
int i, err;
@@ -4399,6 +4792,15 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
return -LIBBPF_ERRNO__INTERNAL;
}
+ if (idx == obj->efile.st_ops_shndx) {
+ err = bpf_object__collect_struct_ops_map_reloc(obj,
+ shdr,
+ data);
+ if (err)
+ return err;
+ continue;
+ }
+
prog = bpf_object__find_prog_by_idx(obj, idx);
if (!prog) {
pr_warn("relocation failed: no section(%d)\n", idx);
@@ -4433,7 +4835,10 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
load_attr.license = license;
- if (prog->type == BPF_PROG_TYPE_TRACING) {
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
+ load_attr.attach_btf_id = prog->attach_btf_id;
+ } else if (prog->type == BPF_PROG_TYPE_TRACING ||
+ prog->type == BPF_PROG_TYPE_EXT) {
load_attr.attach_prog_fd = prog->attach_prog_fd;
load_attr.attach_btf_id = prog->attach_btf_id;
} else {
@@ -4508,18 +4913,15 @@ out:
return ret;
}
-static int libbpf_find_attach_btf_id(const char *name,
- enum bpf_attach_type attach_type,
- __u32 attach_prog_fd);
+static int libbpf_find_attach_btf_id(struct bpf_program *prog);
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
int err = 0, fd, i, btf_id;
- if (prog->type == BPF_PROG_TYPE_TRACING) {
- btf_id = libbpf_find_attach_btf_id(prog->section_name,
- prog->expected_attach_type,
- prog->attach_prog_fd);
+ if (prog->type == BPF_PROG_TYPE_TRACING ||
+ prog->type == BPF_PROG_TYPE_EXT) {
+ btf_id = libbpf_find_attach_btf_id(prog);
if (btf_id <= 0)
return btf_id;
prog->attach_btf_id = btf_id;
@@ -4679,6 +5081,9 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
enum bpf_prog_type prog_type;
enum bpf_attach_type attach_type;
+ if (prog->type != BPF_PROG_TYPE_UNSPEC)
+ continue;
+
err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
&attach_type);
if (err == -ESRCH)
@@ -4689,7 +5094,8 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog, attach_type);
- if (prog_type == BPF_PROG_TYPE_TRACING)
+ if (prog_type == BPF_PROG_TYPE_TRACING ||
+ prog_type == BPF_PROG_TYPE_EXT)
prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
}
@@ -4774,8 +5180,11 @@ int bpf_object__unload(struct bpf_object *obj)
if (!obj)
return -EINVAL;
- for (i = 0; i < obj->nr_maps; i++)
+ for (i = 0; i < obj->nr_maps; i++) {
zclose(obj->maps[i].fd);
+ if (obj->maps[i].st_ops)
+ zfree(&obj->maps[i].st_ops->kern_vdata);
+ }
for (i = 0; i < obj->nr_programs; i++)
bpf_program__unload(&obj->programs[i]);
@@ -4891,9 +5300,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
+ err = err ? : bpf_object__load_vmlinux_btf(obj);
+ err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
err = err ? : bpf_object__load_progs(obj, attr->log_level);
+
+ btf__free(obj->btf_vmlinux);
+ obj->btf_vmlinux = NULL;
+
if (err)
goto out;
@@ -5478,6 +5893,13 @@ void bpf_object__close(struct bpf_object *obj)
map->mmaped = NULL;
}
+ if (map->st_ops) {
+ zfree(&map->st_ops->data);
+ zfree(&map->st_ops->progs);
+ zfree(&map->st_ops->kern_func_off);
+ zfree(&map->st_ops);
+ }
+
zfree(&map->name);
zfree(&map->pin_path);
}
@@ -5746,6 +6168,8 @@ BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
+BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
+BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog)
@@ -5845,6 +6269,9 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_FEXIT,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("freplace/", EXT,
+ .is_attach_btf = true,
+ .attach_fn = attach_trace),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
@@ -5899,6 +6326,7 @@ static const struct bpf_sec_def section_defs[] = {
BPF_CGROUP_GETSOCKOPT),
BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
BPF_CGROUP_SETSOCKOPT),
+ BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
};
#undef BPF_PROG_SEC_IMPL
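The new "freplace/" prefix maps to BPF_PROG_TYPE_EXT: an extension program that replaces one global function in an already-loaded program, resolved through that program's BTF via attach_prog_fd (passed at open time, as handled above). Roughly, on the BPF side (function and program names are placeholders):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Sketch: replace global function "handle_pkt" of a loaded XDP prog;
 * the target prog fd is supplied via bpf_object_open_opts.attach_prog_fd.
 */
SEC("freplace/handle_pkt")
int new_handle_pkt(struct xdp_md *ctx)
{
	return XDP_PASS;
}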
@@ -5975,34 +6403,182 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
return -ESRCH;
}
-#define BTF_PREFIX "btf_trace_"
+static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
+ size_t offset)
+{
+ struct bpf_map *map;
+ size_t i;
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+ if (map->sec_offset <= offset &&
+ offset - map->sec_offset < map->def.value_size)
+ return map;
+ }
+
+ return NULL;
+}
+
+/* Collect the reloc from ELF and populate the st_ops->progs[] */
+static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
+ GElf_Shdr *shdr,
+ Elf_Data *data)
+{
+ const struct btf_member *member;
+ struct bpf_struct_ops *st_ops;
+ struct bpf_program *prog;
+ unsigned int shdr_idx;
+ const struct btf *btf;
+ struct bpf_map *map;
+ Elf_Data *symbols;
+ unsigned int moff;
+ const char *name;
+ __u32 member_idx;
+ GElf_Sym sym;
+ GElf_Rel rel;
+ int i, nrels;
+
+ symbols = obj->efile.symbols;
+ btf = obj->btf;
+ nrels = shdr->sh_size / shdr->sh_entsize;
+ for (i = 0; i < nrels; i++) {
+ if (!gelf_getrel(data, i, &rel)) {
+ pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+ pr_warn("struct_ops reloc: symbol %zx not found\n",
+ (size_t)GELF_R_SYM(rel.r_info));
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name) ? : "<?>";
+ map = find_struct_ops_map_by_offset(obj, rel.r_offset);
+ if (!map) {
+ pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
+ (size_t)rel.r_offset);
+ return -EINVAL;
+ }
+
+ moff = rel.r_offset - map->sec_offset;
+ shdr_idx = sym.st_shndx;
+ st_ops = map->st_ops;
+ pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
+ map->name,
+ (long long)(rel.r_info >> 32),
+ (long long)sym.st_value,
+ shdr_idx, (size_t)rel.r_offset,
+ map->sec_offset, sym.st_name, name);
+
+ if (shdr_idx >= SHN_LORESERVE) {
+ pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
+ map->name, (size_t)rel.r_offset, shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ member = find_member_by_offset(st_ops->type, moff * 8);
+ if (!member) {
+ pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
+ map->name, moff);
+ return -EINVAL;
+ }
+ member_idx = member - btf_members(st_ops->type);
+ name = btf__name_by_offset(btf, member->name_off);
+
+ if (!resolve_func_ptr(btf, member->type, NULL)) {
+ pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
+ map->name, name);
+ return -EINVAL;
+ }
+
+ prog = bpf_object__find_prog_by_idx(obj, shdr_idx);
+ if (!prog) {
+ pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
+ map->name, shdr_idx, name);
+ return -EINVAL;
+ }
+
+ if (prog->type == BPF_PROG_TYPE_UNSPEC) {
+ const struct bpf_sec_def *sec_def;
+
+ sec_def = find_sec_def(prog->section_name);
+ if (sec_def &&
+ sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
+ /* for pr_warn */
+ prog->type = sec_def->prog_type;
+ goto invalid_prog;
+ }
+
+ prog->type = BPF_PROG_TYPE_STRUCT_OPS;
+ prog->attach_btf_id = st_ops->type_id;
+ prog->expected_attach_type = member_idx;
+ } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
+ prog->attach_btf_id != st_ops->type_id ||
+ prog->expected_attach_type != member_idx) {
+ goto invalid_prog;
+ }
+ st_ops->progs[member_idx] = prog;
+ }
+
+ return 0;
+
+invalid_prog:
+ pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
+ map->name, prog->name, prog->section_name, prog->type,
+ prog->attach_btf_id, prog->expected_attach_type, name);
+ return -EINVAL;
+}
+
+#define BTF_TRACE_PREFIX "btf_trace_"
+#define BTF_MAX_NAME_SIZE 128
+
+static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
+ const char *name, __u32 kind)
+{
+ char btf_type_name[BTF_MAX_NAME_SIZE];
+ int ret;
+
+ ret = snprintf(btf_type_name, sizeof(btf_type_name),
+ "%s%s", prefix, name);
+ /* snprintf returns the number of characters written excluding the
+ * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
+ * indicates truncation.
+ */
+ if (ret < 0 || ret >= sizeof(btf_type_name))
+ return -ENAMETOOLONG;
+ return btf__find_by_name_kind(btf, btf_type_name, kind);
+}
+
+static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
+ enum bpf_attach_type attach_type)
+{
+ int err;
+
+ if (attach_type == BPF_TRACE_RAW_TP)
+ err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
+ BTF_KIND_TYPEDEF);
+ else
+ err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+
+ return err;
+}
+
int libbpf_find_vmlinux_btf_id(const char *name,
enum bpf_attach_type attach_type)
{
- struct btf *btf = bpf_core_find_kernel_btf();
- char raw_tp_btf[128] = BTF_PREFIX;
- char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
- const char *btf_name;
- int err = -EINVAL;
- __u32 kind;
+ struct btf *btf;
+ btf = libbpf_find_kernel_btf();
if (IS_ERR(btf)) {
pr_warn("vmlinux BTF is not found\n");
return -EINVAL;
}
- if (attach_type == BPF_TRACE_RAW_TP) {
- /* prepend "btf_trace_" prefix per kernel convention */
- strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
- btf_name = raw_tp_btf;
- kind = BTF_KIND_TYPEDEF;
- } else {
- btf_name = name;
- kind = BTF_KIND_FUNC;
- }
- err = btf__find_by_name_kind(btf, btf_name, kind);
- btf__free(btf);
- return err;
+ return __find_vmlinux_btf_id(btf, name, attach_type);
}
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
@@ -6038,10 +6614,11 @@ out:
return err;
}
-static int libbpf_find_attach_btf_id(const char *name,
- enum bpf_attach_type attach_type,
- __u32 attach_prog_fd)
+static int libbpf_find_attach_btf_id(struct bpf_program *prog)
{
+ enum bpf_attach_type attach_type = prog->expected_attach_type;
+ __u32 attach_prog_fd = prog->attach_prog_fd;
+ const char *name = prog->section_name;
int i, err;
if (!name)
@@ -6056,8 +6633,9 @@ static int libbpf_find_attach_btf_id(const char *name,
err = libbpf_find_prog_btf_id(name + section_defs[i].len,
attach_prog_fd);
else
- err = libbpf_find_vmlinux_btf_id(name + section_defs[i].len,
- attach_type);
+ err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
+ name + section_defs[i].len,
+ attach_type);
if (err <= 0)
pr_warn("%s is not found in vmlinux BTF\n", name);
return err;
@@ -6805,6 +7383,58 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog)
return sec_def->attach_fn(sec_def, prog);
}
+static int bpf_link__detach_struct_ops(struct bpf_link *link)
+{
+ struct bpf_link_fd *l = (void *)link;
+ __u32 zero = 0;
+
+ if (bpf_map_delete_elem(l->fd, &zero))
+ return -errno;
+
+ return 0;
+}
+
+struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
+{
+ struct bpf_struct_ops *st_ops;
+ struct bpf_link_fd *link;
+ __u32 i, zero = 0;
+ int err;
+
+ if (!bpf_map__is_struct_ops(map) || map->fd == -1)
+ return ERR_PTR(-EINVAL);
+
+ link = calloc(1, sizeof(*link));
+ if (!link)
+ return ERR_PTR(-EINVAL);
+
+ st_ops = map->st_ops;
+ for (i = 0; i < btf_vlen(st_ops->type); i++) {
+ struct bpf_program *prog = st_ops->progs[i];
+ void *kern_data;
+ int prog_fd;
+
+ if (!prog)
+ continue;
+
+ prog_fd = bpf_program__fd(prog);
+ kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
+ *(unsigned long *)kern_data = prog_fd;
+ }
+
+ err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
+ if (err) {
+ err = -errno;
+ free(link);
+ return ERR_PTR(err);
+ }
+
+ link->link.detach = bpf_link__detach_struct_ops;
+ link->fd = map->fd;
+
+ return (struct bpf_link *)link;
+}
+
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
void **copy_mem, size_t *copy_size,
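With the pieces above, user-side registration reduces to open + load + attach; destroying the link deletes the single map element, which unregisters the ops. A hedged end-to-end sketch (object and map names are illustrative):

#include <bpf/libbpf.h>

static int register_ops(void)
{
	struct bpf_object *obj;
	struct bpf_link *link;
	struct bpf_map *map;

	obj = bpf_object__open("bpf_dctcp.o");
	if (libbpf_get_error(obj))
		return -1;
	if (bpf_object__load(obj))	/* creates the struct_ops map */
		goto err;
	map = bpf_object__find_map_by_name(obj, "dctcp");
	if (!map)
		goto err;
	link = bpf_map__attach_struct_ops(map);	/* registers the ops */
	if (libbpf_get_error(link))
		goto err;
	return 0;
err:
	bpf_object__close(obj);
	return -1;
}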
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index fe592ef48f1b..2a5e3b087002 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -239,6 +239,8 @@ bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(struct bpf_program *prog);
+struct bpf_map;
+LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
struct bpf_insn;
/*
@@ -315,6 +317,8 @@ LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
@@ -335,6 +339,8 @@ LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
/*
* No need for __attribute__((packed)), all members of 'bpf_map_def'
@@ -354,7 +360,6 @@ struct bpf_map_def {
* The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel,
* so no need to worry about a name clash.
*/
-struct bpf_map;
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
@@ -521,6 +526,7 @@ LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
enum bpf_prog_type prog_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);
/*
* Get bpf_prog_info in continuous memory
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index e9713a574243..b035122142bb 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -213,14 +213,25 @@ LIBBPF_0.0.7 {
global:
btf_dump__emit_type_decl;
bpf_link__disconnect;
+ bpf_map__attach_struct_ops;
+ bpf_map_delete_batch;
+ bpf_map_lookup_and_delete_batch;
+ bpf_map_lookup_batch;
+ bpf_map_update_batch;
bpf_object__find_program_by_name;
bpf_object__attach_skeleton;
bpf_object__destroy_skeleton;
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
+ bpf_probe_large_insn_limit;
bpf_prog_attach_xattr;
bpf_program__attach;
bpf_program__name;
+ bpf_program__is_extension;
+ bpf_program__is_struct_ops;
+ bpf_program__set_extension;
+ bpf_program__set_struct_ops;
btf__align_of;
+ libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index 4343e40588c6..0afb51f7a919 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -13,6 +13,9 @@
#include "libbpf.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index a9eb8b322671..b782ebef6ac9 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -17,6 +17,9 @@
#include "libbpf.h"
#include "libbpf_internal.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
static bool grep(const char *buffer, const char *pattern)
{
return !!strstr(buffer, pattern);
@@ -103,6 +106,8 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ case BPF_PROG_TYPE_EXT:
default:
break;
}
@@ -251,6 +256,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_XSKMAP:
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ case BPF_MAP_TYPE_STRUCT_OPS:
default:
break;
}
@@ -321,3 +327,24 @@ bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
return res;
}
+
+/*
+ * Probe for availability of kernel commit (5.3):
+ *
+ * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
+ */
+bool bpf_probe_large_insn_limit(__u32 ifindex)
+{
+ struct bpf_insn insns[BPF_MAXINSNS + 1];
+ int i;
+
+ for (i = 0; i < BPF_MAXINSNS; i++)
+ insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
+ insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
+
+ errno = 0;
+ probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
+ ifindex);
+
+ return errno != E2BIG && errno != EINVAL;
+}
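Because the probe submits BPF_MAXINSNS + 1 instructions, pre-5.3 kernels reject it with E2BIG (or EINVAL from offload drivers), while newer kernels accept programs up to one million instructions. Feature-gating is then a one-liner; a sketch with hypothetical helpers:

#include <bpf/libbpf.h>

void load_unrolled_variant(void);	/* hypothetical */
void load_compact_variant(void);	/* hypothetical */

void pick_prog_variant(void)
{
	if (bpf_probe_large_insn_limit(0))	/* 0: probe kernel, not offload dev */
		load_unrolled_variant();
	else
		load_compact_variant();
}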
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 5065c1aa1061..431bd25c6cdb 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -15,6 +15,9 @@
#include "libbpf_internal.h"
#include "nlattr.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 8db44bbfc66d..0ad41dfea8eb 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -13,6 +13,9 @@
#include <string.h>
#include <stdio.h>
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = {
[LIBBPF_NLA_U8] = sizeof(uint8_t),
[LIBBPF_NLA_U16] = sizeof(uint16_t),
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
index b8064eedc177..146da01979c7 100644
--- a/tools/lib/bpf/str_error.c
+++ b/tools/lib/bpf/str_error.c
@@ -4,6 +4,9 @@
#include <stdio.h>
#include "str_error.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
/*
* Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl
* libc, while checking strerror_r() return to avoid having to check this in
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 8e0ffa800a71..9807903f121e 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -32,6 +32,9 @@
#include "libbpf_internal.h"
#include "xsk.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
#ifndef SOL_XDP
#define SOL_XDP 283
#endif