author    Dave Marchevsky <davemarchevsky@fb.com>  2023-04-15 13:18:10 -0700
committer Alexei Starovoitov <ast@kernel.org>      2023-04-15 17:36:50 -0700
commit    3e81740a90626024a9d9c6f9bfa3d36204dafefb (patch)
tree      fd3df74964919ee3e70c4d4d3e656135787a2f3b /kernel/bpf
parent    404ad75a36fb1a1008e9fe803aa7d0212df9e240 (diff)
bpf: Centralize btf_field-specific initialization logic
All btf_fields in an object are 0-initialized by memset in bpf_obj_init.
This might not be a valid initial state for some field types, in which
case kfuncs that use the type will properly initialize their input if
it's been 0-initialized. Some BPF graph collection types and kfuncs do
this: bpf_list_{head,node} and bpf_rb_node. An earlier patch in this
series added the bpf_refcount field, for which the 0 state indicates
that the refcounted object should be free'd.

bpf_obj_init treats this field specially, setting refcount to 1 instead
of relying on scattered "refcount is 0? Must have just been initialized,
let's set to 1" logic in kfuncs. This patch extends this treatment to
list and rbtree field types, allowing most scattered initialization
logic in kfuncs to be removed.

Note that bpf_{list_head,rb_root} may be inside a BPF map, in which case
they'll be 0-initialized without passing through the newly-added logic,
so scattered initialization logic must remain for these collection root
types.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230415201811.343116-9-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
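For context, the comments added in the hunks below name bpf_obj_init_field(),
the per-field hook this series centralizes initialization in. The following is
a minimal sketch of its post-series shape in include/linux/bpf.h, reconstructed
from the commit message rather than quoted from the tree; the switch layout is
an assumption, though BPF_REFCOUNT, BPF_LIST_NODE, and BPF_RB_NODE are the real
btf_field_type values involved:

/* Sketch of the centralized per-field init (include/linux/bpf.h). */
static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
{
	memset(addr, 0, field->size);

	switch (field->type) {
	case BPF_REFCOUNT:
		/* refcount 0 means "free me"; a fresh object must start at 1 */
		refcount_set((refcount_t *)addr, 1);
		break;
	case BPF_LIST_NODE:
		/* a zeroed list_head is not a valid empty list */
		INIT_LIST_HEAD((struct list_head *)addr);
		break;
	case BPF_RB_NODE:
		/* a zeroed rb_node is not a valid unlinked node */
		RB_CLEAR_NODE((struct rb_node *)addr);
		break;
	default:
		break;
	}
}

static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
}

With the node cases handled at allocation time, a freshly allocated object
reaches the kfuncs already in a valid "unlinked" state, which is what lets the
scattered checks in the hunks below be deleted.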
Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/helpers.c  14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1835df333287..00e5fb0682ac 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1936,10 +1936,11 @@ static int __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head
 {
 	struct list_head *n = (void *)node, *h = (void *)head;
 
+	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
+	 * called on its fields, so init here
+	 */
 	if (unlikely(!h->next))
 		INIT_LIST_HEAD(h);
-	if (unlikely(!n->next))
-		INIT_LIST_HEAD(n);
 	if (!list_empty(n)) {
 		/* Only called from BPF prog, no need to migrate_disable */
 		__bpf_obj_drop_impl(n - off, rec);
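The surviving !h->next check is exactly the map case called out in the commit
message: a bpf_list_head embedded in a map value is zero-filled by the map, and
bpf_obj_init() never runs on it. A hypothetical selftest-style BPF-side layout
illustrating this; the struct names are made up, and the __contains annotation
and map macros follow the bpf_experimental.h / bpf_helpers.h conventions rather
than anything in this patch:

/* Illustrative only: node type linked into the list below. */
struct node_data {
	long key;
	struct bpf_list_node node;
};

/* The head lives in a map value, so it is 0-initialized by the map,
 * not by bpf_obj_init_field(); head.next stays NULL until the lazy
 * INIT_LIST_HEAD() in __bpf_list_add()/__bpf_list_del() runs.
 */
struct map_value {
	struct bpf_spin_lock lock;
	struct bpf_list_head head __contains(node_data, node);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} array_map SEC(".maps");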
@@ -1975,6 +1976,9 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
 {
 	struct list_head *n, *h = (void *)head;
 
+	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
+	 * called on its fields, so init here
+	 */
 	if (unlikely(!h->next))
 		INIT_LIST_HEAD(h);
 	if (list_empty(h))
@@ -2000,9 +2004,6 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
 	struct rb_root_cached *r = (struct rb_root_cached *)root;
 	struct rb_node *n = (struct rb_node *)node;
 
-	if (!n->__rb_parent_color)
-		RB_CLEAR_NODE(n);
-
 	if (RB_EMPTY_NODE(n))
 		return NULL;
@@ -2022,9 +2023,6 @@ static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
 	bpf_callback_t cb = (bpf_callback_t)less;
 	bool leftmost = true;
 
-	if (!n->__rb_parent_color)
-		RB_CLEAR_NODE(n);
-
 	if (!RB_EMPTY_NODE(n)) {
 		/* Only called from BPF prog, no need to migrate_disable */
 		__bpf_obj_drop_impl(n - off, rec);
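The two rbtree hunks above depend on the node being cleared at allocation
time. The removed branches existed because an all-zero rb_node does not
satisfy RB_EMPTY_NODE(): the macros in include/linux/rbtree.h define "empty"
(unlinked) as the node pointing its parent-color word at itself:

#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))

Since bpf_obj_init_field() now runs RB_CLEAR_NODE() when the object is
created, the 0-state can no longer reach these kfuncs for allocated nodes,
and the "zero means freshly initialized" translation branches become dead
code. Only the collection roots (bpf_list_head, bpf_rb_root), which may be
zero-filled inside a map, still need their lazy initialization.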