author     Aaron Tomlin <atomlin@redhat.com>        2022-03-22 14:03:35 +0000
committer  Luis Chamberlain <mcgrof@kernel.org>     2022-04-05 08:43:04 -0700
commit     58d208de3e8d87dbe196caf0b57cc58c7a3836ca
tree       c6ad5b8a4547f1c2ef08ee497b06bc6bf43853b1
parent     1be9473e31ab87ad1b6ecf9fd11df461930ede85
module: Move latched RB-tree support to a separate file
No functional change.

This patch migrates the module latched RB-tree support
(e.g. see __module_address()) from core module code
into kernel/module/tree_lookup.c.
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
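
For context, the hot path this tree serves is the address-to-module lookup. Below is a simplified sketch of the consumer side, abbreviated from __module_address() in kernel/module/main.c of this era; it is an illustration, not the verbatim mainline text:

/*
 * Sketch: resolve a kernel address to the module containing it.
 * Caller must hold module_mutex or be in an RCU-sched read-side
 * critical section.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	/* Cheap bounds check first: most addresses are in no module. */
	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	mod = mod_find(addr);	/* latched RB-tree or RCU list walk */
	if (mod && mod->state == MODULE_STATE_UNFORMED)
		mod = NULL;

	return mod;
}

The bounds test makes the common miss case free; only in-range addresses walk the structure that this patch moves into tree_lookup.c.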
 kernel/module/Makefile      |   1 +
 kernel/module/internal.h    |  33 +
 kernel/module/main.c        | 130 +-
 kernel/module/tree_lookup.c | 109 +
 4 files changed, 147 insertions(+), 126 deletions(-)
diff --git a/kernel/module/Makefile b/kernel/module/Makefile
index ed3aacb04f17..88774e386276 100644
--- a/kernel/module/Makefile
+++ b/kernel/module/Makefile
@@ -11,3 +11,4 @@ obj-y += main.o
 obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o
 obj-$(CONFIG_MODULE_SIG) += signing.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
+obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index ad7a444253ed..f1682e3677be 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -9,6 +9,7 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/rculist.h>
 
 #ifndef ARCH_SHF_SMALL
 #define ARCH_SHF_SMALL 0
@@ -93,3 +94,35 @@ static inline void module_decompress_cleanup(struct load_info *info)
 {
 }
 #endif
+
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+struct mod_tree_root {
+	struct latch_tree_root root;
+	unsigned long addr_min;
+	unsigned long addr_max;
+};
+
+extern struct mod_tree_root mod_tree;
+
+void mod_tree_insert(struct module *mod);
+void mod_tree_remove_init(struct module *mod);
+void mod_tree_remove(struct module *mod);
+struct module *mod_find(unsigned long addr);
+#else /* !CONFIG_MODULES_TREE_LOOKUP */
+
+static inline void mod_tree_insert(struct module *mod) { }
+static inline void mod_tree_remove_init(struct module *mod) { }
+static inline void mod_tree_remove(struct module *mod) { }
+static inline struct module *mod_find(unsigned long addr)
+{
+	struct module *mod;
+
+	list_for_each_entry_rcu(mod, &modules, list,
+				lockdep_is_held(&module_mutex)) {
+		if (within_module(addr, mod))
+			return mod;
+	}
+
+	return NULL;
+}
+#endif /* CONFIG_MODULES_TREE_LOOKUP */
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 915143827069..0d0fdd82359b 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -90,138 +90,16 @@ static DECLARE_WORK(init_free_wq, do_free_init);
 static LLIST_HEAD(init_free_list);
 
 #ifdef CONFIG_MODULES_TREE_LOOKUP
-
-/*
- * Use a latched RB-tree for __module_address(); this allows us to use
- * RCU-sched lookups of the address from any context.
- *
- * This is conditional on PERF_EVENTS || TRACING because those can really hit
- * __module_address() hard by doing a lot of stack unwinding; potentially from
- * NMI context.
- */
-
-static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
-{
-	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
-
-	return (unsigned long)layout->base;
-}
-
-static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
-{
-	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
-
-	return (unsigned long)layout->size;
-}
-
-static __always_inline bool
-mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
-{
-	return __mod_tree_val(a) < __mod_tree_val(b);
-}
-
-static __always_inline int
-mod_tree_comp(void *key, struct latch_tree_node *n)
-{
-	unsigned long val = (unsigned long)key;
-	unsigned long start, end;
-
-	start = __mod_tree_val(n);
-	if (val < start)
-		return -1;
-
-	end = start + __mod_tree_size(n);
-	if (val >= end)
-		return 1;
-
-	return 0;
-}
-
-static const struct latch_tree_ops mod_tree_ops = {
-	.less = mod_tree_less,
-	.comp = mod_tree_comp,
-};
-
-static struct mod_tree_root {
-	struct latch_tree_root root;
-	unsigned long addr_min;
-	unsigned long addr_max;
-} mod_tree __cacheline_aligned = {
+struct mod_tree_root mod_tree __cacheline_aligned = {
 	.addr_min = -1UL,
 };
 
 #define module_addr_min mod_tree.addr_min
 #define module_addr_max mod_tree.addr_max
 
-static noinline void __mod_tree_insert(struct mod_tree_node *node)
-{
-	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
-}
-
-static void __mod_tree_remove(struct mod_tree_node *node)
-{
-	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
-}
-
-/*
- * These modifications: insert, remove_init and remove; are serialized by the
- * module_mutex.
- */
-static void mod_tree_insert(struct module *mod)
-{
-	mod->core_layout.mtn.mod = mod;
-	mod->init_layout.mtn.mod = mod;
-
-	__mod_tree_insert(&mod->core_layout.mtn);
-	if (mod->init_layout.size)
-		__mod_tree_insert(&mod->init_layout.mtn);
-}
-
-static void mod_tree_remove_init(struct module *mod)
-{
-	if (mod->init_layout.size)
-		__mod_tree_remove(&mod->init_layout.mtn);
-}
-
-static void mod_tree_remove(struct module *mod)
-{
-	__mod_tree_remove(&mod->core_layout.mtn);
-	mod_tree_remove_init(mod);
-}
-
-static struct module *mod_find(unsigned long addr)
-{
-	struct latch_tree_node *ltn;
-
-	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
-	if (!ltn)
-		return NULL;
-
-	return container_of(ltn, struct mod_tree_node, node)->mod;
-}
-
-#else /* MODULES_TREE_LOOKUP */
-
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
-
-static void mod_tree_insert(struct module *mod) { }
-static void mod_tree_remove_init(struct module *mod) { }
-static void mod_tree_remove(struct module *mod) { }
-
-static struct module *mod_find(unsigned long addr)
-{
-	struct module *mod;
-
-	list_for_each_entry_rcu(mod, &modules, list,
-				lockdep_is_held(&module_mutex)) {
-		if (within_module(addr, mod))
-			return mod;
-	}
-
-	return NULL;
-}
-
-#endif /* MODULES_TREE_LOOKUP */
+#else /* !CONFIG_MODULES_TREE_LOOKUP */
+static unsigned long module_addr_min = -1UL, module_addr_max;
+#endif /* CONFIG_MODULES_TREE_LOOKUP */
 
 /*
  * Bounds of module text, for speeding up __module_address.
diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c
new file mode 100644
index 000000000000..0bc4ec3b22ce
--- /dev/null
+++ b/kernel/module/tree_lookup.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Modules tree lookup
+ *
+ * Copyright (C) 2015 Peter Zijlstra
+ * Copyright (C) 2015 Rusty Russell
+ */
+
+#include <linux/module.h>
+#include <linux/rbtree_latch.h>
+#include "internal.h"
+
+/*
+ * Use a latched RB-tree for __module_address(); this allows us to use
+ * RCU-sched lookups of the address from any context.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
+ */
+
+static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
+{
+	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
+
+	return (unsigned long)layout->base;
+}
+
+static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
+{
+	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
+
+	return (unsigned long)layout->size;
+}
+
+static __always_inline bool
+mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
+{
+	return __mod_tree_val(a) < __mod_tree_val(b);
+}
+
+static __always_inline int
+mod_tree_comp(void *key, struct latch_tree_node *n)
+{
+	unsigned long val = (unsigned long)key;
+	unsigned long start, end;
+
+	start = __mod_tree_val(n);
+	if (val < start)
+		return -1;
+
+	end = start + __mod_tree_size(n);
+	if (val >= end)
+		return 1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops mod_tree_ops = {
+	.less = mod_tree_less,
+	.comp = mod_tree_comp,
+};
+
+static noinline void __mod_tree_insert(struct mod_tree_node *node)
+{
+	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+static void __mod_tree_remove(struct mod_tree_node *node)
+{
+	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+/*
+ * These modifications: insert, remove_init and remove; are serialized by the
+ * module_mutex.
+ */
+void mod_tree_insert(struct module *mod)
+{
+	mod->core_layout.mtn.mod = mod;
+	mod->init_layout.mtn.mod = mod;
+
+	__mod_tree_insert(&mod->core_layout.mtn);
+	if (mod->init_layout.size)
+		__mod_tree_insert(&mod->init_layout.mtn);
+}
+
+void mod_tree_remove_init(struct module *mod)
+{
+	if (mod->init_layout.size)
+		__mod_tree_remove(&mod->init_layout.mtn);
+}
+
+void mod_tree_remove(struct module *mod)
+{
+	__mod_tree_remove(&mod->core_layout.mtn);
+	mod_tree_remove_init(mod);
+}
+
+struct module *mod_find(unsigned long addr)
+{
+	struct latch_tree_node *ltn;
+
+	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
+	if (!ltn)
+		return NULL;
+
+	return container_of(ltn, struct mod_tree_node, node)->mod;
+}
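
The reason the latched RB-tree can be queried from any context, including NMI, is that latch_tree_find() never blocks: it reads whichever of two tree copies the sequence counter currently selects, and retries if a writer flipped the counter mid-lookup. A minimal userspace sketch of that latch pattern follows; every name in it is invented for illustration, and the memory ordering is simplified relative to the kernel's seqcount_latch primitives:

#include <stdatomic.h>
#include <stdio.h>

/*
 * Two copies of the data plus a sequence counter. The writer updates
 * the copy readers are currently steered away from; a reader retries
 * if the counter moved while it was reading. Readers never block.
 */
struct latch_pair {
	atomic_uint seq;
	long data[2];
};

static void latch_store(struct latch_pair *lp, long val)
{
	unsigned int seq = atomic_load_explicit(&lp->seq, memory_order_relaxed);

	/* Steer readers to data[1], then rewrite data[0]. */
	atomic_store_explicit(&lp->seq, ++seq, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);
	lp->data[0] = val;

	/* Steer readers to data[0], then rewrite data[1]. */
	atomic_store_explicit(&lp->seq, ++seq, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);
	lp->data[1] = val;
}

static long latch_load(struct latch_pair *lp)
{
	unsigned int seq;
	long val;

	do {
		seq = atomic_load_explicit(&lp->seq, memory_order_acquire);
		val = lp->data[seq & 1];	/* copy not being written */
		atomic_thread_fence(memory_order_seq_cst);
	} while (atomic_load_explicit(&lp->seq, memory_order_relaxed) != seq);

	return val;
}

int main(void)
{
	struct latch_pair lp = { .data = { 42, 42 } };

	latch_store(&lp, 99);
	printf("%ld\n", latch_load(&lp));	/* prints 99 */
	return 0;
}

In the kernel, latch_tree_insert() and latch_tree_erase() play the writer role, serialized by module_mutex as the comment in tree_lookup.c notes, while latch_tree_find() is the retry-loop reader; that is why mod_find() takes no lock on the lookup side.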