author    Greg Kroah-Hartman  2023-04-19 15:09:40 +0200
committer Greg Kroah-Hartman  2023-04-19 15:09:40 +0200
commit    a7b3a470fdea9f4a7daa5ae446c4e27c65f00aac (patch)
tree      b27754d359fd6c65c74655676362d7a5e27cd7b1 /drivers/base
parent    ca9d081b49cc225627aa73feb9284b7ffc190df5 (diff)
parent    ef9f643a9f8b62bcbcc51f0e0af8599adc2e17ed (diff)
Merge tag 'cacheinfo-updates-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into driver-core-next
Sudeep writes:

cacheinfo and arch_topology updates for v6.4

The cache information can be extracted from either a Device Tree (DT), the PPTT ACPI table, or arch registers (clidr_el1 for arm64). When the DT is used but no cache properties are advertised, the current code doesn't correctly fall back to using the arch information. The changes fix this, and also assume that L1 data/instruction caches are private and L2/higher caches are shared when the cache information is missing in DT/ACPI and is derived from clidr_el1/arch registers.

Currently the cacheinfo is built from the primary CPU prior to secondary CPU boot, if the DT/ACPI description contains cache information. However, if that information is not present, the code still reverts to the old behavior, which allocates the cacheinfo memory on each secondary CPU and causes RT kernels to trigger a "BUG: sleeping function called from invalid context". The changes here attempt to enable automatic detection for RT kernels when no DT/ACPI cache information is available, by pre-allocating cacheinfo memory on the primary CPU.

* tag 'cacheinfo-updates-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  cacheinfo: Add use_arch[|_cache]_info field/function
  arch_topology: Remove early cacheinfo error message if -ENOENT
  cacheinfo: Check cache properties are present in DT
  cacheinfo: Check sib_leaf in cache_leaves_are_shared()
  cacheinfo: Allow early level detection when DT/ACPI info is missing/broken
  cacheinfo: Add arm64 early level initializer implementation
  cacheinfo: Add arch specific early level initializer
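The fallback assumption above (private L1, shared L2 and higher) boils down to a two-line predicate. Below is a minimal standalone sketch of that heuristic, using a simplified stand-in struct rather than the kernel's struct cacheinfo; it mirrors the cache_leaves_are_shared() change in the diff further down.

#include <stdbool.h>

/* Simplified stand-in for struct cacheinfo; only the level matters here. */
struct leaf {
	unsigned int level;
};

/*
 * Fallback heuristic: L1 caches are assumed private to a CPU and
 * everything at L2 or above system-wide shared, so two leaves are
 * considered shared only when neither of them is a level-1 cache.
 */
static bool leaves_are_shared(const struct leaf *this_leaf,
			      const struct leaf *sib_leaf)
{
	return this_leaf->level != 1 && sib_leaf->level != 1;
}

Checking both leaves is the point of "cacheinfo: Check sib_leaf in cache_leaves_are_shared()": testing only this_leaf->level, as the old code did, would report an L2/L1 pair as shared.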
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c    11
-rw-r--r--  drivers/base/cacheinfo.c       124
2 files changed, 103 insertions, 32 deletions
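The last two patches in the shortlog above rest on the weak-symbol override pattern: the core defines a default early_cache_level() that reports "no information", and an architecture can supply a strong definition that wins at link time (arm64 does, decoding clidr_el1). Here is a sketch of the pattern, shown as two translation units in one listing; arch_probe_levels() is a hypothetical stand-in, and the kernel spells the attribute __weak:

/* --- core default (one object file) --- */
#include <errno.h>

int __attribute__((weak)) early_cache_level(unsigned int cpu)
{
	(void)cpu;
	return -ENOENT;	/* nothing known this early */
}

/* --- arch override (a separate object file) --- */
extern int arch_probe_levels(unsigned int cpu);

int early_cache_level(unsigned int cpu)
{
	/* e.g. derive levels/leaves from clidr_el1 on arm64 */
	return arch_probe_levels(cpu);
}

With no arch override linked in, callers get -ENOENT and fall back to the regular DT/ACPI path, which is exactly how fetch_cache_info() in the diff below treats it.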
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index b1c1dd38ab01..b741b5ba82bd 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -835,18 +835,19 @@ void __init init_cpu_topology(void)
if (ret) {
/*
* Discard anything that was parsed if we hit an error so we
- * don't use partial information.
+ * don't use partial information. But do not return yet to give
+ * arch-specific early cache level detection a chance to run.
*/
reset_cpu_topology();
- return;
}
for_each_possible_cpu(cpu) {
ret = fetch_cache_info(cpu);
- if (ret) {
+ if (!ret)
+ continue;
+ else if (ret != -ENOENT)
pr_err("Early cacheinfo failed, ret = %d\n", ret);
- break;
- }
+ return;
}
}
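The cacheinfo.c diff below pairs with the hunk above: fetch_cache_info() may now pre-allocate memory on the primary CPU from an arch-guessed level count, and init_level_allocate_ci() later decides whether that early allocation can be kept. Reduced to a standalone sketch with the per-CPU bookkeeping collapsed into one struct (names and sizes are simplified stand-ins, not the kernel's API):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct ci {
	unsigned int num_leaves;
	bool early_ci_levels;	/* leaf count came from an arch guess */
	void *info_list;	/* pre-allocated cacheinfo memory */
};

/* Called once the real leaf count is known (CPU hotplug time). */
static int init_level_allocate_ci(struct ci *c, unsigned int real_leaves)
{
	unsigned int early_leaves = c->num_leaves;

	if (c->info_list && !c->early_ci_levels)
		return 0;		/* already properly initialized */

	c->num_leaves = real_leaves;
	c->early_ci_levels = false;	/* don't redo this on the next call */

	if (c->info_list && real_leaves <= early_leaves)
		return 0;		/* early allocation is big enough */

	free(c->info_list);		/* guessed too small: reallocate */
	c->info_list = calloc(real_leaves, 64 /* per-leaf size, arbitrary */);
	return c->info_list ? 0 : -ENOMEM;
}

The "big enough" shortcut is why early_leaves is sampled before the counts are updated: memory allocated early on the primary CPU is reused whenever the properly detected topology fits, so the sleeping allocation on secondary CPUs that trips RT kernels is avoided.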
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f3903d002819..14aa62c3f10f 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -28,6 +28,9 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define per_cpu_cacheinfo_idx(cpu, idx) \
(per_cpu_cacheinfo(cpu) + (idx))
+/* Set if no cache information is found in DT/ACPI. */
+static bool use_arch_info;
+
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
return ci_cacheinfo(cpu);
@@ -38,11 +41,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
/*
* For non DT/ACPI systems, assume unique level 1 caches,
- * system-wide shared caches for all other levels. This will be used
- * only if arch specific code has not populated shared_cpu_map
+ * system-wide shared caches for all other levels.
*/
- if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
- return !(this_leaf->level == 1);
+ if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
+ use_arch_info)
+ return (this_leaf->level != 1) && (sib_leaf->level != 1);
if ((sib_leaf->attributes & CACHE_ID) &&
(this_leaf->attributes & CACHE_ID))
@@ -79,6 +82,9 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
}
#ifdef CONFIG_OF
+
+static bool of_check_cache_nodes(struct device_node *np);
+
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
@@ -206,6 +212,11 @@ static int cache_setup_of_node(unsigned int cpu)
return -ENOENT;
}
+ if (!of_check_cache_nodes(np)) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
prev = np;
while (index < cache_leaves(cpu)) {
@@ -230,6 +241,25 @@ static int cache_setup_of_node(unsigned int cpu)
return 0;
}
+static bool of_check_cache_nodes(struct device_node *np)
+{
+ struct device_node *next;
+
+ if (of_property_present(np, "cache-size") ||
+ of_property_present(np, "i-cache-size") ||
+ of_property_present(np, "d-cache-size") ||
+ of_property_present(np, "cache-unified"))
+ return true;
+
+ next = of_find_next_cache_node(np);
+ if (next) {
+ of_node_put(next);
+ return true;
+ }
+
+ return false;
+}
+
static int of_count_cache_leaves(struct device_node *np)
{
unsigned int leaves = 0;
@@ -261,6 +291,11 @@ int init_of_cache_level(unsigned int cpu)
struct device_node *prev = NULL;
unsigned int levels = 0, leaves, level;
+ if (!of_check_cache_nodes(np)) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
leaves = of_count_cache_leaves(np);
if (leaves > 0)
levels = 1;
@@ -312,6 +347,10 @@ static int cache_setup_properties(unsigned int cpu)
else if (!acpi_disabled)
ret = cache_setup_acpi(cpu);
+ // Assume there is no cache information available in DT/ACPI from now.
+ if (ret && use_arch_cache_info())
+ use_arch_info = true;
+
return ret;
}
@@ -330,7 +369,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
* to update the shared cpu_map if the cache attributes were
* populated early before all the cpus are brought online
*/
- if (!last_level_cache_is_valid(cpu)) {
+ if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
ret = cache_setup_properties(cpu);
if (ret)
return ret;
@@ -398,6 +437,11 @@ static void free_cache_attributes(unsigned int cpu)
cache_shared_cpu_map_remove(cpu);
}
+int __weak early_cache_level(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
int __weak init_cache_level(unsigned int cpu)
{
return -ENOENT;
@@ -423,56 +467,82 @@ int allocate_cache_info(int cpu)
int fetch_cache_info(unsigned int cpu)
{
- struct cpu_cacheinfo *this_cpu_ci;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
unsigned int levels = 0, split_levels = 0;
int ret;
if (acpi_disabled) {
ret = init_of_cache_level(cpu);
- if (ret < 0)
- return ret;
} else {
ret = acpi_get_cache_info(cpu, &levels, &split_levels);
- if (ret < 0)
+ if (!ret) {
+ this_cpu_ci->num_levels = levels;
+ /*
+ * This assumes that:
+ * - there cannot be any split caches (data/instruction)
+ * above a unified cache
+ * - data/instruction caches come by pair
+ */
+ this_cpu_ci->num_leaves = levels + split_levels;
+ }
+ }
+
+ if (ret || !cache_leaves(cpu)) {
+ ret = early_cache_level(cpu);
+ if (ret)
return ret;
- this_cpu_ci = get_cpu_cacheinfo(cpu);
- this_cpu_ci->num_levels = levels;
- /*
- * This assumes that:
- * - there cannot be any split caches (data/instruction)
- * above a unified cache
- * - data/instruction caches come by pair
- */
- this_cpu_ci->num_leaves = levels + split_levels;
+ if (!cache_leaves(cpu))
+ return -ENOENT;
+
+ this_cpu_ci->early_ci_levels = true;
}
- if (!cache_leaves(cpu))
- return -ENOENT;
return allocate_cache_info(cpu);
}
-int detect_cache_attributes(unsigned int cpu)
+static inline int init_level_allocate_ci(unsigned int cpu)
{
- int ret;
+ unsigned int early_leaves = cache_leaves(cpu);
/* Since early initialization/allocation of the cacheinfo is allowed
* via fetch_cache_info() and this also gets called as CPU hotplug
* callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
* as it will happen only once (the cacheinfo memory is never freed).
- * Just populate the cacheinfo.
+ * Just populate the cacheinfo. However, if the cacheinfo has been
+ * allocated early through the arch-specific early_cache_level() call,
+ * there is a chance the info is wrong (this can happen on arm64). In
+ * that case, call init_cache_level() anyway to give the arch-specific
+ * code a chance to make things right.
*/
- if (per_cpu_cacheinfo(cpu))
- goto populate_leaves;
+ if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
+ return 0;
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
- ret = allocate_cache_info(cpu);
+ /*
+ * Now that we have properly initialized the cache level info, make
+ * sure we don't try to do that again the next time we are called
+ * (e.g. as CPU hotplug callbacks).
+ */
+ ci_cacheinfo(cpu)->early_ci_levels = false;
+
+ if (cache_leaves(cpu) <= early_leaves)
+ return 0;
+
+ kfree(per_cpu_cacheinfo(cpu));
+ return allocate_cache_info(cpu);
+}
+
+int detect_cache_attributes(unsigned int cpu)
+{
+ int ret;
+
+ ret = init_level_allocate_ci(cpu);
if (ret)
return ret;
-populate_leaves:
/*
* If LLC is valid the cache leaves were already populated so just go to
* update the cpu map.