-rw-r--r--   drivers/acpi/processor_idle.c           |  10
-rw-r--r--   drivers/cpuidle/Kconfig.arm             |  10
-rw-r--r--   drivers/cpuidle/Makefile                |   5
-rw-r--r--   drivers/cpuidle/cpuidle-psci-domain.c   |  74
-rw-r--r--   drivers/cpuidle/cpuidle-psci.c          | 141
-rw-r--r--   drivers/cpuidle/cpuidle-psci.h          |  11
-rw-r--r--   drivers/cpuidle/cpuidle-tegra.c         |   8
-rw-r--r--   drivers/idle/intel_idle.c               |  53
-rw-r--r--   include/linux/cpuidle.h                 |   9
9 files changed, 210 insertions, 111 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 75534c5b5433..0c65a09aec1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -655,8 +655,8 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
         return index;
 }
 
-static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
-                                   struct cpuidle_driver *drv, int index)
+static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
+                                  struct cpuidle_driver *drv, int index)
 {
         struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
@@ -664,16 +664,18 @@ static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
                 struct acpi_processor *pr = __this_cpu_read(processors);
 
                 if (unlikely(!pr))
-                        return;
+                        return 0;
 
                 if (pr->flags.bm_check) {
                         acpi_idle_enter_bm(pr, cx, false);
-                        return;
+                        return 0;
                 } else {
                         ACPI_FLUSH_CPU_CACHE();
                 }
         }
         acpi_idle_do_entry(cx);
+
+        return 0;
 }
 
 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 51a7e89085c0..0844fadc4be8 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -23,6 +23,16 @@ config ARM_PSCI_CPUIDLE
           It provides an idle driver that is capable of detecting and
           managing idle states through the PSCI firmware interface.
 
+config ARM_PSCI_CPUIDLE_DOMAIN
+        bool "PSCI CPU idle Domain"
+        depends on ARM_PSCI_CPUIDLE
+        depends on PM_GENERIC_DOMAINS_OF
+        default y
+        help
+          Select this to enable the PSCI based CPUidle driver to use PM domains,
+          which is needed to support the hierarchical DT based layout of the
+          idle states.
+
 config ARM_BIG_LITTLE_CPUIDLE
         bool "Support for ARM big.LITTLE processors"
         depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index f07800cbb43f..26bbc5e74123 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -21,9 +21,8 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE)        += cpuidle-exynos.o
 obj-$(CONFIG_ARM_CPUIDLE)               += cpuidle-arm.o
-obj-$(CONFIG_ARM_PSCI_CPUIDLE)          += cpuidle_psci.o
-cpuidle_psci-y                          := cpuidle-psci.o
-cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE)          += cpuidle-psci.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE_DOMAIN)   += cpuidle-psci-domain.o
 obj-$(CONFIG_ARM_TEGRA_CPUIDLE)         += cpuidle-tegra.o
 obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE)      += cpuidle-qcom-spm.o
 
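The acpi_idle_enter_s2idle() hunk above is one instance of a tree-wide change: ->enter_s2idle callbacks now return int instead of void (the prototype itself is updated in the include/linux/cpuidle.h hunk at the end of this diff, and the Tegra and intel_idle hunks below follow the same pattern). For reference, a minimal sketch of an adapted handler in a hypothetical driver; the foo_* names are illustrative only and not part of this patch.

#include <linux/cpuidle.h>

/* Stand-in for a driver's real low-power entry path (hypothetical). */
static void foo_do_suspend(int index)
{
        /* firmware- or architecture-specific entry would go here */
}

static int foo_enter_s2idle(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
{
        foo_do_suspend(index);

        /* Nothing useful to report, so return 0 like the ACPI handler above. */
        return 0;
}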
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 423f03bbeb74..b6e9649ab0da 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -12,6 +12,7 @@
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/psci.h>
@@ -26,7 +27,7 @@ struct psci_pd_provider {
 };
 
 static LIST_HEAD(psci_pd_providers);
-static bool osi_mode_enabled __initdata;
+static bool psci_pd_allow_domain_state;
 
 static int psci_pd_power_off(struct generic_pm_domain *pd)
 {
@@ -36,6 +37,9 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
         if (!state->data)
                 return 0;
 
+        if (!psci_pd_allow_domain_state)
+                return -EBUSY;
+
         /* OSI mode is enabled, set the corresponding domain state. */
         pd_state = state->data;
         psci_set_domain_state(*pd_state);
@@ -43,8 +47,8 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
         return 0;
 }
 
-static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states,
-                                            int state_count)
+static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
+                                     int state_count)
 {
         int i, ret;
         u32 psci_state, *psci_state_buf;
@@ -73,7 +77,7 @@ free_state:
         return ret;
 }
 
-static int __init psci_pd_parse_states(struct device_node *np,
+static int psci_pd_parse_states(struct device_node *np,
                         struct genpd_power_state **states, int *state_count)
 {
         int ret;
@@ -101,7 +105,7 @@ static void psci_pd_free_states(struct genpd_power_state *states,
         kfree(states);
 }
 
-static int __init psci_pd_init(struct device_node *np)
+static int psci_pd_init(struct device_node *np)
 {
         struct generic_pm_domain *pd;
         struct psci_pd_provider *pd_provider;
@@ -168,7 +172,7 @@ out:
         return ret;
 }
 
-static void __init psci_pd_remove(void)
+static void psci_pd_remove(void)
 {
         struct psci_pd_provider *pd_provider, *it;
         struct generic_pm_domain *genpd;
@@ -186,7 +190,7 @@ static void __init psci_pd_remove(void)
         }
 }
 
-static int __init psci_pd_init_topology(struct device_node *np, bool add)
+static int psci_pd_init_topology(struct device_node *np, bool add)
 {
         struct device_node *node;
         struct of_phandle_args child, parent;
@@ -212,24 +216,33 @@ static int __init psci_pd_init_topology(struct device_node *np, bool add)
         return 0;
 }
 
-static int __init psci_pd_add_topology(struct device_node *np)
+static int psci_pd_add_topology(struct device_node *np)
 {
         return psci_pd_init_topology(np, true);
 }
 
-static void __init psci_pd_remove_topology(struct device_node *np)
+static void psci_pd_remove_topology(struct device_node *np)
 {
         psci_pd_init_topology(np, false);
 }
 
-static const struct of_device_id psci_of_match[] __initconst = {
+static void psci_cpuidle_domain_sync_state(struct device *dev)
+{
+        /*
+         * All devices have now been attached/probed to the PM domain topology,
+         * hence it's fine to allow domain states to be picked.
+         */
+        psci_pd_allow_domain_state = true;
+}
+
+static const struct of_device_id psci_of_match[] = {
         { .compatible = "arm,psci-1.0" },
         {}
 };
 
-static int __init psci_idle_init_domains(void)
+static int psci_cpuidle_domain_probe(struct platform_device *pdev)
 {
-        struct device_node *np = of_find_matching_node(NULL, psci_of_match);
+        struct device_node *np = pdev->dev.of_node;
         struct device_node *node;
         int ret = 0, pd_count = 0;
 
@@ -238,7 +251,7 @@ static int __init psci_idle_init_domains(void)
 
         /* Currently limit the hierarchical topology to be used in OSI mode. */
         if (!psci_has_osi_support())
-                goto out;
+                return 0;
 
         /*
          * Parse child nodes for the "#power-domain-cells" property and
@@ -257,7 +270,7 @@ static int __init psci_idle_init_domains(void)
 
         /* Bail out if not using the hierarchical CPU topology. */
         if (!pd_count)
-                goto out;
+                return 0;
 
         /* Link genpd masters/subdomains to model the CPU topology. */
         ret = psci_pd_add_topology(np);
@@ -272,10 +285,8 @@ static int __init psci_idle_init_domains(void)
                 goto remove_pd;
         }
 
-        osi_mode_enabled = true;
-        of_node_put(np);
         pr_info("Initialized CPU PM domain topology\n");
-        return pd_count;
+        return 0;
 
 put_node:
         of_node_put(node);
@@ -283,19 +294,28 @@ remove_pd:
         if (pd_count)
                 psci_pd_remove();
         pr_err("failed to create CPU PM domains ret=%d\n", ret);
-out:
-        of_node_put(np);
         return ret;
 }
+
+static struct platform_driver psci_cpuidle_domain_driver = {
+        .probe = psci_cpuidle_domain_probe,
+        .driver = {
+                .name = "psci-cpuidle-domain",
+                .of_match_table = psci_of_match,
+                .sync_state = psci_cpuidle_domain_sync_state,
+        },
+};
+
+static int __init psci_idle_init_domains(void)
+{
+        return platform_driver_register(&psci_cpuidle_domain_driver);
+}
 subsys_initcall(psci_idle_init_domains);
 
-struct device __init *psci_dt_attach_cpu(int cpu)
+struct device *psci_dt_attach_cpu(int cpu)
 {
         struct device *dev;
 
-        if (!osi_mode_enabled)
-                return NULL;
-
         dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
         if (IS_ERR_OR_NULL(dev))
                 return dev;
@@ -306,3 +326,11 @@ struct device __init *psci_dt_attach_cpu(int cpu)
 
         return dev;
 }
+
+void psci_dt_detach_cpu(struct device *dev)
+{
+        if (IS_ERR_OR_NULL(dev))
+                return;
+
+        dev_pm_domain_detach(dev, false);
+}
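The domain-driver conversion above leans on the driver core's sync_state mechanism: psci_pd_power_off() now returns -EBUSY until psci_cpuidle_domain_sync_state() has flipped psci_pd_allow_domain_state, and the core only invokes that callback once all consumers of the provider have probed. Below is a self-contained sketch of the same pattern for a hypothetical provider; the foo_* identifiers and the "vendor,foo-provider" compatible string are assumptions for illustration, not part of this patch.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Deeper behaviour is only allowed once every consumer has probed. */
static bool foo_provider_ready;

static int foo_provider_probe(struct platform_device *pdev)
{
        /*
         * Register the provider here.  Consumers may still be missing, so
         * anything that depends on them stays gated on foo_provider_ready
         * (the PSCI domain driver gates psci_pd_power_off() the same way).
         */
        return 0;
}

static void foo_provider_sync_state(struct device *dev)
{
        /* Called by the driver core after all DT consumers have probed. */
        foo_provider_ready = true;
}

static const struct of_device_id foo_provider_match[] = {
        { .compatible = "vendor,foo-provider" },
        { }
};

static struct platform_driver foo_provider_driver = {
        .probe = foo_provider_probe,
        .driver = {
                .name = "foo-provider",
                .of_match_table = foo_provider_match,
                .sync_state = foo_provider_sync_state,
        },
};
module_platform_driver(foo_provider_driver);

MODULE_LICENSE("GPL");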
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 3806f911b61c..74463841805f 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -17,9 +17,11 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/psci.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
 #include <asm/cpuidle.h>
 
@@ -33,7 +35,7 @@ struct psci_cpuidle_data {
 
 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
 static DEFINE_PER_CPU(u32, domain_state);
-static bool psci_cpuidle_use_cpuhp __initdata;
+static bool psci_cpuidle_use_cpuhp;
 
 void psci_set_domain_state(u32 state)
 {
@@ -104,7 +106,7 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
         return 0;
 }
 
-static void __init psci_idle_init_cpuhp(void)
+static void psci_idle_init_cpuhp(void)
 {
         int err;
 
@@ -127,30 +129,13 @@ static int psci_enter_idle_state(struct cpuidle_device *dev,
         return psci_enter_state(idx, state[idx]);
 }
 
-static struct cpuidle_driver psci_idle_driver __initdata = {
-        .name = "psci_idle",
-        .owner = THIS_MODULE,
-        /*
-         * PSCI idle states relies on architectural WFI to
-         * be represented as state index 0.
-         */
-        .states[0] = {
-                .enter                  = psci_enter_idle_state,
-                .exit_latency           = 1,
-                .target_residency       = 1,
-                .power_usage            = UINT_MAX,
-                .name                   = "WFI",
-                .desc                   = "ARM WFI",
-        }
-};
-
-static const struct of_device_id psci_idle_state_match[] __initconst = {
+static const struct of_device_id psci_idle_state_match[] = {
         { .compatible = "arm,idle-state",
           .data = psci_enter_idle_state },
         { },
 };
 
-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int psci_dt_parse_state_node(struct device_node *np, u32 *state)
 {
         int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
 
@@ -167,9 +152,9 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
         return 0;
 }
 
-static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
-                                            struct psci_cpuidle_data *data,
-                                            unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+                                     struct psci_cpuidle_data *data,
+                                     unsigned int state_count, int cpu)
 {
         /* Currently limit the hierarchical topology to be used in OSI mode. */
         if (!psci_has_osi_support())
@@ -190,9 +175,9 @@ static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
         return 0;
 }
 
-static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
-                                        struct device_node *cpu_node,
-                                        unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+                                 struct device_node *cpu_node,
+                                 unsigned int state_count, int cpu)
 {
         int i, ret = 0;
         u32 *psci_states;
@@ -200,7 +185,8 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
         struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
 
         state_count++; /* Add WFI state too */
-        psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
+        psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
+                                   GFP_KERNEL);
         if (!psci_states)
                 return -ENOMEM;
 
@@ -213,32 +199,26 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
                 of_node_put(state_node);
 
                 if (ret)
-                        goto free_mem;
+                        return ret;
 
                 pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
         }
 
-        if (i != state_count) {
-                ret = -ENODEV;
-                goto free_mem;
-        }
+        if (i != state_count)
+                return -ENODEV;
 
         /* Initialize optional data, used for the hierarchical topology. */
         ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
         if (ret < 0)
-                goto free_mem;
+                return ret;
 
         /* Idle states parsed correctly, store them in the per-cpu struct. */
         data->psci_states = psci_states;
         return 0;
-
-free_mem:
-        kfree(psci_states);
-        return ret;
 }
 
-static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
-                                     unsigned int cpu, unsigned int state_count)
+static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+                              unsigned int cpu, unsigned int state_count)
 {
         struct device_node *cpu_node;
         int ret;
@@ -254,14 +234,22 @@ static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
         if (!cpu_node)
                 return -ENODEV;
 
-        ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
+        ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);
 
         of_node_put(cpu_node);
 
         return ret;
 }
 
-static int __init psci_idle_init_cpu(int cpu)
+static void psci_cpu_deinit_idle(int cpu)
+{
+        struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
+
+        psci_dt_detach_cpu(data->dev);
+        psci_cpuidle_use_cpuhp = false;
+}
+
+static int psci_idle_init_cpu(struct device *dev, int cpu)
 {
         struct cpuidle_driver *drv;
         struct device_node *cpu_node;
@@ -284,17 +272,26 @@ static int __init psci_idle_init_cpu(int cpu)
         if (ret)
                 return ret;
 
-        drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+        drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
         if (!drv)
                 return -ENOMEM;
 
+        drv->name = "psci_idle";
+        drv->owner = THIS_MODULE;
         drv->cpumask = (struct cpumask *)cpumask_of(cpu);
 
         /*
-         * Initialize idle states data, starting at index 1, since
-         * by default idle state 0 is the quiescent state reached
-         * by the cpu by executing the wfi instruction.
-         *
+         * PSCI idle states relies on architectural WFI to be represented as
+         * state index 0.
+         */
+        drv->states[0].enter = psci_enter_idle_state;
+        drv->states[0].exit_latency = 1;
+        drv->states[0].target_residency = 1;
+        drv->states[0].power_usage = UINT_MAX;
+        strcpy(drv->states[0].name, "WFI");
+        strcpy(drv->states[0].desc, "ARM WFI");
+
+        /*
          * If no DT idle states are detected (ret == 0) let the driver
          * initialization fail accordingly since there is no reason to
         * initialize the idle driver if only wfi is supported, the
@@ -302,48 +299,45 @@ static int __init psci_idle_init_cpu(int cpu)
         * on idle entry.
         */
         ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
-        if (ret <= 0) {
-                ret = ret ? : -ENODEV;
-                goto out_kfree_drv;
-        }
+        if (ret <= 0)
+                return ret ? : -ENODEV;
 
         /*
          * Initialize PSCI idle states.
          */
-        ret = psci_cpu_init_idle(drv, cpu, ret);
+        ret = psci_cpu_init_idle(dev, drv, cpu, ret);
         if (ret) {
                 pr_err("CPU %d failed to PSCI idle\n", cpu);
-                goto out_kfree_drv;
+                return ret;
         }
 
         ret = cpuidle_register(drv, NULL);
         if (ret)
-                goto out_kfree_drv;
+                goto deinit;
 
         cpuidle_cooling_register(drv);
 
         return 0;
-
-out_kfree_drv:
-        kfree(drv);
+deinit:
+        psci_cpu_deinit_idle(cpu);
         return ret;
 }
 
 /*
- * psci_idle_init - Initializes PSCI cpuidle driver
+ * psci_idle_probe - Initializes PSCI cpuidle driver
 *
 * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
 * to register cpuidle driver then rollback to cancel all CPUs
 * registration.
 */
-static int __init psci_idle_init(void)
+static int psci_cpuidle_probe(struct platform_device *pdev)
 {
         int cpu, ret;
         struct cpuidle_driver *drv;
         struct cpuidle_device *dev;
 
         for_each_possible_cpu(cpu) {
-                ret = psci_idle_init_cpu(cpu);
+                ret = psci_idle_init_cpu(&pdev->dev, cpu);
                 if (ret)
                         goto out_fail;
         }
@@ -356,9 +350,34 @@ out_fail:
                 dev = per_cpu(cpuidle_devices, cpu);
                 drv = cpuidle_get_cpu_driver(dev);
                 cpuidle_unregister(drv);
-                kfree(drv);
+                psci_cpu_deinit_idle(cpu);
         }
 
         return ret;
 }
+
+static struct platform_driver psci_cpuidle_driver = {
+        .probe = psci_cpuidle_probe,
+        .driver = {
+                .name = "psci-cpuidle",
+        },
+};
+
+static int __init psci_idle_init(void)
+{
+        struct platform_device *pdev;
+        int ret;
+
+        ret = platform_driver_register(&psci_cpuidle_driver);
+        if (ret)
+                return ret;
+
+        pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
+        if (IS_ERR(pdev)) {
+                platform_driver_unregister(&psci_cpuidle_driver);
+                return PTR_ERR(pdev);
+        }
+
+        return 0;
+}
 device_initcall(psci_idle_init);
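Two things fall out of the cpuidle-psci.c conversion above: the driver now bootstraps itself from psci_idle_init() by registering a platform driver plus a matching platform device, and the per-CPU allocations switch to devm_* helpers tied to that device, which is why the old free_mem/out_kfree_drv unwind labels disappear. A hedged sketch of the devm side follows; foo_probe() is a hypothetical probe routine, not code from this patch.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int foo_probe(struct platform_device *pdev)
{
        u32 *table;

        /* Lifetime of the buffer is bound to the device, not to this path. */
        table = devm_kcalloc(&pdev->dev, 8, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        platform_set_drvdata(pdev, table);

        /*
         * Any error return from here on needs no kfree(): the driver core
         * releases devm allocations when probe fails or the device is
         * unbound, which is what makes the goto-based cleanup above go away.
         */
        return 0;
}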
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index 7299a04dd467..d8e925e84c27 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -3,15 +3,18 @@
 #ifndef __CPUIDLE_PSCI_H
 #define __CPUIDLE_PSCI_H
 
+struct device;
 struct device_node;
 
 void psci_set_domain_state(u32 state);
-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state);
+int psci_dt_parse_state_node(struct device_node *np, u32 *state);
 
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-struct device __init *psci_dt_attach_cpu(int cpu);
+#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
+struct device *psci_dt_attach_cpu(int cpu);
+void psci_dt_detach_cpu(struct device *dev);
 #else
-static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; }
+static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
+static inline void psci_dt_detach_cpu(struct device *dev) { }
 #endif
 
 #endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 150045849d78..a12fb141875a 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -253,11 +253,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
         return err ? -1 : index;
 }
 
-static void tegra114_enter_s2idle(struct cpuidle_device *dev,
-                                  struct cpuidle_driver *drv,
-                                  int index)
+static int tegra114_enter_s2idle(struct cpuidle_device *dev,
+                                 struct cpuidle_driver *drv,
+                                 int index)
 {
         tegra_cpuidle_enter(dev, drv, index);
+
+        return 0;
 }
 
 /*
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f4495841bf68..fd0fa9e7900b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -66,8 +66,6 @@ static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static unsigned long auto_demotion_disable_flags;
 static bool disable_promotion_to_c1e;
 
-static bool lapic_timer_always_reliable;
-
 struct idle_cpu {
         struct cpuidle_state *state_table;
 
@@ -142,7 +140,7 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
         if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
                 leave_mm(cpu);
 
-        if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) {
+        if (!static_cpu_has(X86_FEATURE_ARAT)) {
                 /*
                  * Switch over to one-shot tick broadcast if the target C-state
                  * is deeper than C1.
@@ -175,13 +173,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
 * scheduler tick and suspended scheduler clock on the target CPU.
 */
-static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev,
-                                        struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
+                                       struct cpuidle_driver *drv, int index)
 {
         unsigned long eax = flg2MWAIT(drv->states[index].flags);
         unsigned long ecx = 1; /* break on interrupt flag */
 
         mwait_idle_with_hints(eax, ecx);
+
+        return 0;
 }
 
 /*
@@ -752,6 +752,35 @@ static struct cpuidle_state skx_cstates[] __initdata = {
                 .enter = NULL }
 };
 
+static struct cpuidle_state icx_cstates[] __initdata = {
+        {
+                .name = "C1",
+                .desc = "MWAIT 0x00",
+                .flags = MWAIT2flg(0x00),
+                .exit_latency = 1,
+                .target_residency = 1,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .name = "C1E",
+                .desc = "MWAIT 0x01",
+                .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+                .exit_latency = 4,
+                .target_residency = 4,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .name = "C6",
+                .desc = "MWAIT 0x20",
+                .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+                .exit_latency = 128,
+                .target_residency = 384,
+                .enter = &intel_idle,
+                .enter_s2idle = intel_idle_s2idle, },
+        {
+                .enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] __initdata = {
         {
                 .name = "C1E",
@@ -1056,6 +1085,12 @@ static const struct idle_cpu idle_cpu_skx __initconst = {
         .use_acpi = true,
 };
 
+static const struct idle_cpu idle_cpu_icx __initconst = {
+        .state_table = icx_cstates,
+        .disable_promotion_to_c1e = true,
+        .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_avn __initconst = {
         .state_table = avn_cstates,
         .disable_promotion_to_c1e = true,
@@ -1110,6 +1145,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
         X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,          &idle_cpu_skl),
         X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,            &idle_cpu_skl),
         X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,           &idle_cpu_skx),
+        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &idle_cpu_icx),
         X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,        &idle_cpu_knl),
         X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,        &idle_cpu_knl),
         X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,       &idle_cpu_bxt),
@@ -1562,7 +1598,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
 {
         struct cpuidle_device *dev;
 
-        if (!lapic_timer_always_reliable)
+        if (!boot_cpu_has(X86_FEATURE_ARAT))
                 tick_broadcast_enable();
 
         /*
@@ -1655,16 +1691,13 @@ static int __init intel_idle_init(void)
                 goto init_driver_fail;
         }
 
-        if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
-                lapic_timer_always_reliable = true;
-
         retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
                                    intel_idle_cpu_online, NULL);
         if (retval < 0)
                 goto hp_setup_fail;
 
         pr_debug("Local APIC timer is reliable in %s\n",
-                 lapic_timer_always_reliable ? "all C-states" : "C1");
+                 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
 
         return 0;
 
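Dropping lapic_timer_always_reliable above works because boot_cpu_has(X86_FEATURE_ARAT) is a cheap flag test. When ARAT ("always running APIC timer") is absent, the local APIC timer stops in C-states deeper than C1, so the CPU hands its timer duty to the broadcast device around the MWAIT, as the comment retained in the first intel_idle() hunk describes. The sketch below illustrates that pattern in a simplified, hypothetical helper; it is not the actual intel_idle() body.

#include <linux/tick.h>
#include <asm/cpufeature.h>
#include <asm/mwait.h>

static void foo_deep_idle(unsigned long eax, unsigned long ecx, bool deep)
{
        bool broadcast = !boot_cpu_has(X86_FEATURE_ARAT) && deep;

        if (broadcast)
                tick_broadcast_enter();  /* hand timer duty to the broadcast device;
                                          * real code may need to check the return */

        mwait_idle_with_hints(eax, ecx);

        if (broadcast)
                tick_broadcast_exit();
}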
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ec2ef63771f0..b65909ae4e20 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -65,10 +65,13 @@ struct cpuidle_state {
          * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
          * suspended, so it must not re-enable interrupts at any point (even
          * temporarily) or attempt to change states of clock event devices.
+         *
+         * This callback may point to the same function as ->enter if all of
+         * the above requirements are met by it.
          */
-        void (*enter_s2idle) (struct cpuidle_device *dev,
-                              struct cpuidle_driver *drv,
-                              int index);
+        int (*enter_s2idle)(struct cpuidle_device *dev,
+                            struct cpuidle_driver *drv,
+                            int index);
 };
 
 /* Idle State Flags */
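The new sentence in the kerneldoc above is the practical payoff of the prototype change: a driver whose regular ->enter callback already satisfies the suspend-to-idle constraints can simply reuse it for ->enter_s2idle. A minimal sketch with hypothetical foo_* names (the field layout follows struct cpuidle_state and struct cpuidle_driver; nothing here is taken from a real driver):

#include <linux/cpuidle.h>
#include <linux/module.h>

static int foo_enter(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv, int index)
{
        /*
         * Must obey the ->enter_s2idle rules quoted above: do not re-enable
         * interrupts and do not touch clock event devices.
         */
        return index;   /* index of the state actually entered */
}

static struct cpuidle_driver foo_idle_driver = {
        .name = "foo_idle",
        .owner = THIS_MODULE,
        .states[0] = {
                .name                   = "FOO",
                .desc                   = "hypothetical shallow state",
                .exit_latency           = 1,
                .target_residency       = 1,
                .enter                  = foo_enter,
                .enter_s2idle           = foo_enter,    /* same prototype now */
        },
        .state_count = 1,
};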