author | Mark Brown | 2020-10-06 16:19:24 +0100
committer | Mark Brown | 2020-10-06 16:19:24 +0100
commit | fd6b519a30a7179026d22c98d6bf10bb5ca8ca27 (patch)
tree | e4a6bc4d8548a5b8db7148a18eb257b84e72d48b /drivers/cpufreq
parent | 43499134f50a77844f0503df2c995f43b858f4c3 (diff)
parent | 856deb866d16e29bd65952e0289066f6078af773 (diff)
Merge tag 'v5.9-rc5' into asoc-5.10
Linux 5.9-rc5
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 3
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 236
-rw-r--r-- | drivers/cpufreq/p4-clockmod.c | 2
-rw-r--r-- | drivers/cpufreq/speedstep-lib.c | 2
-rw-r--r-- | drivers/cpufreq/tegra194-cpufreq.c | 10
-rw-r--r-- | drivers/cpufreq/ti-cpufreq.c | 4
6 files changed, 160 insertions(+), 97 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02ab56b2a0d8..47aa90f9a7c2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -703,8 +703,7 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 	freq = arch_freq_get_on_cpu(policy->cpu);
 	if (freq)
 		ret = sprintf(buf, "%u\n", freq);
-	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
-		 cpufreq_driver->get)
+	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
 	else
 		ret = sprintf(buf, "%u\n", policy->cur);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e0220a6fbc69..a827b000ef51 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -219,14 +219,13 @@ struct global_params {
  * @epp_policy:		Last saved policy used to set EPP/EPB
  * @epp_default:	Power on default HWP energy performance
  *			preference/bias
- * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
- *			operation
  * @epp_cached		Cached HWP energy-performance preference value
  * @hwp_req_cached:	Cached value of the last HWP Request MSR
  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
  * @last_io_update:	Last time when IO wake flag was set
  * @sched_flags:	Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:	Last HWP boosted min performance
+ * @suspended:		Whether or not the driver has been suspended.
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -258,13 +257,13 @@ struct cpudata {
 	s16 epp_powersave;
 	s16 epp_policy;
 	s16 epp_default;
-	s16 epp_saved;
 	s16 epp_cached;
 	u64 hwp_req_cached;
 	u64 hwp_cap_cached;
 	u64 last_io_update;
 	unsigned int sched_flags;
 	u32 hwp_boost_min;
+	bool suspended;
 };
 
 static struct cpudata **all_cpu_data;
@@ -644,6 +643,8 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
 
 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
 {
+	int ret;
+
 	/*
	 * Use the cached HWP Request MSR value, because in the active mode the
	 * register itself may be updated by intel_pstate_hwp_boost_up() or
@@ -659,7 +660,11 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
	 * function, so it cannot run in parallel with the update below.
	 */
 	WRITE_ONCE(cpu->hwp_req_cached, value);
-	return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+	if (!ret)
+		cpu->epp_cached = epp;
+
+	return ret;
 }
 
 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
@@ -678,6 +683,14 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 		else if (epp == -EINVAL)
 			epp = epp_values[pref_index - 1];
 
+		/*
+		 * To avoid confusion, refuse to set EPP to any values different
+		 * from 0 (performance) if the current policy is "performance",
+		 * because those values would be overridden.
+		 */
+		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+			return -EBUSY;
+
 		ret = intel_pstate_set_epp(cpu_data, epp);
 	} else {
 		if (epp == -EINVAL)
@@ -762,10 +775,8 @@ static ssize_t store_energy_performance_preference(
 			cpufreq_stop_governor(policy);
 			ret = intel_pstate_set_epp(cpu, epp);
 			err = cpufreq_start_governor(policy);
-			if (!ret) {
-				cpu->epp_cached = epp;
+			if (!ret)
 				ret = err;
-			}
 		}
 	}
 
@@ -825,7 +836,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
 	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
 	WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
 
-	if (global.no_turbo)
+	if (global.no_turbo || global.turbo_disabled)
 		*current_max = HWP_GUARANTEED_PERF(cap);
 	else
 		*current_max = HWP_HIGHEST_PERF(cap);
@@ -859,12 +870,6 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 
 	cpu_data->epp_policy = cpu_data->policy;
 
-	if (cpu_data->epp_saved >= 0) {
-		epp = cpu_data->epp_saved;
-		cpu_data->epp_saved = -EINVAL;
-		goto update_epp;
-	}
-
 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		epp = intel_pstate_get_epp(cpu_data, value);
 		cpu_data->epp_powersave = epp;
@@ -891,7 +896,6 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 		epp = cpu_data->epp_powersave;
 	}
 
-update_epp:
 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		value &= ~GENMASK_ULL(31, 24);
 		value |= (u64)epp << 24;
@@ -903,14 +907,24 @@ skip_epp:
 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
-static void intel_pstate_hwp_force_min_perf(int cpu)
+static void intel_pstate_hwp_offline(struct cpudata *cpu)
 {
-	u64 value;
+	u64 value = READ_ONCE(cpu->hwp_req_cached);
 	int min_perf;
 
-	value = all_cpu_data[cpu]->hwp_req_cached;
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+		/*
+		 * In case the EPP has been set to "performance" by the
+		 * active mode "performance" scaling algorithm, replace that
+		 * temporary value with the cached EPP one.
+		 */
+		value &= ~GENMASK_ULL(31, 24);
+		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+		WRITE_ONCE(cpu->hwp_req_cached, value);
+	}
+
 	value &= ~GENMASK_ULL(31, 0);
-	min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
+	min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
 
 	/* Set hwp_max = hwp_min */
 	value |= HWP_MAX_PERF(min_perf);
@@ -920,19 +934,7 @@ skip_epp:
 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
 
-	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
-}
-
-static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
-
-	if (!hwp_active)
-		return 0;
-
-	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
-
-	return 0;
+	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
 }
 
 #define POWER_CTL_EE_ENABLE	1
@@ -959,8 +961,28 @@ static void set_power_ctl_ee_state(bool input)
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
 
+static void intel_pstate_hwp_reenable(struct cpudata *cpu)
+{
+	intel_pstate_hwp_enable(cpu);
+	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+}
+
+static int intel_pstate_suspend(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+	pr_debug("CPU %d suspending\n", cpu->cpu);
+
+	cpu->suspended = true;
+
+	return 0;
+}
+
 static int intel_pstate_resume(struct cpufreq_policy *policy)
 {
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+	pr_debug("CPU %d resuming\n", cpu->cpu);
 
 	/* Only restore if the system default is changed */
 	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
@@ -968,18 +990,16 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
 	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
 		set_power_ctl_ee_state(false);
 
-	if (!hwp_active)
-		return 0;
+	if (cpu->suspended && hwp_active) {
+		mutex_lock(&intel_pstate_limits_lock);
 
-	mutex_lock(&intel_pstate_limits_lock);
+		/* Re-enable HWP, because "online" has not done that. */
+		intel_pstate_hwp_reenable(cpu);
 
-	if (policy->cpu == 0)
-		intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
+		mutex_unlock(&intel_pstate_limits_lock);
+	}
 
-	all_cpu_data[policy->cpu]->epp_policy = 0;
-	intel_pstate_hwp_set(policy->cpu);
-
-	mutex_unlock(&intel_pstate_limits_lock);
+	cpu->suspended = false;
 
 	return 0;
 }
@@ -1428,7 +1448,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
-	cpudata->epp_policy = 0;
 	if (cpudata->epp_default == -EINVAL)
 		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
@@ -2097,25 +2116,31 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
 		all_cpu_data[cpunum] = cpu;
 
-		cpu->epp_default = -EINVAL;
-		cpu->epp_powersave = -EINVAL;
-		cpu->epp_saved = -EINVAL;
-	}
-
-	cpu = all_cpu_data[cpunum];
+		cpu->cpu = cpunum;
 
-	cpu->cpu = cpunum;
+		cpu->epp_default = -EINVAL;
 
-	if (hwp_active) {
-		const struct x86_cpu_id *id;
+		if (hwp_active) {
+			const struct x86_cpu_id *id;
 
-		intel_pstate_hwp_enable(cpu);
+			intel_pstate_hwp_enable(cpu);
 
-		id = x86_match_cpu(intel_pstate_hwp_boost_ids);
-		if (id && intel_pstate_acpi_pm_profile_server())
-			hwp_boost = true;
+			id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+			if (id && intel_pstate_acpi_pm_profile_server())
+				hwp_boost = true;
+		}
+	} else if (hwp_active) {
+		/*
+		 * Re-enable HWP in case this happens after a resume from ACPI
+		 * S3 if the CPU was offline during the whole system/resume
+		 * cycle.
+		 */
+		intel_pstate_hwp_reenable(cpu);
 	}
 
+	cpu->epp_powersave = -EINVAL;
+	cpu->epp_policy = 0;
+
 	intel_pstate_get_cpu_pstates(cpu);
 
 	pr_debug("controlling: cpu %d\n", cpunum);
@@ -2296,28 +2321,61 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
-static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
 {
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+	pr_debug("CPU %d going offline\n", cpu->cpu);
+
+	if (cpu->suspended)
+		return 0;
+
+	/*
+	 * If the CPU is an SMT thread and it goes offline with the performance
+	 * settings different from the minimum, it will prevent its sibling
+	 * from getting to lower performance levels, so force the minimum
+	 * performance on CPU offline to prevent that from happening.
+	 */
 	if (hwp_active)
-		intel_pstate_hwp_force_min_perf(policy->cpu);
+		intel_pstate_hwp_offline(cpu);
 	else
-		intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+		intel_pstate_set_min_pstate(cpu);
+
+	intel_pstate_exit_perf_limits(policy);
+
+	return 0;
+}
+
+static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+	pr_debug("CPU %d going online\n", cpu->cpu);
+
+	intel_pstate_init_acpi_perf_limits(policy);
+
+	if (hwp_active) {
+		/*
+		 * Re-enable HWP and clear the "suspended" flag to let "resume"
+		 * know that it need not do that.
+		 */
+		intel_pstate_hwp_reenable(cpu);
+		cpu->suspended = false;
+	}
+
+	return 0;
 }
 
 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 {
-	pr_debug("CPU %d exiting\n", policy->cpu);
+	pr_debug("CPU %d stopping\n", policy->cpu);
 
 	intel_pstate_clear_update_util_hook(policy->cpu);
-	if (hwp_active)
-		intel_pstate_hwp_save_state(policy);
-
-	intel_cpufreq_stop_cpu(policy);
 }
 
 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
-	intel_pstate_exit_perf_limits(policy);
+	pr_debug("CPU %d exiting\n", policy->cpu);
 
 	policy->fast_switch_possible = false;
 
@@ -2378,6 +2436,12 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
	 */
 	policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
+	if (hwp_active) {
+		struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
+	}
+
 	return 0;
 }
 
@@ -2385,11 +2449,13 @@ static struct cpufreq_driver intel_pstate = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
-	.suspend	= intel_pstate_hwp_save_state,
+	.suspend	= intel_pstate_suspend,
 	.resume		= intel_pstate_resume,
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
+	.offline	= intel_pstate_cpu_offline,
+	.online		= intel_pstate_cpu_online,
 	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_pstate",
 };
@@ -2585,7 +2651,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
 		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
 		WRITE_ONCE(cpu->hwp_req_cached, value);
-		cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24;
+		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
 	} else {
 		turbo_max = cpu->pstate.turbo_pstate;
 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
@@ -2644,7 +2710,10 @@ static struct cpufreq_driver intel_cpufreq = {
 	.fast_switch	= intel_cpufreq_fast_switch,
 	.init		= intel_cpufreq_cpu_init,
 	.exit		= intel_cpufreq_cpu_exit,
-	.stop_cpu	= intel_cpufreq_stop_cpu,
+	.offline	= intel_pstate_cpu_offline,
+	.online		= intel_pstate_cpu_online,
+	.suspend	= intel_pstate_suspend,
+	.resume		= intel_pstate_resume,
 	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_cpufreq",
 };
@@ -2667,9 +2736,6 @@ static void intel_pstate_driver_cleanup(void)
 	}
 	put_online_cpus();
 
-	if (intel_pstate_driver == &intel_pstate)
-		intel_pstate_sysfs_hide_hwp_dynamic_boost();
-
 	intel_pstate_driver = NULL;
 }
 
@@ -2695,14 +2761,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 	return 0;
 }
 
-static int intel_pstate_unregister_driver(void)
-{
-	cpufreq_unregister_driver(intel_pstate_driver);
-	intel_pstate_driver_cleanup();
-
-	return 0;
-}
-
 static ssize_t intel_pstate_show_status(char *buf)
 {
 	if (!intel_pstate_driver)
@@ -2714,20 +2772,23 @@ static int intel_pstate_update_status(const char *buf, size_t size)
 {
-	int ret;
+	if (size == 3 && !strncmp(buf, "off", size)) {
+		if (!intel_pstate_driver)
+			return -EINVAL;
+
+		if (hwp_active)
+			return -EBUSY;
 
-	if (size == 3 && !strncmp(buf, "off", size))
-		return intel_pstate_driver ?
-			intel_pstate_unregister_driver() : -EINVAL;
+		cpufreq_unregister_driver(intel_pstate_driver);
+		intel_pstate_driver_cleanup();
+	}
 
 	if (size == 6 && !strncmp(buf, "active", size)) {
 		if (intel_pstate_driver) {
 			if (intel_pstate_driver == &intel_pstate)
 				return 0;
 
-			ret = intel_pstate_unregister_driver();
-			if (ret)
-				return ret;
+			cpufreq_unregister_driver(intel_pstate_driver);
 		}
 
 		return intel_pstate_register_driver(&intel_pstate);
@@ -2738,9 +2799,8 @@ static int intel_pstate_update_status(const char *buf, size_t size)
 		if (intel_pstate_driver == &intel_cpufreq)
 			return 0;
 
-		ret = intel_pstate_unregister_driver();
-		if (ret)
-			return ret;
+		cpufreq_unregister_driver(intel_pstate_driver);
+		intel_pstate_sysfs_hide_hwp_dynamic_boost();
 	}
 
 	return intel_pstate_register_driver(&intel_cpufreq);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index bb61677c11c7..ef0a3216a386 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -129,7 +129,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 		return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
 	case 0x0D: /* Pentium M (Dothan) */
 		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-		/* fall through */
+		fallthrough;
 	case 0x09: /* Pentium M (Banias) */
 		return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
 	}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 5c4f8f07c5a6..a13a2d1e444e 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -366,7 +366,7 @@ enum speedstep_processor speedstep_detect_processor(void)
 			} else
 				return SPEEDSTEP_CPU_PIII_C;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		return 0;
 	}
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index bae527e507e0..e1d931c457a7 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -56,9 +56,11 @@ struct read_counters_work {
 
 static struct workqueue_struct *read_counters_wq;
 
-static enum cluster get_cpu_cluster(u8 cpu)
+static void get_cpu_cluster(void *cluster)
 {
-	return MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+
+	*((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 }
 
 /*
@@ -186,8 +188,10 @@ static unsigned int tegra194_get_speed(u32 cpu)
 static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
 {
 	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
-	int cl = get_cpu_cluster(policy->cpu);
 	u32 cpu;
+	u32 cl;
+
+	smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
 
 	if (cl >= data->num_clusters)
 		return -EINVAL;
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index ab0de27539ad..8f9fdd864391 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -86,11 +86,11 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
 	case DRA76_EFUSE_HAS_PLUS_MPU_OPP:
 	case DRA76_EFUSE_HAS_ALL_MPU_OPP:
 		calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP;
-		/* Fall through */
+		fallthrough;
 	case DRA7_EFUSE_HAS_ALL_MPU_OPP:
 	case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
 		calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
-		/* Fall through */
+		fallthrough;
 	case DRA7_EFUSE_HAS_OD_MPU_OPP:
 		calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
 	}
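
A note on the fallthrough hunks above (p4-clockmod.c, speedstep-lib.c, ti-cpufreq.c): they replace free-form "/* fall through */" comments with the kernel's fallthrough pseudo-keyword, which the compiler can actually verify under -Wimplicit-fallthrough. The stand-alone C sketch below illustrates the same pattern outside the kernel tree; the fallthrough macro here is a simplified stand-in for the kernel's definition in include/linux/compiler_attributes.h, and decode_opp() is a hypothetical decoder loosely modeled on the cascading cases of dra7_efuse_xlate().

#include <stdio.h>

/* Simplified stand-in for the kernel's fallthrough macro. */
#ifdef __has_attribute
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)	/* fallback: plain no-op */
#endif

/* Hypothetical efuse decoder: each higher OPP level implies the lower ones,
 * so the cases deliberately cascade instead of breaking. */
static unsigned int decode_opp(int efuse)
{
	unsigned int opp = 0;

	switch (efuse) {
	case 3:
		opp |= 0x4;	/* "plus" OPP */
		fallthrough;	/* deliberate: include the lower levels too */
	case 2:
		opp |= 0x2;	/* "high" OPP */
		fallthrough;
	case 1:
		opp |= 0x1;	/* "od" OPP */
	}
	return opp;
}

int main(void)
{
	printf("%#x\n", decode_opp(3));	/* prints 0x7 */
	return 0;
}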
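Similarly, for the intel_pstate_hwp_offline() hunk: the sketch below walks through the MSR_HWP_REQUEST bit manipulation it performs (minimum performance in bits 7:0, maximum in bits 15:8, energy-performance preference in bits 31:24). The field macros are local stand-ins mirroring the kernel's definitions in arch/x86/include/asm/msr-index.h, and the starting register value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's MSR_HWP_REQUEST field macros. */
#define HWP_MIN_PERF(x)			((uint64_t)((x) & 0xff))
#define HWP_MAX_PERF(x)			(((uint64_t)((x) & 0xff)) << 8)
#define HWP_ENERGY_PERF_PREFERENCE(x)	(((uint64_t)((x) & 0xff)) << 24)
#define HWP_EPP_POWERSAVE		0xff

int main(void)
{
	uint64_t value = 0x1a2c04ULL;	/* made-up cached HWP request */
	unsigned int min_perf = 4;	/* lowest perf level from HWP caps */

	/* Mirror intel_pstate_hwp_offline(): clear bits 31:0, then pin both
	 * limits to the minimum and request the powersave EPP. */
	value &= ~0xffffffffULL;
	value |= HWP_MAX_PERF(min_perf);	/* Set hwp_max = hwp_min */
	value |= HWP_MIN_PERF(min_perf);
	value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

	printf("HWP_REQUEST = %#jx\n", (uintmax_t)value);	/* 0xff000404 */
	return 0;
}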