author      Linus Torvalds  2019-01-11 09:01:43 -0800
committer   Linus Torvalds  2019-01-11 09:01:43 -0800
commit      f4f31fff32252da3791e9d901adb44f963a79153 (patch)
tree        1895dd290f40bcd366aefe29f907f846c956fb15
parent      385c59c7baaa4626f5c01888d50e86e5636e655e (diff)
parent      343e60e52a3a093bb7baeca441789724fcac4bb5 (diff)
Merge tag 'pm-5.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
"These fix fallout after starting to use hrtimers in the runtime PM
framework, fix a few cpufreq issues, fix a recently broken reference
to cpuidle documentation, update MAINTAINERS entries for cpufreq and
cpuidle and make the recently added system suspend and resume support
in devfreq actually work.
Specifics:
- Prevent integer overflows from occurring on 32-bit when converting
milliseconds to nanoseconds in the runtime PM framework and update
comments that still refer to jiffies in it (Vincent Guittot,
Ladislav Michl); see the first sketch after this list.
- Fix the SCMI cpufreq driver to always use the same frequency units
for arch_set_freq_scale() and make the scale-invariant load
tracking actually work with this driver (Quentin Perret); see the
second sketch after this list.
- Fix freeing of dynamic OPPs in the SCPI and SCMI cpufreq drivers
broken during the 4.20 development cycle (Viresh Kumar).
- Prevent the cpufreq core from attempting to return the current
frequency of offline CPUs (Sudeep Holla).
- Add devfreq suspend and resume hooks (missed previously) to the PM
core to make the recently added system suspend and resume support
in devfreq actually work (Lukasz Luba).
- Update MAINTAINERS entries for cpufreq and cpuidle, mostly to add
references to new/current documentation to them (Rafael Wysocki).
- Fix a recently broken reference to cpuidle documentation (Otto
Sabart)"
* tag 'pm-5.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
PM-runtime: Fix autosuspend_delay on 32bits arch
PM-runtime: Fix 'jiffies' in comments after switch to hrtimers
cpufreq: scmi: Fix frequency invariance in slow path
doc: trace: fix reference to cpuidle documentation file
cpufreq: check if policy is inactive early in __cpufreq_get()
cpufreq: scpi/scmi: Fix freeing of dynamic OPPs
cpuidle / Documentation: Update cpuidle MAINTAINERS entry
cpufreq / Documentation: Update cpufreq MAINTAINERS entry
PM: sleep: call devfreq suspend/resume
-rw-r--r--  Documentation/trace/coresight-cpu-debug.txt |  2
-rw-r--r--  MAINTAINERS                                  |  7
-rw-r--r--  drivers/base/power/main.c                    |  3
-rw-r--r--  drivers/base/power/runtime.c                 | 11
-rw-r--r--  drivers/cpufreq/cpufreq.c                    | 12
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c               |  8
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c               |  4
-rw-r--r--  drivers/opp/core.c                           | 63
-rw-r--r--  include/linux/pm_opp.h                       |  5
9 files changed, 89 insertions, 26 deletions
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
index 89ab09e78e8d..f07e38094b40 100644
--- a/Documentation/trace/coresight-cpu-debug.txt
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -165,7 +165,7 @@ Do some work...
 
 The same can also be done from an application program.
 
 Disable specific CPU's specific idle state from cpuidle sysfs (see
-Documentation/cpuidle/sysfs.txt):
+Documentation/admin-guide/pm/cpuidle.rst):
 
 # echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
diff --git a/MAINTAINERS b/MAINTAINERS
index f3a226379e57..2cf9c1ceb502 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3951,7 +3951,7 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/ti/cpmac.c
 
-CPU FREQUENCY DRIVERS
+CPU FREQUENCY SCALING FRAMEWORK
 M:	"Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:	Viresh Kumar <viresh.kumar@linaro.org>
 L:	linux-pm@vger.kernel.org
@@ -3959,6 +3959,8 @@ S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 T:	git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
 B:	https://bugzilla.kernel.org
+F:	Documentation/admin-guide/pm/cpufreq.rst
+F:	Documentation/admin-guide/pm/intel_pstate.rst
 F:	Documentation/cpu-freq/
 F:	Documentation/devicetree/bindings/cpufreq/
 F:	drivers/cpufreq/
@@ -4006,13 +4008,14 @@ S:	Supported
 F:	drivers/cpuidle/cpuidle-exynos.c
 F:	arch/arm/mach-exynos/pm.c
 
-CPUIDLE DRIVERS
+CPU IDLE TIME MANAGEMENT FRAMEWORK
 M:	"Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 B:	https://bugzilla.kernel.org
+F:	Documentation/admin-guide/pm/cpuidle.rst
 F:	drivers/cpuidle/*
 F:	include/linux/cpuidle.h
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a690fd400260..0992e67e862b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,7 @@
 #include <trace/events/power.h>
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
+#include <linux/devfreq.h>
 #include <linux/timer.h>
 
 #include "../base.h"
@@ -1078,6 +1079,7 @@ void dpm_resume(pm_message_t state)
 	dpm_show_time(starttime, state, 0, NULL);
 
 	cpufreq_resume();
+	devfreq_resume();
 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 }
 
@@ -1852,6 +1854,7 @@ int dpm_suspend(pm_message_t state)
 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
 	might_sleep();
 
+	devfreq_suspend();
 	cpufreq_suspend();
 
 	mutex_lock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 70624695b6d5..457be03b744d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -121,7 +121,7 @@ static void pm_runtime_cancel_pending(struct device *dev)
  * Compute the autosuspend-delay expiration time based on the device's
  * power.last_busy time. If the delay has already expired or is disabled
  * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
  *
  * This function may be called either with or without dev->power.lock held.
  * Either way it can be racy, since power.last_busy may be updated at any time.
@@ -141,7 +141,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
 
 	last_busy = READ_ONCE(dev->power.last_busy);
 
-	expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
 	if (expires <= now)
 		expires = 0;	/* Already expired. */
 
@@ -525,7 +525,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 			 * We add a slack of 25% to gather wakeups
 			 * without sacrificing the granularity.
 			 */
-			u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+			u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 					(NSEC_PER_MSEC >> 2);
 
 			dev->power.timer_expires = expires;
@@ -905,7 +905,10 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	expires = dev->power.timer_expires;
-	/* If 'expire' is after 'jiffies' we've been called too early. */
+	/*
+	 * If 'expires' is after the current time, we've been called
+	 * too early.
+	 */
 	if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f23ebb395f1..e35a886e00bc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
 	unsigned int ret_freq = 0;
 
-	if (!cpufreq_driver->get)
+	if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
 		return ret_freq;
 
 	ret_freq = cpufreq_driver->get(policy->cpu);
 
 	/*
-	 * Updating inactive policies is invalid, so avoid doing that. Also
-	 * if fast frequency switching is used with the given policy, the check
+	 * If fast frequency switching is used with the given policy, the check
 	 * against policy->cur is pointless, so skip it in that case too.
 	 */
-	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+	if (policy->fast_switch_enabled)
 		return ret_freq;
 
 	if (ret_freq && policy->cur &&
@@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 
 	if (policy) {
 		down_read(&policy->rwsem);
-
-		if (!policy_is_inactive(policy))
-			ret_freq = __cpufreq_get(policy);
-
+		ret_freq = __cpufreq_get(policy);
 		up_read(&policy->rwsem);
 
 		cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 50b1551ba894..242c3370544e 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 	int ret;
 	struct scmi_data *priv = policy->driver_data;
 	struct scmi_perf_ops *perf_ops = handle->perf_ops;
-	u64 freq = policy->freq_table[index].frequency * 1000;
+	u64 freq = policy->freq_table[index].frequency;
 
-	ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
+	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
 	if (!ret)
 		arch_set_freq_scale(policy->related_cpus, freq,
 				    policy->cpuinfo.max_freq);
@@ -176,7 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 out_free_priv:
 	kfree(priv);
 out_free_opp:
-	dev_pm_opp_cpumask_remove_table(policy->cpus);
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 
 	return ret;
 }
@@ -188,7 +188,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	kfree(priv);
-	dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec77773..99449738faa4 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -177,7 +177,7 @@ out_free_cpufreq_table:
 out_free_priv:
 	kfree(priv);
 out_free_opp:
-	dev_pm_opp_cpumask_remove_table(policy->cpus);
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 
 	return ret;
 }
@@ -190,7 +190,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 	clk_put(priv->clk);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	kfree(priv);
-	dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 
 	return 0;
 }
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e5507add8f04..18f1639dbc4a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -988,11 +988,9 @@ void _opp_free(struct dev_pm_opp *opp)
 	kfree(opp);
 }
 
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+			      struct opp_table *opp_table)
 {
-	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-	struct opp_table *opp_table = opp->opp_table;
-
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
@@ -1002,7 +1000,22 @@ static void _opp_kref_release(struct kref *kref)
 	opp_debug_remove_one(opp);
 	list_del(&opp->node);
 	kfree(opp);
+}
 
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	_opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	_opp_kref_release(opp, opp_table);
 	mutex_unlock(&opp_table->lock);
 }
 
@@ -1013,10 +1026,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
 
 void dev_pm_opp_put(struct dev_pm_opp *opp)
 {
-	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+	kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+		       &opp->opp_table->lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
 
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+	kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
 /**
  * dev_pm_opp_remove() - Remove an OPP from OPP table
  * @dev: device for which we do this operation
@@ -1060,6 +1079,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev: device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+	struct opp_table *opp_table;
+	struct dev_pm_opp *opp, *temp;
+	int count = 0;
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
+		return;
+
+	mutex_lock(&opp_table->lock);
+	list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+		if (opp->dynamic) {
+			dev_pm_opp_put_unlocked(opp);
+			count++;
+		}
+	}
+	mutex_unlock(&opp_table->lock);
+
+	/* Drop the references taken by dev_pm_opp_add() */
+	while (count--)
+		dev_pm_opp_put_opp_table(opp_table);
+
+	/* Drop the reference taken by _find_opp_table() */
+	dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
+
 struct dev_pm_opp *_opp_allocate(struct opp_table *table)
 {
 	struct dev_pm_opp *opp;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0a2a88e5a383..b895f4e79868 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
 int dev_pm_opp_add(struct device *dev, unsigned long freq,
 		   unsigned long u_volt);
 void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
 
 int dev_pm_opp_enable(struct device *dev, unsigned long freq);
 
@@ -217,6 +218,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
 }
 
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+}
+
 static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
 	return 0;