author | Linus Torvalds | 2020-10-14 10:45:41 -0700
committer | Linus Torvalds | 2020-10-14 10:45:41 -0700
commit | 0b8417c14181595997091145342954332fa016cd (patch)
tree | 6a9c81a04ee42796e7b6dd4e212da5227d159d25 /drivers/base
parent | 15cb5469fc5fff06969832028b743cb658d1a5b5 (diff)
parent | 16641d81f9ff5f902d084754c84b2bde3a60bc6e (diff)
Merge tag 'pm-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
"These rework the collection of cpufreq statistics to allow it to take
place if fast frequency switching is enabled in the governor, rework
the frequency invariance handling in the cpufreq core and drivers, add
new hardware support to a couple of cpufreq drivers, fix a number of
assorted issues and clean up the code all over.
Specifics:
- Rework cpufreq statistics collection to allow it to take place when
fast frequency switching is enabled in the governor (Viresh Kumar).
- Make the cpufreq core set the frequency scale on behalf of the
driver and update several cpufreq drivers accordingly (Ionela
Voinescu, Valentin Schneider).
- Add new hardware support to the STI and qcom cpufreq drivers and
improve them (Alain Volmat, Manivannan Sadhasivam).
- Fix multiple assorted issues in cpufreq drivers (Jon Hunter,
Krzysztof Kozlowski, Matthias Kaehlcke, Pali Rohár, Stephan
Gerhold, Viresh Kumar).
- Fix several assorted issues in the operating performance points
(OPP) framework (Stephan Gerhold, Viresh Kumar).
- Allow devfreq drivers to fetch devfreq instances by DT enumeration
instead of using explicit phandles and modify the devfreq core code
to support driver-specific devfreq DT bindings (Leonard Crestez,
Chanwoo Choi).
- Improve initial hardware resetting in the tegra30 devfreq driver
and clean up the tegra cpuidle driver (Dmitry Osipenko).
- Update the cpuidle core to collect state entry rejection statistics
and expose them via sysfs (Lina Iyer).
- Improve the ACPI _CST code handling diagnostics (Chen Yu).
- Update the PSCI cpuidle driver to allow the PM domain
initialization to occur in the OSI mode as well as in the PC mode
(Ulf Hansson).
- Rework the generic power domains (genpd) core code to allow the
domain power off transition to be aborted in the absence of the
"power off" domain callback (Ulf Hansson); see the sketch following
this quoted log.
- Fix two suspend-to-idle issues in the ACPI EC driver (Rafael
Wysocki).
- Fix the handling of timer_expires in the PM-runtime framework on
32-bit systems and the handling of device links in it (Grygorii
Strashko, Xiang Chen).
- Add IO requests batching support to the hibernate image saving and
reading code and drop a bogus get_gendisk() from there (Xiaoyi
Chen, Christoph Hellwig).
- Allow PCIe ports to be put into the D3cold power state if they are
power-manageable via ACPI (Lukas Wunner).
- Add missing header file include to a power capping driver (Pujin
Shi).
- Clean up the qcom-cpr AVS driver a bit (Liu Shixin).
- Kevin Hilman steps down as designated reviewer of adaptive voltage
scaling (AVS) drivers (Kevin Hilman)"
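As a rough illustration of the genpd rework mentioned above (the power-off abort no longer depends on a ->power_off() callback being present), here is a minimal userspace sketch of the new control flow. All names below are invented for illustration; only the control flow mirrors the domain.c change in the diff further down.

```c
/*
 * Minimal userspace sketch (not kernel code) of the reworked genpd
 * power-off flow.  All identifiers are hypothetical; only the control
 * flow mirrors the domain.c change shown in the diff below.
 */
#include <errno.h>
#include <stdio.h>

struct fake_domain {
	int child_count;                         /* stand-in for genpd->sd_count */
	int (*power_off)(struct fake_domain *d); /* optional callback, may be NULL */
	int is_on;
};

static int domain_power_off(struct fake_domain *d)
{
	/*
	 * Abort if a child domain is waiting to power on.  The old code
	 * only reached this check when a ->power_off() callback existed.
	 */
	if (d->child_count > 0)
		return -EBUSY;

	/* Invoke the optional callback; its absence is not an error. */
	if (d->power_off) {
		int ret = d->power_off(d);

		if (ret)
			return ret;
	}

	d->is_on = 0;
	return 0;
}

int main(void)
{
	struct fake_domain d = { .child_count = 1, .power_off = NULL, .is_on = 1 };

	/* The transition is aborted even though no callback is set. */
	printf("power_off() -> %d, still on: %d\n", domain_power_off(&d), d.is_on);
	return 0;
}
```

In the actual domain.c hunk below, the sd_count check and the _genpd_power_off() call are no longer nested under if (genpd->power_off), which is what lets the power-off transition be aborted even when no callback is provided.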
* tag 'pm-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (65 commits)
cpufreq: stats: Fix string format specifier mismatch
arm: disable frequency invariance for CONFIG_BL_SWITCHER
cpufreq,arm,arm64: restructure definitions of arch_set_freq_scale()
cpufreq: stats: Add memory barrier to store_reset()
cpufreq: schedutil: Simplify sugov_fast_switch()
ACPI: EC: PM: Drop ec_no_wakeup check from acpi_ec_dispatch_gpe()
ACPI: EC: PM: Flush EC work unconditionally after wakeup
PCI/ACPI: Whitelist hotplug ports for D3 if power managed by ACPI
PM: hibernate: remove the bogus call to get_gendisk() in software_resume()
cpufreq: Move traces and update to policy->cur to cpufreq core
cpufreq: stats: Enable stats for fast-switch as well
cpufreq: stats: Mark few conditionals with unlikely()
cpufreq: stats: Remove locking
cpufreq: stats: Defer stats update to cpufreq_stats_record_transition()
PM: domains: Allow to abort power off when no ->power_off() callback
PM: domains: Rename power state enums for genpd
PM / devfreq: tegra30: Improve initial hardware resetting
PM / devfreq: event: Change prototype of devfreq_event_get_edev_by_phandle function
PM / devfreq: Change prototype of devfreq_get_devfreq_by_phandle function
PM / devfreq: Add devfreq_get_devfreq_by_node function
...
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/arch_topology.c | 15
-rw-r--r-- | drivers/base/power/domain.c | 71
-rw-r--r-- | drivers/base/power/runtime.c | 5
3 files changed, 44 insertions, 47 deletions
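The first hunk below (arch_topology.c) renames arch_set_freq_scale() to topology_set_freq_scale() and adds a zero-frequency guard. As a rough illustration of the per-CPU scale factor this code maintains, here is a standalone sketch; it is not kernel code and assumes the conventional cur_freq/max_freq ratio expressed in SCHED_CAPACITY_SCALE units.

```c
/*
 * Standalone sketch (not kernel code) of the arithmetic behind the
 * per-CPU freq_scale value that topology_set_freq_scale() maintains:
 * cur_freq/max_freq expressed in SCHED_CAPACITY_SCALE (1024) units.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

static unsigned long freq_scale(unsigned long cur_khz, unsigned long max_khz)
{
	/* Mirrors the WARN_ON_ONCE(!cur_freq || !max_freq) guard added below. */
	if (!cur_khz || !max_khz)
		return SCHED_CAPACITY_SCALE;

	return (cur_khz << SCHED_CAPACITY_SHIFT) / max_khz;
}

int main(void)
{
	/* A CPU running at 1.2 GHz with a 2.0 GHz maximum. */
	printf("scale = %lu / %lu\n",
	       freq_scale(1200000, 2000000), SCHED_CAPACITY_SCALE);
	return 0;
}
```

With the values used in main(), a CPU at 1.2 GHz out of a 2.0 GHz maximum gets a scale of 614 out of 1024.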
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 75f72d684294..c1a9e2fb634e 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -21,18 +21,27 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 
-__weak bool arch_freq_counters_available(struct cpumask *cpus)
+bool topology_scale_freq_invariant(void)
+{
+	return cpufreq_supports_freq_invariance() ||
+	       arch_freq_counters_available(cpu_online_mask);
+}
+
+__weak bool arch_freq_counters_available(const struct cpumask *cpus)
 {
 	return false;
 }
 
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
-void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
-			 unsigned long max_freq)
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+			     unsigned long max_freq)
 {
 	unsigned long scale;
 	int i;
 
+	if (WARN_ON_ONCE(!cur_freq || !max_freq))
+		return;
+
 	/*
 	 * If the use of counters for FIE is enabled, just return as we don't
 	 * want to update the scale factor with information from CPUFREQ.
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2cb5e04cf86c..05bb4d4401b2 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -123,7 +123,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
 #define genpd_unlock(p)			p->lock_ops->unlock(p)
 
-#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
+#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
@@ -222,7 +222,7 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
 	 * out of off and so update the idle time and vice
 	 * versa.
 	 */
-	if (genpd->status == GPD_STATE_ACTIVE) {
+	if (genpd->status == GENPD_STATE_ON) {
 		int state_idx = genpd->state_idx;
 
 		genpd->states[state_idx].idle_time =
@@ -497,6 +497,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 	struct pm_domain_data *pdd;
 	struct gpd_link *link;
 	unsigned int not_suspended = 0;
+	int ret;
 
 	/*
 	 * Do not try to power off the domain in the following situations:
@@ -544,26 +545,15 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 	if (!genpd->gov)
 		genpd->state_idx = 0;
 
-	if (genpd->power_off) {
-		int ret;
-
-		if (atomic_read(&genpd->sd_count) > 0)
-			return -EBUSY;
+	/* Don't power off, if a child domain is waiting to power on. */
+	if (atomic_read(&genpd->sd_count) > 0)
+		return -EBUSY;
 
-		/*
-		 * If sd_count > 0 at this point, one of the subdomains hasn't
-		 * managed to call genpd_power_on() for the parent yet after
-		 * incrementing it. In that case genpd_power_on() will wait
-		 * for us to drop the lock, so we can call .power_off() and let
-		 * the genpd_power_on() restore power for us (this shouldn't
-		 * happen very often).
-		 */
-		ret = _genpd_power_off(genpd, true);
-		if (ret)
-			return ret;
-	}
+	ret = _genpd_power_off(genpd, true);
+	if (ret)
+		return ret;
 
-	genpd->status = GPD_STATE_POWER_OFF;
+	genpd->status = GENPD_STATE_OFF;
 	genpd_update_accounting(genpd);
 
 	list_for_each_entry(link, &genpd->child_links, child_node) {
@@ -616,7 +606,7 @@
 	if (ret)
 		goto err;
 
-	genpd->status = GPD_STATE_ACTIVE;
+	genpd->status = GENPD_STATE_ON;
 	genpd_update_accounting(genpd);
 
 	return 0;
@@ -961,7 +951,7 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 	if (_genpd_power_off(genpd, false))
 		return;
 
-	genpd->status = GPD_STATE_POWER_OFF;
+	genpd->status = GENPD_STATE_OFF;
 
 	list_for_each_entry(link, &genpd->child_links, child_node) {
 		genpd_sd_counter_dec(link->parent);
@@ -1007,8 +997,8 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
 	}
 
 	_genpd_power_on(genpd, false);
-
-	genpd->status = GPD_STATE_ACTIVE;
+	genpd->status = GENPD_STATE_ON;
 }
@@ -1287,7 +1276,7 @@ static int genpd_restore_noirq(struct device *dev)
 	 * so make it appear as powered off to genpd_sync_power_on(),
 	 * so that it tries to power it on in case it was really off.
 	 */
-	genpd->status = GPD_STATE_POWER_OFF;
+	genpd->status = GENPD_STATE_OFF;
 
 	genpd_sync_power_on(genpd, true, 0);
 	genpd_unlock(genpd);
@@ -1777,7 +1766,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	atomic_set(&genpd->sd_count, 0);
-	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
 	genpd->device_count = 0;
 	genpd->max_off_time_ns = -1;
 	genpd->max_off_time_changed = true;
@@ -2044,8 +2033,9 @@ int of_genpd_add_provider_simple(struct device_node *np,
 	if (genpd->set_performance_state) {
 		ret = dev_pm_opp_of_add_table(&genpd->dev);
 		if (ret) {
-			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
-				ret);
+			if (ret != -EPROBE_DEFER)
+				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
+					ret);
 			goto unlock;
 		}
@@ -2054,7 +2044,7 @@
 		 * state.
 		 */
 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
-		WARN_ON(!genpd->opp_table);
+		WARN_ON(IS_ERR(genpd->opp_table));
 	}
 
 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
@@ -2111,8 +2101,9 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 		if (genpd->set_performance_state) {
 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
 			if (ret) {
-				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
-					i, ret);
+				if (ret != -EPROBE_DEFER)
+					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
+						i, ret);
 				goto error;
 			}
@@ -2121,7 +2112,7 @@
 			 * performance state.
 			 */
 			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
-			WARN_ON(!genpd->opp_table);
+			WARN_ON(IS_ERR(genpd->opp_table));
 		}
 
 		genpd->provider = &np->fwnode;
@@ -2802,8 +2793,8 @@ static int genpd_summary_one(struct seq_file *s, struct generic_pm_domain *genpd)
 {
 	static const char * const status_lookup[] = {
-		[GPD_STATE_ACTIVE] = "on",
-		[GPD_STATE_POWER_OFF] = "off"
+		[GENPD_STATE_ON] = "on",
+		[GENPD_STATE_OFF] = "off"
 	};
 	struct pm_domain_data *pm_data;
 	const char *kobj_path;
@@ -2881,8 +2872,8 @@ static int summary_show(struct seq_file *s, void *data)
 static int status_show(struct seq_file *s, void *data)
 {
 	static const char * const status_lookup[] = {
-		[GPD_STATE_ACTIVE] = "on",
-		[GPD_STATE_POWER_OFF] = "off"
+		[GENPD_STATE_ON] = "on",
+		[GENPD_STATE_OFF] = "off"
 	};
 	struct generic_pm_domain *genpd = s->private;
@@ -2895,7 +2886,7 @@ static int status_show(struct seq_file *s, void *data)
 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
 		goto exit;
 
-	if (genpd->status == GPD_STATE_POWER_OFF)
+	if (genpd->status == GENPD_STATE_OFF)
 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
 			   genpd->state_idx);
 	else
@@ -2938,7 +2929,7 @@ static int idle_states_show(struct seq_file *s, void *data)
 		ktime_t delta = 0;
 		s64 msecs;
 
-		if ((genpd->status == GPD_STATE_POWER_OFF) &&
+		if ((genpd->status == GENPD_STATE_OFF) &&
 				(genpd->state_idx == i))
 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
@@ -2961,7 +2952,7 @@ static int active_time_show(struct seq_file *s, void *data)
 	if (ret)
 		return -ERESTARTSYS;
 
-	if (genpd->status == GPD_STATE_ACTIVE)
+	if (genpd->status == GENPD_STATE_ON)
 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
 
 	seq_printf(s, "%lld ms\n", ktime_to_ms(
@@ -2984,7 +2975,7 @@ static int total_idle_time_show(struct seq_file *s, void *data)
 	for (i = 0; i < genpd->state_count; i++) {
 
-		if ((genpd->status == GPD_STATE_POWER_OFF) &&
+		if ((genpd->status == GENPD_STATE_OFF) &&
 				(genpd->state_idx == i))
 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8143210a5c54..6f605f7820bb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev)
 				device_links_read_lock_held()) {
 		int retval;
 
-		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
-		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+		if (!(link->flags & DL_FLAG_PM_RUNTIME))
 			continue;
 
 		retval = pm_runtime_get_sync(link->supplier);
@@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev)
 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 				device_links_read_lock_held()) {
-		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
-			continue;
 
 		while (refcount_dec_not_one(&link->rpm_active))
 			pm_runtime_put(link->supplier);
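The WARN_ON(IS_ERR(genpd->opp_table)) changes in domain.c above reflect that the OPP table lookup reports failure through an error-encoded pointer rather than NULL, so a plain NULL test does not catch it. The following standalone sketch reimplements the ERR_PTR()/IS_ERR()/PTR_ERR() idiom in simplified form to show why; it is illustrative only, not the kernel's implementation.

```c
/*
 * Simplified reimplementation (illustrative only, not the kernel's
 * macros) of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom: failures are
 * encoded in the pointer value itself, so a NULL check misses them.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;	/* e.g. -ENOMEM becomes 0xfff...ff4 */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)(unsigned long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *table = ERR_PTR(-ENOMEM);	/* stand-in for a failed lookup */

	/* A plain NULL test would miss the failure... */
	printf("table != NULL: %d\n", table != NULL);

	/* ...whereas IS_ERR() detects it and PTR_ERR() recovers the errno. */
	if (IS_ERR(table))
		printf("lookup failed: %ld\n", PTR_ERR(table));
	return 0;
}
```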