-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt       |   2
-rw-r--r--  Documentation/devicetree/bindings/power/rockchip-io-domain.txt |  14
-rw-r--r--  Documentation/power/devices.txt                                |   7
-rw-r--r--  Documentation/power/runtime_pm.txt                             |   4
-rw-r--r--  drivers/base/power/domain.c                                    | 386
-rw-r--r--  drivers/base/power/main.c                                      |   2
-rw-r--r--  drivers/power/avs/Kconfig                                      |   2
-rw-r--r--  drivers/power/avs/rockchip-io-domain.c                         |  59
-rw-r--r--  include/linux/pm_domain.h                                      |   9
-rw-r--r--  include/linux/pm_runtime.h                                     |   6
-rw-r--r--  kernel/power/Kconfig                                           |  10
-rw-r--r--  kernel/power/suspend.c                                         |   2
-rw-r--r--  kernel/power/wakelock.c                                        |  18
13 files changed, 186 insertions(+), 335 deletions(-)
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 0f8ed3710c66..025b5e7df61c 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -48,7 +48,7 @@ Example 2: #power-domain-cells = <1>; }; - child: power-controller@12340000 { + child: power-controller@12341000 { compatible = "foo,power-controller"; reg = <0x12341000 0x1000>; power-domains = <&parent 0>; diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt index 8b70db103ca7..b8627e763dba 100644 --- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt +++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt @@ -33,6 +33,8 @@ Required properties: - compatible: should be one of: - "rockchip,rk3188-io-voltage-domain" for rk3188 - "rockchip,rk3288-io-voltage-domain" for rk3288 + - "rockchip,rk3368-io-voltage-domain" for rk3368 + - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains - rockchip,grf: phandle to the syscon managing the "general register files" @@ -64,6 +66,18 @@ Possible supplies for rk3288: - sdcard-supply: The supply connected to SDMMC0_VDD. - wifi-supply: The supply connected to APIO3_VDD. Also known as SDIO0. +Possible supplies for rk3368: +- audio-supply: The supply connected to APIO3_VDD. +- dvp-supply: The supply connected to DVPIO_VDD. +- flash0-supply: The supply connected to FLASH0_VDD. Typically for eMMC +- gpio30-supply: The supply connected to APIO1_VDD. +- gpio1830 The supply connected to APIO4_VDD. +- sdcard-supply: The supply connected to SDMMC0_VDD. +- wifi-supply: The supply connected to APIO2_VDD. Also known as SDIO0. + +Possible supplies for rk3368 pmu-domains: +- pmu-supply: The supply connected to PMUIO_VDD. +- vop-supply: The supply connected to LCDC_VDD. Example: diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index d172bce0fd49..8ba6625fdd63 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt @@ -341,6 +341,13 @@ the phases are: and is entirely responsible for bringing the device back to the functional state as appropriate. + Note that this direct-complete procedure applies even if the device is + disabled for runtime PM; only the runtime-PM status matters. It follows + that if a device has system-sleep callbacks but does not support runtime + PM, then its prepare callback must never return a positive value. This + is because all devices are initially set to runtime-suspended with + runtime PM disabled. + 2. The suspend methods should quiesce the device to stop it from performing I/O. 
They also may save the device registers and put it into the appropriate low-power state, depending on the bus type the device is on, diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index e76dc0ad4d2b..0784bc3a2ab5 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -445,10 +445,6 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: bool pm_runtime_status_suspended(struct device *dev); - return true if the device's runtime PM status is 'suspended' - bool pm_runtime_suspended_if_enabled(struct device *dev); - - return true if the device's runtime PM status is 'suspended' and its - 'power.disable_depth' field is equal to 1 - void pm_runtime_allow(struct device *dev); - set the power.runtime_auto flag for the device and decrease its usage counter (used by the /sys/devices/.../power/control interface to diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0ee43c1056e0..416720159e96 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) stop_latency_ns, "stop"); } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, + bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, start, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, start_latency_ns, "start"); } @@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_acquire_lock(struct generic_pm_domain *genpd) -{ - DEFINE_WAIT(wait); - - mutex_lock(&genpd->lock); - /* - * Wait for the domain to transition into either the active, - * or the power off state. - */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status == GPD_STATE_ACTIVE - || genpd->status == GPD_STATE_POWER_OFF) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); -} - -static void genpd_release_lock(struct generic_pm_domain *genpd) -{ - mutex_unlock(&genpd->lock); -} - -static void genpd_set_active(struct generic_pm_domain *genpd) -{ - if (genpd->resume_count == 0) - genpd->status = GPD_STATE_ACTIVE; -} - static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) { s64 usecs64; @@ -251,35 +220,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) * resume a device belonging to it. */ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; - DEFINE_WAIT(wait); int ret = 0; - /* If the domain's master is being waited for, we have to wait too. 
*/ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_WAIT_MASTER) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->status != GPD_STATE_POWER_OFF) { - genpd_set_active(genpd); - return 0; - } - if (genpd->cpuidle_data) { cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = true; @@ -294,20 +242,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) */ list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); - genpd->status = GPD_STATE_WAIT_MASTER; - - mutex_unlock(&genpd->lock); ret = pm_genpd_poweron(link->master); - - mutex_lock(&genpd->lock); - - /* - * The "wait for parent" status is guaranteed not to change - * while the master is powering on. - */ - genpd->status = GPD_STATE_POWER_OFF; - wake_up_all(&genpd->status_wait_queue); if (ret) { genpd_sd_counter_dec(link->master); goto err; @@ -319,8 +255,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) goto err; out: - genpd_set_active(genpd); - + genpd->status = GPD_STATE_ACTIVE; return 0; err: @@ -356,20 +291,18 @@ int pm_genpd_name_poweron(const char *domain_name) return genpd ? pm_genpd_poweron(genpd) : -EINVAL; } -static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, - struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, start, dev); -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, save_state_latency_ns, "state save"); } -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_restore_dev(struct generic_pm_domain *genpd, + struct device *dev, bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, restore_state_latency_ns, "state restore"); @@ -416,89 +349,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * __pm_genpd_save_device - Save the pre-suspend state of a device. - * @pdd: Domain data of the device to save the state of. - * @genpd: PM domain the device belongs to. - */ -static int __pm_genpd_save_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int ret = 0; - - if (gpd_data->need_restore > 0) - return 0; - - /* - * If the value of the need_restore flag is still unknown at this point, - * we trust that pm_genpd_poweroff() has verified that the device is - * already runtime PM suspended. - */ - if (gpd_data->need_restore < 0) { - gpd_data->need_restore = 1; - return 0; - } - - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - ret = genpd_save_dev(genpd, dev); - genpd_stop_dev(genpd, dev); - - mutex_lock(&genpd->lock); - - if (!ret) - gpd_data->need_restore = 1; - - return ret; -} - -/** - * __pm_genpd_restore_device - Restore the pre-suspend state of a device. - * @pdd: Domain data of the device to restore the state of. - * @genpd: PM domain the device belongs to. 
- */ -static void __pm_genpd_restore_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int need_restore = gpd_data->need_restore; - - gpd_data->need_restore = 0; - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - - /* - * Call genpd_restore_dev() for recently added devices too (need_restore - * is negative then). - */ - if (need_restore) - genpd_restore_dev(genpd, dev); - - mutex_lock(&genpd->lock); -} - -/** - * genpd_abort_poweroff - Check if a PM domain power off should be aborted. - * @genpd: PM domain to check. - * - * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during - * a "power off" operation, which means that a "power on" has occured in the - * meantime, or if its resume_count field is different from zero, which means - * that one of its devices has been resumed in the meantime. - */ -static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) -{ - return genpd->status == GPD_STATE_WAIT_MASTER - || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; -} - -/** * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). * @genpd: PM domait to power off. * @@ -515,34 +365,26 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) * @genpd: PM domain to power down. * * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, run the runtime suspend callbacks provided by all of - * the @genpd's devices' drivers and remove power from @genpd. + * have been powered down, remove power from @genpd. */ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct pm_domain_data *pdd; struct gpd_link *link; - unsigned int not_suspended; - int ret = 0; + unsigned int not_suspended = 0; - start: /* * Do not try to power off the domain in the following situations: * (1) The domain is already in the "power off" state. - * (2) The domain is waiting for its master to power up. - * (3) One of the domain's devices is being resumed right now. - * (4) System suspend is in progress. + * (2) System suspend is in progress. */ if (genpd->status == GPD_STATE_POWER_OFF - || genpd->status == GPD_STATE_WAIT_MASTER - || genpd->resume_count > 0 || genpd->prepared_count > 0) + || genpd->prepared_count > 0) return 0; if (atomic_read(&genpd->sd_count) > 0) return -EBUSY; - not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) { enum pm_qos_flags_status stat; @@ -560,41 +402,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (not_suspended > genpd->in_progress) return -EBUSY; - if (genpd->poweroff_task) { - /* - * Another instance of pm_genpd_poweroff() is executing - * callbacks, so tell it to start over and return. - */ - genpd->status = GPD_STATE_REPEAT; - return 0; - } - if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } - genpd->status = GPD_STATE_BUSY; - genpd->poweroff_task = current; - - list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { - ret = atomic_read(&genpd->sd_count) == 0 ? 
- __pm_genpd_save_device(pdd, genpd) : -EBUSY; - - if (genpd_abort_poweroff(genpd)) - goto out; - - if (ret) { - genpd_set_active(genpd); - goto out; - } - - if (genpd->status == GPD_STATE_REPEAT) { - genpd->poweroff_task = NULL; - goto start; - } - } - if (genpd->cpuidle_data) { /* * If cpuidle_data is set, cpuidle should turn the domain off @@ -607,14 +419,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = false; cpuidle_resume_and_unlock(); - goto out; + return 0; } if (genpd->power_off) { - if (atomic_read(&genpd->sd_count) > 0) { - ret = -EBUSY; - goto out; - } + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; /* * If sd_count > 0 at this point, one of the subdomains hasn't @@ -625,10 +437,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) * happen very often). */ ret = genpd_power_off(genpd, true); - if (ret == -EBUSY) { - genpd_set_active(genpd); - goto out; - } + if (ret) + return ret; } genpd->status = GPD_STATE_POWER_OFF; @@ -638,10 +448,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_queue_power_off_work(link->master); } - out: - genpd->poweroff_task = NULL; - wake_up_all(&genpd->status_wait_queue); - return ret; + return 0; } /** @@ -654,9 +461,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); pm_genpd_poweroff(genpd); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); } /** @@ -670,7 +477,6 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data; bool (*stop_ok)(struct device *__dev); int ret; @@ -684,10 +490,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (stop_ok && !stop_ok(dev)) return -EBUSY; - ret = genpd_stop_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); if (ret) return ret; + ret = genpd_stop_dev(genpd, dev); + if (ret) { + genpd_restore_dev(genpd, dev, true); + return ret; + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. @@ -696,16 +508,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - - /* - * If we have an unknown state of the need_restore flag, it means none - * of the runtime PM callbacks has been invoked yet. Let's update the - * flag to reflect that the current state is active. - */ - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - if (gpd_data->need_restore < 0) - gpd_data->need_restore = 0; - genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; @@ -725,8 +527,8 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; - DEFINE_WAIT(wait); int ret; + bool timed = true; dev_dbg(dev, "%s()\n", __func__); @@ -735,39 +537,21 @@ static int pm_genpd_runtime_resume(struct device *dev) return -EINVAL; /* If power.irq_safe, the PM domain is never powered off. 
*/ - if (dev->power.irq_safe) - return genpd_start_dev_no_timing(genpd, dev); + if (dev->power.irq_safe) { + timed = false; + goto out; + } mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); - if (ret) { - mutex_unlock(&genpd->lock); - return ret; - } - genpd->status = GPD_STATE_BUSY; - genpd->resume_count++; - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - /* - * If current is the powering off task, we have been called - * reentrantly from one of the device callbacks, so we should - * not wait. - */ - if (!genpd->poweroff_task || genpd->poweroff_task == current) - break; - mutex_unlock(&genpd->lock); + mutex_unlock(&genpd->lock); - schedule(); + if (ret) + return ret; - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); - genpd->resume_count--; - genpd_set_active(genpd); - wake_up_all(&genpd->status_wait_queue); - mutex_unlock(&genpd->lock); + out: + genpd_start_dev(genpd, dev, timed); + genpd_restore_dev(genpd, dev, timed); return 0; } @@ -883,7 +667,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, { struct gpd_link *link; - if (genpd->status != GPD_STATE_POWER_OFF) + if (genpd->status == GPD_STATE_ACTIVE) return; list_for_each_entry(link, &genpd->slave_links, slave_node) { @@ -960,14 +744,14 @@ static int pm_genpd_prepare(struct device *dev) if (resume_needed(dev, genpd)) pm_runtime_resume(dev); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count++ == 0) { genpd->suspended_count = 0; genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (genpd->suspend_power_off) { pm_runtime_put_noidle(dev); @@ -1102,7 +886,7 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1230,7 +1014,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 
0 : genpd_start_dev(genpd, dev, true); } /** @@ -1324,7 +1108,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1440,7 +1224,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, gpd_data->td = *td; gpd_data->base.dev = dev; - gpd_data->need_restore = -1; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; @@ -1502,7 +1285,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1519,7 +1302,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1563,7 +1346,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1578,14 +1361,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, list_del_init(&pdd->list_node); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); genpd_free_dev_data(dev, gpd_data); return 0; out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); dev_pm_qos_add_notifier(dev, &gpd_data->nb); return ret; @@ -1606,17 +1389,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, || genpd == subdomain) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { ret = -EINVAL; @@ -1644,7 +1419,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, out: mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1692,8 +1467,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); list_for_each_entry(link, &genpd->master_links, master_node) { if (link->slave != subdomain) @@ -1701,13 +1475,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - list_del(&link->master_node); list_del(&link->slave_node); kfree(link); @@ -1720,7 +1487,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, break; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1744,7 +1511,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) if (IS_ERR_OR_NULL(genpd) || state < 0) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->cpuidle_data) { ret = -EEXIST; @@ -1775,7 +1542,7 
@@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) genpd_recalc_cpu_exit_latency(genpd); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; err: @@ -1812,7 +1579,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); cpuidle_data = genpd->cpuidle_data; if (!cpuidle_data) { @@ -1830,7 +1597,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) kfree(cpuidle_data); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1912,9 +1679,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; - init_waitqueue_head(&genpd->status_wait_queue); - genpd->poweroff_task = NULL; - genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; @@ -1952,6 +1716,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd, list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); } +EXPORT_SYMBOL_GPL(pm_genpd_init); #ifdef CONFIG_PM_GENERIC_DOMAINS_OF /* @@ -2125,7 +1890,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); /** * genpd_dev_pm_detach - Detach a device from its PM domain. - * @dev: Device to attach. + * @dev: Device to detach. * @power_off: Currently not used * * Try to locate a corresponding generic PM domain, which the device was @@ -2183,7 +1948,10 @@ static void genpd_dev_pm_sync(struct device *dev) * Both generic and legacy Samsung-specific DT bindings are supported to keep * backwards compatibility with existing DTBs. * - * Returns 0 on successfully attached PM domain or negative error code. + * Returns 0 on successfully attached PM domain or negative error code. Note + * that if a power-domain exists for the device, but it cannot be found or + * turned on, then return -EPROBE_DEFER to ensure that the device is not + * probed and to re-try again later. */ int genpd_dev_pm_attach(struct device *dev) { @@ -2220,7 +1988,7 @@ int genpd_dev_pm_attach(struct device *dev) dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); of_node_put(dev->of_node); - return PTR_ERR(pd); + return -EPROBE_DEFER; } dev_dbg(dev, "adding to PM domain %s\n", pd->name); @@ -2238,14 +2006,15 @@ int genpd_dev_pm_attach(struct device *dev) dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); of_node_put(dev->of_node); - return ret; + goto out; } dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - pm_genpd_poweron(pd); + ret = pm_genpd_poweron(pd); - return 0; +out: + return ret ? 
-EPROBE_DEFER : 0; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -2293,9 +2062,6 @@ static int pm_genpd_summary_one(struct seq_file *s, { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", - [GPD_STATE_WAIT_MASTER] = "wait-master", - [GPD_STATE_BUSY] = "busy", - [GPD_STATE_REPEAT] = "off-in-progress", [GPD_STATE_POWER_OFF] = "off" }; struct pm_domain_data *pm_data; @@ -2309,7 +2075,7 @@ static int pm_genpd_summary_one(struct seq_file *s, if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) goto exit; - seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); + seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); /* * Modifications on the list require holding locks on both @@ -2344,8 +2110,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, " domain status slaves\n"); - seq_puts(s, " /device runtime status\n"); + seq_puts(s, "domain status slaves\n"); + seq_puts(s, " /device runtime status\n"); seq_puts(s, "----------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 30b7bbfdc558..1710c26ba097 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_suspended_if_enabled(dev)) + if (pm_runtime_status_suspended(dev)) goto Complete; pm_runtime_enable(dev); diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig index 7f3d389bd601..a67eeace6a89 100644 --- a/drivers/power/avs/Kconfig +++ b/drivers/power/avs/Kconfig @@ -13,7 +13,7 @@ menuconfig POWER_AVS config ROCKCHIP_IODOMAIN tristate "Rockchip IO domain support" - depends on ARCH_ROCKCHIP && OF + depends on POWER_AVS && ARCH_ROCKCHIP && OF help Say y here to enable support io domains on Rockchip SoCs. It is necessary for the io domain setting of the SoC to match the diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 3ae35d0590d2..2e300028f0f7 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -43,6 +43,10 @@ #define RK3288_SOC_CON2_FLASH0 BIT(7) #define RK3288_SOC_FLASH_SUPPLY_NUM 2 +#define RK3368_SOC_CON15 0x43c +#define RK3368_SOC_CON15_FLASH0 BIT(14) +#define RK3368_SOC_FLASH_SUPPLY_NUM 2 + struct rockchip_iodomain; /** @@ -158,6 +162,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod) dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); } +static void rk3368_iodomain_init(struct rockchip_iodomain *iod) +{ + int ret; + u32 val; + + /* if no flash supply we should leave things alone */ + if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg) + return; + + /* + * set flash0 iodomain to also use this framework + * instead of a special gpio. + */ + val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16); + ret = regmap_write(iod->grf, RK3368_SOC_CON15, val); + if (ret < 0) + dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); +} + /* * On the rk3188 the io-domains are handled by a shared register with the * lower 8 bits being still being continuing drive-strength settings. 
@@ -201,6 +224,34 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = { .init = rk3288_iodomain_init, }; +static const struct rockchip_iodomain_soc_data soc_data_rk3368 = { + .grf_offset = 0x900, + .supply_names = { + NULL, /* reserved */ + "dvp", /* DVPIO_VDD */ + "flash0", /* FLASH0_VDD (emmc) */ + "wifi", /* APIO2_VDD (sdio0) */ + NULL, + "audio", /* APIO3_VDD */ + "sdcard", /* SDMMC0_VDD (sdmmc) */ + "gpio30", /* APIO1_VDD */ + "gpio1830", /* APIO4_VDD (gpujtag) */ + }, + .init = rk3368_iodomain_init, +}; + +static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = { + .grf_offset = 0x100, + .supply_names = { + NULL, + NULL, + NULL, + NULL, + "pmu", /*PMU IO domain*/ + "vop", /*LCDC IO domain*/ + }, +}; + static const struct of_device_id rockchip_iodomain_match[] = { { .compatible = "rockchip,rk3188-io-voltage-domain", @@ -210,6 +261,14 @@ static const struct of_device_id rockchip_iodomain_match[] = { .compatible = "rockchip,rk3288-io-voltage-domain", .data = (void *)&soc_data_rk3288 }, + { + .compatible = "rockchip,rk3368-io-voltage-domain", + .data = (void *)&soc_data_rk3368 + }, + { + .compatible = "rockchip,rk3368-pmu-io-voltage-domain", + .data = (void *)&soc_data_rk3368_pmu + }, { /* sentinel */ }, }; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 681ccb053f72..b1cf7e797892 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -22,9 +22,6 @@ enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ - GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ - GPD_STATE_BUSY, /* Something is happening to the PM domain */ - GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ GPD_STATE_POWER_OFF, /* PM domain is off */ }; @@ -59,9 +56,6 @@ struct generic_pm_domain { unsigned int in_progress; /* Number of devices being suspended now */ atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ - wait_queue_head_t status_wait_queue; - struct task_struct *poweroff_task; /* Powering off task */ - unsigned int resume_count; /* Number of devices being resumed */ unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ @@ -113,7 +107,6 @@ struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; struct notifier_block nb; - int need_restore; }; #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name) return -ENOSYS; } static inline void pm_genpd_poweroff_unused(void) {} -#define simple_qos_governor NULL -#define pm_domain_always_on_gov NULL #endif static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 30e84d48bfea..3bdbb4189780 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev) return dev->power.runtime_status == RPM_SUSPENDED; } -static inline bool pm_runtime_suspended_if_enabled(struct device *dev) -{ - return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1; -} - static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; @@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct 
device *dev) { return false; } static inline bool pm_runtime_active(struct device *dev) { return true; } static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } -static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 9e302315e33d..02e8dfaa1ce2 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -18,6 +18,16 @@ config SUSPEND_FREEZER Turning OFF this setting is NOT recommended! If in doubt, say Y. +config SUSPEND_SKIP_SYNC + bool "Skip kernel's sys_sync() on suspend to RAM/standby" + depends on SUSPEND + depends on EXPERT + help + Skip the kernel sys_sync() before freezing user processes. + Some systems prefer not to pay this cost on every invocation + of suspend, or they are content with invoking sync() from + user-space before invoking suspend. Say Y if that's your case. + config HIBERNATE_CALLBACKS bool diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 53266b729fd9..7e4cda4a8dd9 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -484,11 +484,13 @@ static int enter_state(suspend_state_t state) if (state == PM_SUSPEND_FREEZE) freeze_begin(); +#ifndef CONFIG_SUSPEND_SKIP_SYNC trace_suspend_resume(TPS("sync_filesystems"), 0, true); printk(KERN_INFO "PM: Syncing filesystems ... "); sys_sync(); printk("done.\n"); trace_suspend_resume(TPS("sync_filesystems"), 0, false); +#endif pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]); error = suspend_prepare(state); diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 019069c84ff6..1896386e16bb 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -17,6 +17,7 @@ #include <linux/list.h> #include <linux/rbtree.h> #include <linux/slab.h> +#include <linux/workqueue.h> #include "power.h" @@ -83,7 +84,9 @@ static inline void decrement_wakelocks_number(void) {} #define WL_GC_COUNT_MAX 100 #define WL_GC_TIME_SEC 300 +static void __wakelocks_gc(struct work_struct *work); static LIST_HEAD(wakelocks_lru_list); +static DECLARE_WORK(wakelock_work, __wakelocks_gc); static unsigned int wakelocks_gc_count; static inline void wakelocks_lru_add(struct wakelock *wl) @@ -96,13 +99,12 @@ static inline void wakelocks_lru_most_recent(struct wakelock *wl) list_move(&wl->lru, &wakelocks_lru_list); } -static void wakelocks_gc(void) +static void __wakelocks_gc(struct work_struct *work) { struct wakelock *wl, *aux; ktime_t now; - if (++wakelocks_gc_count <= WL_GC_COUNT_MAX) - return; + mutex_lock(&wakelocks_lock); now = ktime_get(); list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) { @@ -127,6 +129,16 @@ static void wakelocks_gc(void) } } wakelocks_gc_count = 0; + + mutex_unlock(&wakelocks_lock); +} + +static void wakelocks_gc(void) +{ + if (++wakelocks_gc_count <= WL_GC_COUNT_MAX) + return; + + schedule_work(&wakelock_work); } #else /* !CONFIG_PM_WAKELOCKS_GC */ static inline void wakelocks_lru_add(struct wakelock *wl) {} |
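
As a hedged aside on the direct-complete note added to Documentation/power/devices.txt above: the sketch below is not part of this patch (foo_prepare and foo_pm_ops are hypothetical names); it only illustrates how a driver that genuinely uses runtime PM might opt into the optimization from its ->prepare() callback, whereas a driver without runtime PM support simply returns 0.

```c
/*
 * Hypothetical sketch of the documented rule: ->prepare() may return a
 * positive value (requesting the "direct-complete" path, i.e. skipping the
 * remaining system-sleep callbacks) only for devices that actually use
 * runtime PM, since every device starts out runtime-suspended with runtime
 * PM disabled.
 */
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_prepare(struct device *dev)
{
	/*
	 * Request direct-complete only when runtime PM is enabled for this
	 * device and it is genuinely runtime-suspended.  A driver that does
	 * not support runtime PM must return 0 (or a negative error) here.
	 */
	if (pm_runtime_enabled(dev) && pm_runtime_status_suspended(dev))
		return 1;

	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.prepare	= foo_prepare,
	/* .suspend, .resume, etc. omitted from this sketch */
};
```

This corresponds to the __device_suspend() change above, where the core now disables runtime PM and re-checks pm_runtime_status_suspended() before taking the direct-complete shortcut, which is why the documentation warns drivers without runtime PM support never to return a positive value from ->prepare().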