author     Viresh Kumar           2016-11-15 13:53:23 +0530
committer  Rafael J. Wysocki      2016-11-16 23:21:08 +0100
commit     21ef57297b15a49b0c4dd4e7135c1a08e9a29a1c (patch)
tree       40bbbe65652db9d337bd22904e3bbcdfe2e2c3f1 /kernel/sched/cpufreq_schedutil.c
parent     02a7b1ee3baa15a98b541d8cfd156bbe1a091c20 (diff)
cpufreq: schedutil: irq-work and mutex are only used in slow path
Execute the irq-work specific initialization/exit code only when the fast path isn't available.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
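For context (not part of this patch): the irq_work and work_lock only matter on the slow path, where schedutil defers the frequency change to its kthread; work_lock serializes the slow-path __cpufreq_driver_target() calls. Below is a rough, abridged sketch of the dispatch in sugov_update_commit() from the schedutil code of that time, with the next_freq bookkeeping and tracing trimmed:

static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (policy->fast_switch_enabled) {
		/* Fast path: switch the frequency directly from scheduler context. */
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		/* ... update policy->cur, tracing ... */
	} else {
		/* Slow path: hand the request off to the sugov kthread via irq_work. */
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

Since the kthread (and with it the slow path) exists only when fast switching is disabled, the irq_work/work_lock setup can move into sugov_kthread_create()/sugov_kthread_stop(), and sugov_stop() only has to sync them in that case, as the diff below does.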
Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index f165ba0f0766..42a220e78f00 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -390,15 +390,12 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 		return NULL;
 
 	sg_policy->policy = policy;
-	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
-	mutex_init(&sg_policy->work_lock);
 	raw_spin_lock_init(&sg_policy->update_lock);
 	return sg_policy;
 }
 
 static void sugov_policy_free(struct sugov_policy *sg_policy)
 {
-	mutex_destroy(&sg_policy->work_lock);
 	kfree(sg_policy);
 }
 
@@ -432,6 +429,9 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 
 	sg_policy->thread = thread;
 	kthread_bind_mask(thread, policy->related_cpus);
+	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+	mutex_init(&sg_policy->work_lock);
+
 	wake_up_process(thread);
 
 	return 0;
@@ -445,6 +445,7 @@ static void sugov_kthread_stop(struct sugov_policy *sg_policy)
 
 	kthread_flush_worker(&sg_policy->worker);
 	kthread_stop(sg_policy->thread);
+	mutex_destroy(&sg_policy->work_lock);
 }
 
 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
@@ -611,8 +612,10 @@ static void sugov_stop(struct cpufreq_policy *policy)
 
 	synchronize_sched();
 
-	irq_work_sync(&sg_policy->irq_work);
-	kthread_cancel_work_sync(&sg_policy->work);
+	if (!policy->fast_switch_enabled) {
+		irq_work_sync(&sg_policy->irq_work);
+		kthread_cancel_work_sync(&sg_policy->work);
+	}
 }
 
 static void sugov_limits(struct cpufreq_policy *policy)