| | | |
|---|---|---|
| author | Linus Torvalds | 2012-09-19 11:00:07 -0700 |
| committer | Linus Torvalds | 2012-09-19 11:00:07 -0700 |
| commit | c5c473e29c641380aef4a9d1f9c39de49219980f | (patch) |
| tree | 7cc1d52fa7757ecd0903fc6e86bb22188d2a8bbd | /drivers/cpufreq |
| parent | 925a6f0bf8bd122d5d2429af7f0ca0fecf4ae71f | (diff) |
| parent | 6889125b8b4e09c5e53e6ecab3433bed1ce198c9 | (diff) |
Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue / powernow-k8 fix from Tejun Heo:
"This is the fix for the bug where cpufreq/powernow-k8 was tripping the
BUG_ON() in try_to_wake_up_local() by migrating a workqueue worker to a
different CPU.
https://bugzilla.kernel.org/show_bug.cgi?id=47301
As discussed, the fix now comes in two parts: one reimplements
work_on_cpu() so that it doesn't create a new kthread on each call, and
the other is the actual fix, which makes powernow-k8 use work_on_cpu()
instead of performing the migration manually.
While pretty late in the merge cycle, both changes are on the safer
side. Jiri and I verified two existing users of work_on_cpu() and
Duncan confirmed that the powernow-k8 fix survived about 18 hours of
testing."
* 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
cpufreq/powernow-k8: workqueue user shouldn't migrate the kworker to another CPU
workqueue: reimplement work_on_cpu() using system_wq
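For background on the second commit listed above: the reworked work_on_cpu() runs a function synchronously on a chosen CPU by queueing an on-stack work item on the shared per-CPU workqueue and waiting for it, instead of spawning and tearing down a kthread on every call. The following is a minimal sketch of that shape, based only on the description in the pull message; the sketch_work_on_cpu() and work_for_cpu_sketch names are illustrative and this is not the verbatim upstream implementation.

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* On-stack bundle carrying the function, its argument and the result. */
struct work_for_cpu_sketch {
        struct work_struct work;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static void work_for_cpu_sketch_fn(struct work_struct *work)
{
        struct work_for_cpu_sketch *wfc =
                container_of(work, struct work_for_cpu_sketch, work);

        /* Runs in a kworker that is bound to the CPU it was queued on. */
        wfc->ret = wfc->fn(wfc->arg);
}

/*
 * Sketch of work_on_cpu(): run fn(arg) on @cpu and wait for completion.
 * The work item lives on the caller's stack, and queueing it on the
 * system workqueue for @cpu means no new kthread is created per call;
 * flush_work() guarantees fn has finished before the result is returned.
 */
static long sketch_work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
        struct work_for_cpu_sketch wfc = { .fn = fn, .arg = arg };

        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_sketch_fn);
        schedule_work_on(cpu, &wfc.work);
        flush_work(&wfc.work);
        return wfc.ret;
}
```

Because the worker is already bound to the target CPU, the called function can safely touch per-CPU MSRs and registers, which is what powernow-k8 needs in place of the old set_cpus_allowed_ptr() dance in the diff below.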
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- | drivers/cpufreq/powernow-k8.c | 63 |
1 file changed, 34 insertions(+), 29 deletions(-)
```diff
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index c0e816468e30..1a40935c85fd 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>	/* for current / set_cpus_allowed() */
 #include <linux/io.h>
 #include <linux/delay.h>
 
@@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
-		unsigned targfreq, unsigned relation)
+struct powernowk8_target_arg {
+	struct cpufreq_policy *pol;
+	unsigned targfreq;
+	unsigned relation;
+};
+
+static long powernowk8_target_fn(void *arg)
 {
-	cpumask_var_t oldmask;
+	struct powernowk8_target_arg *pta = arg;
+	struct cpufreq_policy *pol = pta->pol;
+	unsigned targfreq = pta->targfreq;
+	unsigned relation = pta->relation;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
 	unsigned int newstate;
-	int ret = -EIO;
+	int ret;
 
 	if (!data)
 		return -EINVAL;
@@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	checkfid = data->currfid;
 	checkvid = data->currvid;
 
-	/* only run on specific CPU from here on. */
-	/* This is poor form: use a workqueue or smp_call_function_single */
-	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(oldmask, tsk_cpus_allowed(current));
-	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
-
-	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-		goto err_out;
-	}
-
 	if (pending_bit_stuck()) {
 		printk(KERN_ERR PFX "failing targ, change pending bit set\n");
-		goto err_out;
+		return -EIO;
 	}
 
 	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
 		pol->cpu, targfreq, pol->min, pol->max, relation);
 
 	if (query_current_values_with_pending_wait(data))
-		goto err_out;
+		return -EIO;
 
 	if (cpu_family != CPU_HW_PSTATE) {
 		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
 				targfreq, relation, &newstate))
-		goto err_out;
+		return -EIO;
 
 	mutex_lock(&fidvid_mutex);
 
@@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 		ret = transition_frequency_fidvid(data, newstate);
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
-		ret = 1;
 		mutex_unlock(&fidvid_mutex);
-		goto err_out;
+		return 1;
 	}
 	mutex_unlock(&fidvid_mutex);
 
@@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 				data->powernow_table[newstate].index);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
-	ret = 0;
 
-err_out:
-	set_cpus_allowed_ptr(current, oldmask);
-	free_cpumask_var(oldmask);
-	return ret;
+	return 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+		unsigned targfreq, unsigned relation)
+{
+	struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+					     .relation = relation };
+
+	/*
+	 * Must run on @pol->cpu. cpufreq core is responsible for ensuring
+	 * that we're bound to the current CPU and pol->cpu stays online.
+	 */
+	if (smp_processor_id() == pol->cpu)
+		return powernowk8_target_fn(&pta);
+	else
+		return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 
 /* Driver entry point to verify the policy and range of frequencies */
```
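The dispatch at the end of the new powernowk8_target() generalizes to any code that must run on a particular CPU: call the function directly when already there, otherwise hand it to work_on_cpu(). A minimal, hypothetical caller sketch of that pattern follows; the percpu_op* names are illustrative and not from the patch.

```c
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical argument bundle, mirroring struct powernowk8_target_arg. */
struct percpu_op_arg {
        unsigned int cpu;
        unsigned int value;
};

/* Body that must execute on arg->cpu, e.g. because it touches per-CPU MSRs. */
static long percpu_op_fn(void *data)
{
        struct percpu_op_arg *arg = data;

        /* ... per-CPU hardware access for arg->cpu goes here ... */
        return 0;
}

static int percpu_op(unsigned int cpu, unsigned int value)
{
        struct percpu_op_arg arg = { .cpu = cpu, .value = value };

        /*
         * If the caller is already bound to @cpu (as the cpufreq core
         * guarantees for powernow-k8's ->target() path), call directly;
         * otherwise let a kworker bound to @cpu run it instead of
         * migrating the current task with set_cpus_allowed_ptr().
         */
        if (smp_processor_id() == cpu)
                return percpu_op_fn(&arg);
        return work_on_cpu(cpu, percpu_op_fn, &arg);
}
```

Keeping the worker on its own CPU is what avoids the try_to_wake_up_local() BUG_ON() described in the pull message: the kworker is never migrated away from the pool it belongs to.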