author      Linus Torvalds    2013-05-16 12:03:28 -0700
committer   Linus Torvalds    2013-05-16 12:03:28 -0700
commit  4a007ed926fd4cbb4afe4566dbfd252cc49f22fe (patch)
tree    0e71b161d387881fb8c197125a3bff6e444aa300 /kernel
parent  ff89acc563a0bd49965674f56552ad6620415fe2 (diff)
parent  1be0c25da56e860992af972a60321563ca2cfcd1 (diff)
Merge branch 'for-3.10-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fixes from Tejun Heo:
 "Three more workqueue regression fixes.

  - Fix an unbalanced unlock in the trylock-failure path of
    manage_workers().  This shouldn't happen often in the wild, but it
    is possible.

  - When schedule_work() and friends were made inline, they became
    unavailable to !GPL modules.  Allow !GPL modules to access the
    basics - system_wq and queue_*work_on() - so that schedule_work()
    and friends can be used.

  - During boot, the unbound NUMA support code allocates a cpumask for
    each possible node using alloc_cpumask_var_node(), which ends up
    trying to allocate node-specific memory even for offline nodes,
    triggering a BUG in the memory allocator.  Use NUMA_NO_NODE for
    offline nodes."

* 'for-3.10-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: don't perform NUMA-aware allocations on offline nodes in wq_numa_init()
  workqueue: Make schedule_work() available again to non GPL modules
  workqueue: correct handling of the pool spin_lock

Illustrative sketches of the three fixes follow.
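First, the pool lock fix: manage_workers() is entered with pool->lock held and must also return with it held, but the trylock-failure path dropped the lock to take manager_mutex in the correct order and never re-took it. A minimal annotated sketch of the pattern, using the names from kernel/workqueue.c (the surrounding function body is elided):

/*
 * manager_mutex nests outside pool->lock, so when the trylock fails
 * the spinlock must be dropped before sleeping on the mutex -- and,
 * crucially, re-acquired afterwards, because the caller expects
 * pool->lock to still be held when manage_workers() returns.
 */
if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
        spin_unlock_irq(&pool->lock);     /* release the inner lock */
        mutex_lock(&pool->manager_mutex); /* take the outer lock first */
        spin_lock_irq(&pool->lock);       /* restore the caller's invariant */
        ret = true;
}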
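Second, the export change: schedule_work() is a static inline that expands to queue_work(system_wq, ...), which in turn calls queue_work_on(), so a module only links against it if those underlying symbols are exported without the GPL restriction. A minimal sketch of a !GPL module exercising that path (the module name and handler are illustrative, not part of the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_handler(struct work_struct *work)
{
        pr_info("demo: work item ran\n");
}

static DECLARE_WORK(demo_work, demo_handler);

static int __init demo_init(void)
{
        /*
         * schedule_work() inlines to queue_work(system_wq, ...), which
         * calls queue_work_on(); with this patch both system_wq and
         * queue_work_on() are plain EXPORT_SYMBOL, so this links even
         * from a non-GPL module.
         */
        schedule_work(&demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        /*
         * Make sure the handler is not running when the module text is
         * freed.  Note that the various flush/cancel helpers keep their
         * own export markings; check kernel/workqueue.c for the ones
         * you rely on.
         */
        cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("Proprietary");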
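Third, the boot-time NUMA fix: offline nodes have no node-local memory attached yet, so asking the allocator for it triggers a BUG, and NUMA_NO_NODE lets the allocator fall back to any node. The pattern from wq_numa_init(), annotated (tbl and node are the variables from the patch):

/*
 * tbl[] has a slot for every possible node, but only online nodes
 * have memory attached at this point in boot.  Request node-local
 * memory when we can, and let the allocator pick any node
 * (NUMA_NO_NODE) for nodes that are still offline.
 */
for_each_node(node)
        BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
                                       node_online(node) ? node : NUMA_NO_NODE));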
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1ae602809efb..ee8e29a2320c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
 struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
 struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
         local_irq_restore(flags);
         return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
 
 void delayed_work_timer_fn(unsigned long __data)
 {
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
         local_irq_restore(flags);
         return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
 
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
         if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
                 spin_unlock_irq(&pool->lock);
                 mutex_lock(&pool->manager_mutex);
+                spin_lock_irq(&pool->lock);
                 ret = true;
         }
 
@@ -4904,7 +4905,8 @@ static void __init wq_numa_init(void)
         BUG_ON(!tbl);
 
         for_each_node(node)
-                BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+                BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+                                node_online(node) ? node : NUMA_NO_NODE));
 
         for_each_possible_cpu(cpu) {
                 node = cpu_to_node(cpu);