author     Tejun Heo    2010-08-25 10:33:56 +0200
committer  Tejun Heo    2010-08-25 10:33:56 +0200
commit     8a2e8e5dec7e29c56a46ba176c664ab6a3d04118 (patch)
tree       57da96451bead4986dfcd82aadf47ba2c05745ac /include/linux
parent     e41e704bc4f49057fc68b643108366e6e6781aa3 (diff)
workqueue: fix cwq->nr_active underflow
cwq->nr_active is used to keep track of how many work items are active for the cpu workqueue, where 'active' is defined as either pending on the global worklist or executing. This is used to implement the max_active limit and workqueue freezing. If a work item is queued after nr_active has already reached max_active, the work item doesn't increment nr_active and is put on the delayed queue, and gets activated later as previous active work items retire.

try_to_grab_pending(), which is used in the cancellation path, unconditionally decremented nr_active whether the work item being cancelled was currently active or delayed, so cancelling a delayed work item makes nr_active underflow. This breaks max_active enforcement and triggers BUG_ON() in destroy_workqueue() later on.

This patch fixes the bug by adding a flag, WORK_STRUCT_DELAYED, which is set while a work item is on the delayed list, and making try_to_grab_pending() decrement nr_active iff the work item is currently active.

The addition of the flag enlarges cwq alignment to 256 bytes, which is getting a bit too large. It's scheduled to be reduced back to 128 bytes by merging WORK_STRUCT_PENDING and WORK_STRUCT_CWQ in the next devel cycle.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Johannes Berg <johannes@sipsolutions.net>
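The following is a toy user-space model of the accounting described above, not the kernel code itself: the cwq->nr_active / max_active bookkeeping and the WORK_STRUCT_DELAYED flag mirror the patch, while the simplified queue_work()/cancel_work() helpers and the main() scaffolding are hypothetical stand-ins for illustration only.

/* Toy user-space model of the bug and the fix; not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum { WORK_STRUCT_DELAYED = 1 << 1 };	/* set while on the delayed list */

struct work_struct { unsigned int flags; };

struct cpu_workqueue_struct {
	int nr_active;		/* nr of active works */
	int max_active;		/* max active works */
};

/* Queue a work item: it becomes active only if there is room,
 * otherwise it is marked delayed and parked for later activation. */
static void queue_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	if (cwq->nr_active < cwq->max_active) {
		cwq->nr_active++;
	} else {
		work->flags |= WORK_STRUCT_DELAYED;
		/* ...would be added to the delayed list here... */
	}
}

/* Cancellation path.  Before the fix, nr_active was decremented
 * unconditionally; with the fix, only genuinely active items do. */
static void cancel_work(struct cpu_workqueue_struct *cwq, struct work_struct *work,
			bool fixed)
{
	if (fixed) {
		if (!(work->flags & WORK_STRUCT_DELAYED))
			cwq->nr_active--;
	} else {
		cwq->nr_active--;	/* buggy: underflows for delayed items */
	}
	work->flags &= ~WORK_STRUCT_DELAYED;
}

int main(void)
{
	struct cpu_workqueue_struct cwq = { .nr_active = 0, .max_active = 1 };
	struct work_struct w1 = { 0 }, w2 = { 0 };

	queue_work(&cwq, &w1);		/* becomes active */
	queue_work(&cwq, &w2);		/* over max_active -> delayed */

	cancel_work(&cwq, &w2, true);	/* fixed path: nr_active stays 1 */
	printf("nr_active after cancelling delayed work: %d\n", cwq.nr_active);
	assert(cwq.nr_active == 1);	/* buggy path would leave 0 here */
	return 0;
}

With max_active = 1, cancelling the delayed w2 leaves nr_active at 1 on the fixed path; the buggy unconditional decrement would drive it to 0 and then to -1 once the still-active w1 retires, which is the underflow the patch closes.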
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/workqueue.h  16
1 file changed, 9 insertions, 7 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index c959666eafca..f11100f96482 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
- WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */
- WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */
+ WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
+ WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
+ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
- WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */
- WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
+ WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
+ WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
#else
- WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */
+ WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
#endif
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
+ WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
/*
* Reserve 7 bits off of cwq pointer w/ debugobjects turned
- * off. This makes cwqs aligned to 128 bytes which isn't too
- * excessive while allowing 15 workqueue flush colors.
+ * off. This makes cwqs aligned to 256 bytes and allows 15
+ * workqueue flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
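As a side note on the alignment comment above, here is a small self-contained sketch (not kernel code; the enum values mirror the !CONFIG_DEBUG_OBJECTS_WORK case after this patch, and the packing shown is a simplified stand-in) of why WORK_STRUCT_FLAG_BITS dictates cwq alignment: the flag bits and the cwq pointer share a single data word, so the pointer's low WORK_STRUCT_FLAG_BITS bits must be zero, which with 4 + 4 = 8 flag bits means 1 << 8 = 256-byte alignment.

/* Sketch of why the flag count dictates cwq alignment; not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum {
	WORK_STRUCT_COLOR_SHIFT	= 4,
	WORK_STRUCT_COLOR_BITS	= 4,
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
};

int main(void)
{
	/* A suitably aligned stand-in for a cpu_workqueue_struct. */
	static _Alignas(1 << WORK_STRUCT_FLAG_BITS) char cwq[256];
	uintptr_t flags = 0x3;		/* e.g. PENDING | DELAYED (bits 0 and 1) */

	/* Pack pointer and flags into one word, the way work->data does
	 * conceptually. */
	uintptr_t data = (uintptr_t)cwq | flags;

	/* Both halves can be recovered only because the low 8 bits of the
	 * pointer are guaranteed zero by the 256-byte alignment. */
	assert((void *)(data & ~(uintptr_t)WORK_STRUCT_FLAG_MASK) == (void *)cwq);
	assert((data & WORK_STRUCT_FLAG_MASK) == flags);
	printf("alignment required: %d bytes\n", 1 << WORK_STRUCT_FLAG_BITS);
	return 0;
}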