author     Roland Dreier <roland@purestorage.com>   2012-11-22 02:00:11 -0800
committer  Jens Axboe <axboe@kernel.dk>             2012-11-23 14:32:55 +0100
commit     893d290f1d7496db97c9471bc352ad4a11dc8a25 (patch)
tree       c6d5444d06949edfd5d13fa0beed0992dd2ae469 /block/blk-exec.c
parent     836413e8c78ecbc55aa31f3cb600f8ee1aa355a2 (diff)
block: Don't access request after it might be freed
After we've done __elv_add_request() and __blk_run_queue() in
blk_execute_rq_nowait(), the request might finish and be freed
immediately. Therefore checking if the type is REQ_TYPE_PM_RESUME
isn't safe afterwards, because if it isn't, rq might be gone.

Instead, check beforehand and stash the result in a temporary.

This fixes crashes in blk_execute_rq_nowait() I get occasionally when
running with lots of memory debugging options enabled -- I think this
race is usually harmless because the window for rq to be reallocated
is so small.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
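The window the patch closes is easiest to see as a timeline. The sketch
below is illustrative only and is not part of the commit; the two-CPU
split is an assumption for exposition (completion can equally run from
interrupt context on the same CPU):

    /*
     *  CPU 0: blk_execute_rq_nowait()       CPU 1 / IRQ: completion path
     *  --------------------------------     ----------------------------
     *  __elv_add_request(q, rq, where);
     *  __blk_run_queue(q); ...........>     driver finishes the request,
     *                                       rq->end_io() runs, rq is
     *                                       freed (and may be reallocated)
     *  if (rq->cmd_type ==
     *      REQ_TYPE_PM_RESUME)         <--  use-after-free: rq is stale
     *
     * The fix samples rq->cmd_type into a local bool before the request
     * is handed to the elevator, so the later test touches only the
     * stack, never rq.
     */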
Diffstat (limited to 'block/blk-exec.c')
-rw-r--r--   block/blk-exec.c   8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5bd4dd0..f71eac35c1b9 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 		    rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
+	/*
+	 * need to check this before __blk_run_queue(), because rq can
+	 * be freed before that returns.
+	 */
+	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
-	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+	if (is_pm_resume)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }
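For reference, a reconstruction of how the function reads with the patch
applied, stitched together from the two hunks above. The parameter line
"struct request *rq, int at_head," is taken from the kernel API of this
era rather than from the diff, and old lines 63-70 fall between the
hunks, so they are marked with a placeholder rather than guessed at:

    void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
    			   struct request *rq, int at_head,
    			   rq_end_io_fn *done)
    {
    	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
    	bool is_pm_resume;
    
    	WARN_ON(irqs_disabled());
    
    	rq->rq_disk = bd_disk;
    	rq->end_io = done;
    	/*
    	 * need to check this before __blk_run_queue(), because rq can
    	 * be freed before that returns.
    	 */
    	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
    
    	spin_lock_irq(q->queue_lock);
    
    	/* ... old lines 63-70, untouched by this patch, elided ... */
    
    	__elv_add_request(q, rq, where);
    	__blk_run_queue(q);	/* rq may complete and be freed in here */
    	/* the queue is stopped so it won't be run */
    	if (is_pm_resume)	/* reads the local flag, never rq */
    		q->request_fn(q);
    	spin_unlock_irq(q->queue_lock);
    }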