author	Omar Sandoval	2018-02-27 16:56:42 -0800
committer	Jens Axboe	2018-02-28 12:23:35 -0700
commit	e9a99a638800af25c7ed006c96fd1dabb99254b7 (patch)
tree	d23b4989804cd48bb248142f9fa6370bd2700a2c /block
parent	18bc42308699522b57fd599401c03ad561f422ef (diff)
block: clear ctx pending bit under ctx lock
When we insert a request, we set the software queue pending bit while
holding the software queue lock. However, we clear it outside of the
lock, so it's possible that a concurrent insert could reset the bit
after we clear it but before we empty the request list. Afterwards, the
bit would still be set but the software queue wouldn't have any requests
in it, leading us to do a spurious run in the future.

This is mostly a benign/theoretical issue, but it makes the following
change easier to justify.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	2
1 file changed, 1 insertion, 1 deletion
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 16e83e6df404..9594a0e9f65b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -986,9 +986,9 @@ static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
 
-	sbitmap_clear_bit(sb, bitnr);
 	spin_lock(&ctx->lock);
 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
+	sbitmap_clear_bit(sb, bitnr);
 	spin_unlock(&ctx->lock);
 	return true;
 }
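
To make the interleaving in the commit message concrete, here is a minimal
userspace model of the race, not the kernel code itself: sw_queue,
insert_request() and the two flush_queue_*() variants are invented for this
sketch, a plain bool stands in for the ctx bit in the hctx sbitmap, and a
counter stands in for ctx->rq_list.

/* Illustrative model of the ordering bug; all names here are made up. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sw_queue {
	pthread_mutex_t lock;     /* models ctx->lock                        */
	bool pending;             /* models the ctx bit in the hctx sbitmap  */
	int nr_requests;          /* models the length of ctx->rq_list       */
};

/* Insert side: the bit and the list are updated under the lock. */
static void insert_request(struct sw_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->nr_requests++;
	q->pending = true;
	pthread_mutex_unlock(&q->lock);
}

/* Pre-patch ordering: the bit is cleared outside the lock. */
static void flush_queue_buggy(struct sw_queue *q)
{
	q->pending = false;                  /* (1) clear bit, lock not held */
	/* If insert_request() runs here, it sets pending again and adds a
	 * request; the drain below then removes that request as well ...   */
	pthread_mutex_lock(&q->lock);
	q->nr_requests = 0;                  /* (2) empty the list           */
	pthread_mutex_unlock(&q->lock);
	/* ... leaving pending == true with an empty list, which later
	 * causes a spurious queue run.                                      */
}

/* Post-patch ordering: empty the list and clear the bit in one critical
 * section, so the two can never disagree. */
static void flush_queue_fixed(struct sw_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->nr_requests = 0;
	q->pending = false;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct sw_queue q = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	insert_request(&q);
	flush_queue_buggy(&q);   /* harmless without concurrency ...         */
	flush_queue_fixed(&q);   /* ... but only this ordering stays
	                            consistent when insert_request() races.  */
	printf("pending=%d nr_requests=%d\n", q.pending, q.nr_requests);
	return 0;
}

The patch above is the same move as flush_queue_fixed(): sbitmap_clear_bit()
migrates inside the ctx->lock critical section, after list_splice_tail_init(),
so a concurrent insert either lands before the splice (and its bit is cleared
along with its request) or after the unlock (and both the bit and the request
remain).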