Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	89
1 file changed, 18 insertions, 71 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 27fb1357ad4b..a0d1104c5590 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -136,7 +136,7 @@ static const char *const blk_op_name[] = {
* string format. Useful in the debugging and tracing bio or request. For
* invalid REQ_OP_XXX it returns string "UNKNOWN".
*/
-inline const char *blk_op_str(unsigned int op)
+inline const char *blk_op_str(enum req_op op)
{
const char *op_str = "UNKNOWN";
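Note: with the stronger typing above, callers now pass an enum req_op rather than a raw unsigned int. A minimal usage sketch (my_trace_bio is a hypothetical helper; bio_op() extracts the REQ_OP_* value from bio->bi_opf):

	/* Hypothetical debug helper: log the operation name of a bio. */
	static void my_trace_bio(struct bio *bio)
	{
		pr_debug("%s: op=%s sector=%llu\n", __func__,
			 blk_op_str(bio_op(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}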
@@ -285,49 +285,6 @@ void blk_queue_start_drain(struct request_queue *q)
}
/**
- * blk_cleanup_queue - shutdown a request queue
- * @q: request queue to shutdown
- *
- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
- * put it. All future requests will be failed immediately with -ENODEV.
- *
- * Context: can sleep
- */
-void blk_cleanup_queue(struct request_queue *q)
-{
- /* cannot be called from atomic context */
- might_sleep();
-
- WARN_ON_ONCE(blk_queue_registered(q));
-
- /* mark @q DYING, no new request or merges will be allowed afterwards */
- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- blk_queue_start_drain(q);
-
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
- blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-
- /*
- * Drain all requests queued before DYING marking. Set DEAD flag to
- * prevent that blk_mq_run_hw_queues() accesses the hardware queues
- * after draining finished.
- */
- blk_freeze_queue(q);
-
- blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
-
- blk_sync_queue(q);
- if (queue_is_mq(q)) {
- blk_mq_cancel_work_sync(q);
- blk_mq_exit_queue(q);
- }
-
- /* @q is and will stay empty, shutdown and put */
- blk_put_queue(q);
-}
-EXPORT_SYMBOL(blk_cleanup_queue);
-
-/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
* @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
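Note: with blk_cleanup_queue() removed above, the mark-DYING/drain/mark-DEAD teardown is no longer a separate exported entry point. As a hedged sketch not shown in this diff, a blk-mq driver's removal path would now typically rely on the normal disk teardown, with the final disk put dropping the queue reference that blk_cleanup_queue() used to release (my_dev and my_driver_remove are illustrative names):

	/* Hypothetical removal path for a blk-mq driver. */
	static void my_driver_remove(struct my_dev *dev)
	{
		del_gendisk(dev->disk);		/* stop accepting I/O and drain */
		put_disk(dev->disk);		/* final put tears down the queue */
		blk_mq_free_tag_set(&dev->tag_set);
	}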
@@ -420,7 +377,6 @@ static void blk_timeout_work(struct work_struct *work)
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
struct request_queue *q;
- int ret;
q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
GFP_KERNEL | __GFP_ZERO, node_id);
@@ -435,17 +391,13 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
q->last_merge = NULL;
- q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+ q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
if (q->id < 0)
goto fail_srcu;
- ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
- if (ret)
- goto fail_id;
-
q->stats = blk_alloc_queue_stats();
if (!q->stats)
- goto fail_split;
+ goto fail_id;
q->node = node_id;
@@ -482,10 +434,8 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
fail_stats:
blk_free_queue_stats(q->stats);
-fail_split:
- bioset_exit(&q->bio_split);
fail_id:
- ida_simple_remove(&blk_queue_ida, q->id);
+ ida_free(&blk_queue_ida, q->id);
fail_srcu:
if (alloc_srcu)
cleanup_srcu_struct(q->srcu);
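The hunks above switch from the deprecated ida_simple_get()/ida_simple_remove() pair to ida_alloc()/ida_free() from linux/idr.h. A minimal sketch of the same allocate/free pattern (my_ida and my_obj are hypothetical names for illustration):

	static DEFINE_IDA(my_ida);

	static int my_obj_init(struct my_obj *obj)
	{
		int id = ida_alloc(&my_ida, GFP_KERNEL);	/* smallest free ID, or negative errno */

		if (id < 0)
			return id;
		obj->id = id;
		return 0;
	}

	static void my_obj_release(struct my_obj *obj)
	{
		ida_free(&my_ida, obj->id);
	}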
@@ -504,12 +454,10 @@ fail_q:
*/
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dying(q))) {
- __blk_get_queue(q);
- return true;
- }
-
- return false;
+ if (unlikely(blk_queue_dying(q)))
+ return false;
+ kobject_get(&q->kobj);
+ return true;
}
EXPORT_SYMBOL(blk_get_queue);
@@ -608,16 +556,15 @@ static int blk_partition_remap(struct bio *bio)
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
struct bio *bio)
{
- sector_t pos = bio->bi_iter.bi_sector;
int nr_sectors = bio_sectors(bio);
/* Only applicable to zoned block devices */
- if (!blk_queue_is_zoned(q))
+ if (!bdev_is_zoned(bio->bi_bdev))
return BLK_STS_NOTSUPP;
/* The bio sector must point to the start of a sequential zone */
- if (pos & (blk_queue_zone_sectors(q) - 1) ||
- !blk_queue_zone_is_seq(q, pos))
+ if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
+ !bio_zone_is_seq(bio))
return BLK_STS_IOERR;
/*
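The zone-append sanity check above now works purely on the bio's block_device. A hedged sketch of the alignment test, assuming the power-of-two zone size the block layer requires of zoned devices (my_bio_is_zone_aligned is a hypothetical helper):

	/* True iff the bio starts at a zone boundary; bdev_zone_sectors()
	 * is the zone size in 512-byte sectors. */
	static bool my_bio_is_zone_aligned(struct bio *bio)
	{
		return (bio->bi_iter.bi_sector &
			(bdev_zone_sectors(bio->bi_bdev) - 1)) == 0;
	}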
@@ -762,7 +709,7 @@ void submit_bio_noacct(struct bio *bio)
might_sleep();
- plug = blk_mq_plug(q, bio);
+ plug = blk_mq_plug(bio);
if (plug && plug->nowait)
bio->bi_opf |= REQ_NOWAIT;
@@ -818,11 +765,11 @@ void submit_bio_noacct(struct bio *bio)
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- if (!blk_queue_is_zoned(q))
+ if (!bdev_is_zoned(bio->bi_bdev))
goto not_supported;
break;
case REQ_OP_ZONE_RESET_ALL:
- if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
+ if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
goto not_supported;
break;
case REQ_OP_WRITE_ZEROES:
@@ -987,7 +934,7 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
again:
stamp = READ_ONCE(part->bd_stamp);
if (unlikely(time_after(now, stamp))) {
- if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
+ if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
__part_stat_add(part, io_ticks, end ? now - stamp : 1);
}
if (part->bd_partno) {
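The switch to try_cmpxchg() above keeps the same semantics but avoids re-reading the old value: try_cmpxchg() takes a pointer to the expected value, returns true on success, and writes the observed value back through that pointer on failure. A minimal sketch of the idiom (hypothetical names):

	/* Retry loop built on try_cmpxchg(); "old" is refreshed automatically
	 * each time the exchange loses a race. */
	static void my_set_max(unsigned long *slot, unsigned long val)
	{
		unsigned long old = READ_ONCE(*slot);

		while (old < val && !try_cmpxchg(slot, &old, val))
			;	/* old now holds the winning value; re-check it */
	}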
@@ -997,7 +944,7 @@ again:
}
unsigned long bdev_start_io_acct(struct block_device *bdev,
- unsigned int sectors, unsigned int op,
+ unsigned int sectors, enum req_op op,
unsigned long start_time)
{
const int sgrp = op_stat_group(op);
@@ -1038,7 +985,7 @@ unsigned long bio_start_io_acct(struct bio *bio)
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
-void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time)
{
const int sgrp = op_stat_group(op);
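With the accounting helpers now taking enum req_op, a bio-based driver would call them roughly as follows. This is a hedged sketch using the signatures shown in this diff; my_issue_bio is an illustrative name:

	static void my_issue_bio(struct bio *bio)
	{
		unsigned long start = bdev_start_io_acct(bio->bi_bdev,
							 bio_sectors(bio),
							 bio_op(bio), jiffies);

		/* ... submit and wait for the I/O ... */

		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start);
	}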
@@ -1247,7 +1194,7 @@ EXPORT_SYMBOL_GPL(blk_io_schedule);
int __init blk_dev_init(void)
{
- BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+ BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *