From ac514ffc968bf14649dd0e048447dc966ee49555 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 12 Jan 2018 19:53:40 -0500
Subject: dm mpath: delay the retry of a request if the target responded as busy

Add DM_ENDIO_DELAY_REQUEUE to allow request-based multipath's
multipath_end_io() to instruct dm-rq.c:dm_done() to delay a requeue.
This is beneficial to do if BLK_STS_RESOURCE is returned from the
target (because target is busy).

Relative to blk-mq: kick the hw queues via blk_mq_requeue_work(),
indirectly from dm-rq.c:__dm_mq_kick_requeue_list(), after a delay.

For old .request_fn: use blk_delay_queue().

bio-based multipath doesn't have feature parity with request-based for
retryable error requeues; that is something that'll need fixing in the
future.

Suggested-by: Bart Van Assche
Signed-off-by: Mike Snitzer
Acked-by: Bart Van Assche
[as interpreted from Bart's "... patch looks fine to me."]
---
 drivers/md/dm-rq.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/md/dm-rq.c')

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d32f25489c2..b78ff6921cfb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -315,6 +315,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 			/* The target wants to requeue the I/O */
 			dm_requeue_original_request(tio, false);
 			break;
+		case DM_ENDIO_DELAY_REQUEUE:
+			/* The target wants to requeue the I/O after a delay */
+			dm_requeue_original_request(tio, true);
+			break;
 		default:
 			DMWARN("unimplemented target endio return value: %d", r);
 			BUG();
--
cgit v1.2.3
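To illustrate the new return code from the target's side, here is a minimal
sketch of a request-based target's ->rq_end_io hook. The example_end_io name
and the treat-any-error-as-retryable policy are illustrative assumptions; the
real consumer is multipath_end_io() in dm-mpath.c, which falls outside the
dm-rq.c-limited diff above.

#include <linux/device-mapper.h>

static int example_end_io(struct dm_target *ti, struct request *clone,
			  blk_status_t error, union map_info *map_context)
{
	/* Target busy: have dm_done() requeue the original request after a delay */
	if (error == BLK_STS_RESOURCE)
		return DM_ENDIO_DELAY_REQUEUE;

	/* Simplified policy (assumption): treat any other error as immediately retryable */
	if (error)
		return DM_ENDIO_REQUEUE;

	/* Success: complete the original request */
	return DM_ENDIO_DONE;
}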
From c12c9a3c3860c76ba273798c0c34c6f1294cc759 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 12 Jan 2018 09:32:21 -0500
Subject: dm: various cleanups to md->queue initialization code

Also, add dm_sysfs_init() error handling to dm_create().

Signed-off-by: Mike Snitzer
---
 drivers/md/dm-core.h |  2 --
 drivers/md/dm-rq.c   |  2 --
 drivers/md/dm.c      | 30 ++++++++++++------------------
 3 files changed, 12 insertions(+), 22 deletions(-)

(limited to 'drivers/md/dm-rq.c')

diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 124ffa2d6b9a..3222e21cbbf8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -129,8 +129,6 @@ struct mapped_device {
 	struct srcu_struct io_barrier;
 };
 
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
 int md_in_flight(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b78ff6921cfb..c59c59cfd2a5 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -704,7 +704,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	/* disable dm_old_request_fn's merge heuristic by default */
 	md->seq_rq_merge_deadline_usecs = 0;
 
-	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 
 	/* Initialize the request-based DM worker thread */
@@ -814,7 +813,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 		err = PTR_ERR(q);
 		goto out_tag_set;
 	}
-	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
 	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 67bf11610e4d..148087932679 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1733,20 +1733,9 @@ static const struct dax_operations dm_dax_ops;
 
 static void dm_wq_work(struct work_struct *work);
 
-void dm_init_md_queue(struct mapped_device *md)
-{
-	/*
-	 * Initialize data that will only be used by a non-blk-mq DM queue
-	 * - must do so here (in alloc_dev callchain) before queue is used
-	 */
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
 {
 	md->use_blk_mq = false;
-	dm_init_md_queue(md);
 
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
@@ -1846,10 +1835,10 @@ static struct mapped_device *alloc_dev(int minor)
 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info->congested_data = md;
 
-	dm_init_md_queue(md);
-
-	md->disk = alloc_disk_node(1, numa_node_id);
+	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)
 		goto bad;
 
@@ -2082,13 +2071,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
+	int r;
 	struct mapped_device *md;
 
 	md = alloc_dev(minor);
 	if (!md)
 		return -ENXIO;
 
-	dm_sysfs_init(md);
+	r = dm_sysfs_init(md);
+	if (r) {
+		free_dev(md);
+		return r;
+	}
 
 	*result = md;
 	return 0;
@@ -2145,6 +2139,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
+		dm_init_normal_md_queue(md);
 		r = dm_old_init_request_queue(md, t);
 		if (r) {
 			DMERR("Cannot initialize queue for request-based mapped device");
@@ -2236,7 +2231,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
-	struct request_queue *q = dm_get_md_queue(md);
 	struct dm_table *map;
 	int srcu_idx;
 
@@ -2247,7 +2241,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
-	blk_set_queue_dying(q);
+	blk_set_queue_dying(md->queue);
 
 	if (dm_request_based(md) && md->kworker_task)
 		kthread_flush_worker(&md->kworker);
--
cgit v1.2.3
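As a closing illustration of the delayed-requeue plumbing the first patch's
message names: blk_mq_delay_kick_requeue_list() and blk_delay_queue() are the
real block-layer APIs of this era, while the helper bodies below are a
simplified sketch of how dm-rq.c can route dm_requeue_original_request(tio,
true) to them; treat the shapes as assumptions, not the verbatim
implementation.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* blk-mq case: schedule blk_mq_requeue_work() to kick the hw queues after 'msecs' */
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

/* old .request_fn case: put the request back, then re-run the queue after 'delay_ms' */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_delay_queue(q, delay_ms);
	spin_unlock_irqrestore(q->queue_lock, flags);
}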