author     Jens Axboe    2020-07-20 15:38:23 -0600
committer  Jens Axboe    2020-07-20 15:38:23 -0600
commit     9caaa66c918c020fd16e84d1c6ebcce9960df1b2 (patch)
tree       578d455a53453a1aead77da399c5134e73224a66 /drivers
parent     ba47d845d715a010f7b51f6f89bae32845e6acb7 (diff)
parent     ef45fe470e1e5410db4af87abc5d5055427945ac (diff)
Merge branch 'for-5.9/block' into for-5.9/block-merge
* for-5.9/block: (124 commits)
blk-cgroup: show global disk stats in root cgroup io.stat
blk-cgroup: make iostat functions visible to stat printing
block: improve discard bio alignment in __blkdev_issue_discard()
block: change REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL to be odd numbers
block: defer flush request no matter whether we have elevator
block: make blk_timeout_init() static
block: remove retry loop in ioc_release_fn()
block: remove unnecessary ioc nested locking
block: integrate bd_start_claiming into __blkdev_get
block: use bd_prepare_to_claim directly in the loop driver
block: refactor bd_start_claiming
block: simplify the restart case in __blkdev_get
Revert "blk-rq-qos: remove redundant finish_wait to rq_qos_wait."
block: always remove partitions from blk_drop_partitions()
block: relax jiffies rounding for timeouts
blk-mq: remove redundant validation in __blk_mq_end_request()
blk-mq: Remove unnecessary local variable
writeback: remove bdi->congested_fn
writeback: remove struct bdi_writeback_congested
writeback: remove {set,clear}_wb_congested
...
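
The dominant pattern in this series is the removal of the per-queue make_request_fn: bio submission moves into block_device_operations as a ->submit_bio method, blk_alloc_queue() loses its callback argument, and bio-based drivers recover their context from bio->bi_disk instead of q->queuedata. A minimal sketch of the post-conversion shape, using hypothetical mydev_* names rather than any driver actually touched by this merge:

/*
 * Hypothetical bio-based driver, post-conversion.  "mydev" is a
 * placeholder for illustration, not a driver from this series.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

struct mydev {
	struct gendisk *disk;
	struct request_queue *queue;
};

static blk_qc_t mydev_submit_bio(struct bio *bio)
{
	/* Context comes from the disk now, not from q->queuedata. */
	struct mydev *dev = bio->bi_disk->private_data;

	if (WARN_ON_ONCE(!dev)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	/* blk_queue_split() also lost its request_queue argument. */
	blk_queue_split(&bio);

	/* ... translate and issue the I/O against dev ... */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static const struct block_device_operations mydev_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= mydev_submit_bio,	/* was: make_request_fn */
};

static int mydev_init_queue(struct mydev *dev)
{
	/* No make_request callback argument any more. */
	dev->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!dev->queue)
		return -ENOMEM;

	dev->disk->fops = &mydev_fops;
	dev->disk->private_data = dev;
	dev->disk->queue = dev->queue;
	return 0;
}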
Diffstat (limited to 'drivers')
77 files changed, 319 insertions, 741 deletions
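
Two further substitutions recur throughout the diff below: stacking and remapping drivers that used to call generic_make_request() now call submit_bio_noacct() (same submission path, but without accounting the remapped bio a second time), and blk-mq drivers completing requests from driver context first check blk_should_fake_timeout() so the timeout fault-injection code can swallow the completion. A short sketch of both, again with placeholder mydev_* names:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Re-issue a remapped/cloned bio without accounting it twice. */
static void mydev_issue_clone(struct bio *clone)
{
	submit_bio_noacct(clone);	/* was: generic_make_request() */
}

/* Complete a blk-mq request, honouring timeout fault injection. */
static void mydev_complete(struct request *req, blk_status_t status)
{
	/* ... store status in the driver's per-request data ... */
	if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}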
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 2fb25c348d53..2723a70eb855 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -282,7 +282,7 @@ out: return err; } -static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_disk->private_data; struct bio_vec bvec; @@ -330,6 +330,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, static const struct block_device_operations brd_fops = { .owner = THIS_MODULE, + .submit_bio = brd_submit_bio, .rw_page = brd_rw_page, }; @@ -381,7 +382,7 @@ static struct brd_device *brd_alloc(int i) spin_lock_init(&brd->brd_lock); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); - brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE); + brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE); if (!brd->brd_queue) goto out_free_dev; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 33d0831c99b6..fe6cb99eb917 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1451,7 +1451,7 @@ extern void conn_free_crypto(struct drbd_connection *connection); /* drbd_req */ extern void do_submit(struct work_struct *ws); extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); -extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio); +extern blk_qc_t drbd_submit_bio(struct bio *bio); extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); extern int is_valid_ar_handle(struct drbd_request *, sector_t); @@ -1576,12 +1576,12 @@ void drbd_set_my_capacity(struct drbd_device *device, sector_t size); /* * used to submit our private bio */ -static inline void drbd_generic_make_request(struct drbd_device *device, +static inline void drbd_submit_bio_noacct(struct drbd_device *device, int fault_type, struct bio *bio) { __release(local); if (!bio->bi_disk) { - drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n"); + drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n"); bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return; @@ -1590,7 +1590,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device, if (drbd_insert_fault(device, fault_type)) bio_io_error(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); } void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 45fbd526c453..cb687ccdbd96 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -132,9 +132,10 @@ wait_queue_head_t drbd_pp_wait; DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); static const struct block_device_operations drbd_ops = { - .owner = THIS_MODULE, - .open = drbd_open, - .release = drbd_release, + .owner = THIS_MODULE, + .submit_bio = drbd_submit_bio, + .open = drbd_open, + .release = drbd_release, }; struct bio *bio_alloc_drbd(gfp_t gfp_mask) @@ -2324,7 +2325,7 @@ static void do_retry(struct work_struct *ws) * workqueues instead. */ - /* We are not just doing generic_make_request(), + /* We are not just doing submit_bio_noacct(), * as we want to keep the start_time information. 
*/ inc_ap_bio(device); __drbd_make_request(device, bio, start_jif); @@ -2414,62 +2415,6 @@ static void drbd_cleanup(void) pr_info("module cleanup done.\n"); } -/** - * drbd_congested() - Callback for the flusher thread - * @congested_data: User data - * @bdi_bits: Bits the BDI flusher thread is currently interested in - * - * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested. - */ -static int drbd_congested(void *congested_data, int bdi_bits) -{ - struct drbd_device *device = congested_data; - struct request_queue *q; - char reason = '-'; - int r = 0; - - if (!may_inc_ap_bio(device)) { - /* DRBD has frozen IO */ - r = bdi_bits; - reason = 'd'; - goto out; - } - - if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) { - r |= (1 << WB_async_congested); - /* Without good local data, we would need to read from remote, - * and that would need the worker thread as well, which is - * currently blocked waiting for that usermode helper to - * finish. - */ - if (!get_ldev_if_state(device, D_UP_TO_DATE)) - r |= (1 << WB_sync_congested); - else - put_ldev(device); - r &= bdi_bits; - reason = 'c'; - goto out; - } - - if (get_ldev(device)) { - q = bdev_get_queue(device->ldev->backing_bdev); - r = bdi_congested(q->backing_dev_info, bdi_bits); - put_ldev(device); - if (r) - reason = 'b'; - } - - if (bdi_bits & (1 << WB_async_congested) && - test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) { - r |= (1 << WB_async_congested); - reason = reason == 'b' ? 'a' : 'n'; - } - -out: - device->congestion_reason = reason; - return r; -} - static void drbd_init_workqueue(struct drbd_work_queue* wq) { spin_lock_init(&wq->q_lock); @@ -2801,11 +2746,10 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_init_set_defaults(device); - q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE); + q = blk_alloc_queue(NUMA_NO_NODE); if (!q) goto out_no_q; device->rq_queue = q; - q->queuedata = device; disk = alloc_disk(1); if (!disk) @@ -2825,9 +2769,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig /* we have no partitions. we contain only ourselves. */ device->this_bdev->bd_contains = device->this_bdev; - q->backing_dev_info->congested_fn = drbd_congested; - q->backing_dev_info->congested_data = device; - blk_queue_write_cache(q, true, true); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 1c41cd9982a2..3c0193de2498 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -265,7 +265,6 @@ int drbd_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { /* reset device->congestion_reason */ - bdi_rw_congested(device->rq_queue->backing_dev_info); nc = rcu_dereference(first_peer_device(device)->connection->net_conf); wp = nc ? 
nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 3a3f2b6a821f..c74f561b4eab 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1723,7 +1723,7 @@ next_bio: bios = bios->bi_next; bio->bi_next = NULL; - drbd_generic_make_request(device, fault_type, bio); + drbd_submit_bio_noacct(device, fault_type, bio); } while (bios); return 0; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index c80a2f1c3c2a..674be09b2da9 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1164,7 +1164,7 @@ drbd_submit_req_private_bio(struct drbd_request *req) else if (bio_op(bio) == REQ_OP_DISCARD) drbd_process_discard_or_zeroes_req(req, EE_TRIM); else - generic_make_request(bio); + submit_bio_noacct(bio); put_ldev(device); } else bio_io_error(bio); @@ -1593,12 +1593,12 @@ void do_submit(struct work_struct *ws) } } -blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t drbd_submit_bio(struct bio *bio) { - struct drbd_device *device = (struct drbd_device *) q->queuedata; + struct drbd_device *device = bio->bi_disk->private_data; unsigned long start_jif; - blk_queue_split(q, &bio); + blk_queue_split(&bio); start_jif = jiffies; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 2b89c9f2ca70..7c903de5c4e1 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1525,7 +1525,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel) drbd_req_make_private_bio(req, req->master_bio); bio_set_dev(req->private_bio, device->ldev->backing_bdev); - generic_make_request(req->private_bio); + submit_bio_noacct(req->private_bio); return 0; } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 3e9db22db2a8..09079aee8dc4 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4205,7 +4205,6 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) struct bio_vec bio_vec; struct page *page; struct rb0_cbdata cbdata; - size_t size; page = alloc_page(GFP_NOIO); if (!page) { @@ -4213,15 +4212,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) return -ENOMEM; } - size = bdev->bd_block_size; - if (!size) - size = 1024; - cbdata.drive = drive; bio_init(&bio, &bio_vec, 1); bio_set_dev(&bio, bdev); - bio_add_page(&bio, page, size, 0); + bio_add_page(&bio, page, block_size(bdev), 0); bio.bi_iter.bi_sector = 0; bio.bi_flags |= (1 << BIO_QUIET); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 475e1a738560..d18160146226 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -509,7 +509,8 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd) return; kfree(cmd->bvec); cmd->bvec = NULL; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) @@ -1089,11 +1090,10 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, * here to avoid changing device under exclusive owner. 
*/ if (!(mode & FMODE_EXCL)) { - claimed_bdev = bd_start_claiming(bdev, loop_configure); - if (IS_ERR(claimed_bdev)) { - error = PTR_ERR(claimed_bdev); + claimed_bdev = bdev->bd_contains; + error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure); + if (error) goto out_putf; - } } error = mutex_lock_killable(&loop_ctl_mutex); @@ -2048,7 +2048,8 @@ static void loop_handle_cmd(struct loop_cmd *cmd) cmd->ret = ret; else cmd->ret = ret ? -EIO : 0; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } } @@ -2402,6 +2403,8 @@ static void __exit loop_exit(void) range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; + mutex_lock(&loop_ctl_mutex); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); @@ -2409,6 +2412,8 @@ static void __exit loop_exit(void) unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); + + mutex_unlock(&loop_ctl_mutex); } module_init(loop_init); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index f6bafa9a68b9..153e2cdecb4d 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -492,7 +492,8 @@ static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status) struct request *req = blk_mq_rq_from_pdu(cmd); cmd->status = status; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } /* diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index ce7e9f223b20..3ff4054d6834 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -784,6 +784,7 @@ static void recv_work(struct work_struct *work) struct nbd_device *nbd = args->nbd; struct nbd_config *config = nbd->config; struct nbd_cmd *cmd; + struct request *rq; while (1) { cmd = nbd_read_stat(nbd, args->index); @@ -796,7 +797,9 @@ static void recv_work(struct work_struct *work) break; } - blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); + rq = blk_mq_rq_from_pdu(cmd); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } atomic_dec(&config->recv_threads); wake_up(&config->recv_wq); diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 87b31f9ca362..907c6858aec0 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1283,7 +1283,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (cmd->nq->dev->queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + if (likely(!blk_should_fake_timeout(cmd->rq->q))) + blk_mq_complete_request(cmd->rq); break; case NULL_Q_BIO: /* @@ -1387,11 +1388,11 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb) return &nullb->queues[index]; } -static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) +static blk_qc_t null_submit_bio(struct bio *bio) { sector_t sector = bio->bi_iter.bi_sector; sector_t nr_sectors = bio_sectors(bio); - struct nullb *nullb = q->queuedata; + struct nullb *nullb = bio->bi_disk->private_data; struct nullb_queue *nq = nullb_to_queue(nullb); struct nullb_cmd *cmd; @@ -1423,7 +1424,7 @@ static bool should_requeue_request(struct request *rq) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) { pr_info("rq %p timed out\n", rq); - blk_mq_force_complete_rq(rq); + blk_mq_complete_request(rq); return BLK_EH_DONE; } @@ -1574,7 +1575,13 @@ static void null_config_discard(struct nullb *nullb) blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); } 
-static const struct block_device_operations null_ops = { +static const struct block_device_operations null_bio_ops = { + .owner = THIS_MODULE, + .submit_bio = null_submit_bio, + .report_zones = null_report_zones, +}; + +static const struct block_device_operations null_rq_ops = { .owner = THIS_MODULE, .report_zones = null_report_zones, }; @@ -1646,7 +1653,10 @@ static int null_gendisk_register(struct nullb *nullb) disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; disk->major = null_major; disk->first_minor = nullb->index; - disk->fops = &null_ops; + if (queue_is_mq(nullb->q)) + disk->fops = &null_rq_ops; + else + disk->fops = &null_bio_ops; disk->private_data = nullb; disk->queue = nullb->q; strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); @@ -1791,7 +1801,7 @@ static int null_add_dev(struct nullb_device *dev) goto out_cleanup_tags; } } else if (dev->queue_mode == NULL_Q_BIO) { - nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node); + nullb->q = blk_alloc_queue(dev->home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 27a33adc41e4..4becc1efe775 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -36,7 +36,7 @@ * block device, assembling the pieces to full packets and queuing them to the * packet I/O scheduler. * - * At the top layer there is a custom make_request_fn function that forwards + * At the top layer there is a custom ->submit_bio function that forwards * read requests directly to the iosched queue and puts write requests in the * unaligned write queue. A kernel thread performs the necessary read * gathering to convert the unaligned writes to aligned writes and then feeds @@ -913,7 +913,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) } atomic_inc(&pd->cdrw.pending_bios); - generic_make_request(bio); + submit_bio_noacct(bio); } } @@ -2428,15 +2428,15 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) } } -static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t pkt_submit_bio(struct bio *bio) { struct pktcdvd_device *pd; char b[BDEVNAME_SIZE]; struct bio *split; - blk_queue_split(q, &bio); + blk_queue_split(&bio); - pd = q->queuedata; + pd = bio->bi_disk->queue->queuedata; if (!pd) { pr_err("%s incorrect request queue\n", bio_devname(bio, b)); goto end_io; @@ -2480,7 +2480,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) split = bio; } - pkt_make_request_write(q, split); + pkt_make_request_write(bio->bi_disk->queue, split); } while (split != bio); return BLK_QC_T_NONE; @@ -2685,6 +2685,7 @@ static char *pkt_devnode(struct gendisk *disk, umode_t *mode) static const struct block_device_operations pktcdvd_ops = { .owner = THIS_MODULE, + .submit_bio = pkt_submit_bio, .open = pkt_open, .release = pkt_close, .ioctl = pkt_ioctl, @@ -2749,7 +2750,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) disk->flags = GENHD_FL_REMOVABLE; strcpy(disk->disk_name, pd->name); disk->private_data = pd; - disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE); + disk->queue = blk_alloc_queue(NUMA_NO_NODE); if (!disk->queue) goto out_mem2; diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 821d4d8b1d76..1088798c8dd0 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -90,12 +90,6 @@ struct ps3vram_priv { static int ps3vram_major; - -static const struct block_device_operations ps3vram_fops = { - .owner = 
THIS_MODULE, -}; - - #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ #define DMA_NOTIFIER_SIZE 0x40 @@ -585,15 +579,15 @@ out: return next; } -static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t ps3vram_submit_bio(struct bio *bio) { - struct ps3_system_bus_device *dev = q->queuedata; + struct ps3_system_bus_device *dev = bio->bi_disk->private_data; struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int busy; dev_dbg(&dev->core, "%s\n", __func__); - blk_queue_split(q, &bio); + blk_queue_split(&bio); spin_lock_irq(&priv->lock); busy = !bio_list_empty(&priv->list); @@ -610,6 +604,11 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } +static const struct block_device_operations ps3vram_fops = { + .owner = THIS_MODULE, + .submit_bio = ps3vram_submit_bio, +}; + static int ps3vram_probe(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv; @@ -737,7 +736,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) ps3vram_proc_init(dev); - queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE); + queue = blk_alloc_queue(NUMA_NO_NODE); if (!queue) { dev_err(&dev->core, "blk_alloc_queue failed\n"); error = -ENOMEM; @@ -745,7 +744,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) } priv->queue = queue; - queue->queuedata = dev; blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 3ba07ab30c84..edacefff6e35 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -50,6 +50,8 @@ struct rsxx_bio_meta { static struct kmem_cache *bio_meta_pool; +static blk_qc_t rsxx_submit_bio(struct bio *bio); + /*----------------- Block Device Operations -----------------*/ static int rsxx_blkdev_ioctl(struct block_device *bdev, fmode_t mode, @@ -92,6 +94,7 @@ static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations rsxx_fops = { .owner = THIS_MODULE, + .submit_bio = rsxx_submit_bio, .getgeo = rsxx_getgeo, .ioctl = rsxx_blkdev_ioctl, }; @@ -117,13 +120,13 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, } } -static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t rsxx_submit_bio(struct bio *bio) { - struct rsxx_cardinfo *card = q->queuedata; + struct rsxx_cardinfo *card = bio->bi_disk->private_data; struct rsxx_bio_meta *bio_meta; blk_status_t st = BLK_STS_IOERR; - blk_queue_split(q, &bio); + blk_queue_split(&bio); might_sleep(); @@ -233,7 +236,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) return -ENOMEM; } - card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE); + card->queue = blk_alloc_queue(NUMA_NO_NODE); if (!card->queue) { dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); unregister_blkdev(card->major, DRIVER_NAME); @@ -267,8 +270,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE; } - card->queue->queuedata = card; - snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name), "rsxx%d", card->disk_id); card->gendisk->major = card->major; @@ -289,7 +290,6 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card) card->gendisk = NULL; blk_cleanup_queue(card->queue); - card->queue->queuedata = NULL; 
unregister_blkdev(card->major, DRIVER_NAME); } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 51569c199a6c..3a476dc1d14f 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1417,7 +1417,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REPORT_GOOD: case SKD_CHECK_STATUS_REPORT_SMART_ALERT: skreq->status = BLK_STS_OK; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); break; case SKD_CHECK_STATUS_BUSY_IMMINENT: @@ -1440,7 +1441,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REPORT_ERROR: default: skreq->status = BLK_STS_IOERR; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); break; } } @@ -1560,7 +1562,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev, */ if (likely(cmp_status == SAM_STAT_GOOD)) { skreq->status = BLK_STS_OK; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } else { skd_resolve_req_exception(skdev, skreq, rq); } diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 1e2aa5ae2796..2b95d7b33b91 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -519,14 +519,15 @@ static int mm_check_plugged(struct cardinfo *card) return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); } -static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t mm_submit_bio(struct bio *bio) { - struct cardinfo *card = q->queuedata; + struct cardinfo *card = bio->bi_disk->private_data; + pr_debug("mm_make_request %llu %u\n", (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size); - blk_queue_split(q, &bio); + blk_queue_split(&bio); spin_lock_irq(&card->lock); *card->biotail = bio; @@ -778,6 +779,7 @@ static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations mm_fops = { .owner = THIS_MODULE, + .submit_bio = mm_submit_bio, .getgeo = mm_getgeo, .revalidate_disk = mm_revalidate, }; @@ -885,10 +887,9 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) card->biotail = &card->bio; spin_lock_init(&card->lock); - card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE); + card->queue = blk_alloc_queue(NUMA_NO_NODE); if (!card->queue) goto failed_alloc; - card->queue->queuedata = card; tasklet_init(&card->tasklet, process_page, (unsigned long)card); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 980df853ee49..63b213e00b37 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -171,7 +171,8 @@ static void virtblk_done(struct virtqueue *vq) while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { struct request *req = blk_mq_rq_from_pdu(vbr); - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3b889ea950c2..3bb3dd8da9b0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1655,7 +1655,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) BUG(); } - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } rinfo->ring.rsp_cons = i; diff --git a/drivers/block/zram/zram_drv.c 
b/drivers/block/zram/zram_drv.c index 270dd810be54..9100ac36670a 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -793,9 +793,9 @@ static void zram_sync_read(struct work_struct *work) } /* - * Block layer want one ->make_request_fn to be active at a time - * so if we use chained IO with parent IO in same context, - * it's a deadlock. To avoid, it, it uses worker thread context. + * Block layer want one ->submit_bio to be active at a time, so if we use + * chained IO with parent IO in same context, it's a deadlock. To avoid that, + * use a worker thread context. */ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, unsigned long entry, struct bio *bio) @@ -1584,9 +1584,9 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) /* * Handler function for all zram I/O requests. */ -static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) +static blk_qc_t zram_submit_bio(struct bio *bio) { - struct zram *zram = queue->queuedata; + struct zram *zram = bio->bi_disk->private_data; if (!valid_io_request(zram, bio->bi_iter.bi_sector, bio->bi_iter.bi_size)) { @@ -1813,6 +1813,7 @@ static int zram_open(struct block_device *bdev, fmode_t mode) static const struct block_device_operations zram_devops = { .open = zram_open, + .submit_bio = zram_submit_bio, .swap_slot_free_notify = zram_slot_free_notify, .rw_page = zram_rw_page, .owner = THIS_MODULE @@ -1891,7 +1892,7 @@ static int zram_add(void) #ifdef CONFIG_ZRAM_WRITEBACK spin_lock_init(&zram->wb_limit_lock); #endif - queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE); + queue = blk_alloc_queue(NUMA_NO_NODE); if (!queue) { pr_err("Error allocating disk queue for device %d\n", device_id); @@ -1912,7 +1913,6 @@ static int zram_add(void) zram->disk->first_minor = device_id; zram->disk->fops = &zram_devops; zram->disk->queue = queue; - zram->disk->queue->queuedata = zram; zram->disk->private_data = zram; snprintf(zram->disk->disk_name, 16, "zram%d", device_id); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index d82b3b7658bd..0c271b9e3c5b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -605,7 +605,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi) disk->cdi = cdi; ENSURE(cdo, drive_status, CDC_DRIVE_STATUS); - if (cdo->check_events == NULL && cdo->media_changed == NULL) + if (cdo->check_events == NULL) WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC)); ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); ENSURE(cdo, lock_door, CDC_LOCK); @@ -1419,8 +1419,6 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) if (cdi->ops->check_events) cdi->ops->check_events(cdi, 0, slot); - else - cdi->ops->media_changed(cdi, slot); if (slot == CDSL_NONE) { /* set media changed bits, on both queues */ @@ -1517,13 +1515,10 @@ int media_changed(struct cdrom_device_info *cdi, int queue) return ret; /* changed since last call? 
*/ - if (cdi->ops->check_events) { - BUG_ON(!queue); /* shouldn't be called from VFS path */ - cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE); - changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE; - cdi->ioctl_events = 0; - } else - changed = cdi->ops->media_changed(cdi, CDSL_CURRENT); + BUG_ON(!queue); /* shouldn't be called from VFS path */ + cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE); + changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE; + cdi->ioctl_events = 0; if (changed) { cdi->mc_flags = 0x3; /* set bit on both queues */ @@ -1535,18 +1530,6 @@ int media_changed(struct cdrom_device_info *cdi, int queue) return ret; } -int cdrom_media_changed(struct cdrom_device_info *cdi) -{ - /* This talks to the VFS, which doesn't like errors - just 1 or 0. - * Returning "0" is always safe (media hasn't been changed). Do that - * if the low-level cdrom driver dosn't support media changed. */ - if (cdi == NULL || cdi->ops->media_changed == NULL) - return 0; - if (!CDROM_CAN(CDC_MEDIA_CHANGED)) - return 0; - return media_changed(cdi, 0); -} - /* Requests to the low-level drivers will /always/ be done in the following format convention: @@ -3464,7 +3447,6 @@ EXPORT_SYMBOL(unregister_cdrom); EXPORT_SYMBOL(cdrom_open); EXPORT_SYMBOL(cdrom_release); EXPORT_SYMBOL(cdrom_ioctl); -EXPORT_SYMBOL(cdrom_media_changed); EXPORT_SYMBOL(cdrom_number_of_slots); EXPORT_SYMBOL(cdrom_mode_select); EXPORT_SYMBOL(cdrom_mode_sense); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 8e32345be0f7..f50828526331 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -59,7 +59,7 @@ EXPORT_SYMBOL(bdev_dax_pgoff); #if IS_ENABLED(CONFIG_FS_DAX) struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) { - if (!blk_queue_dax(bdev->bd_queue)) + if (!blk_queue_dax(bdev->bd_disk->queue)) return NULL; return dax_get_by_host(bdev->bd_disk->disk_name); } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index db38a68abb6c..fe78bf0fdce5 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -236,10 +236,6 @@ err_dev: return tgt_dev; } -static const struct block_device_operations nvm_fops = { - .owner = THIS_MODULE, -}; - static struct nvm_tgt_type *__nvm_find_target_type(const char *name) { struct nvm_tgt_type *tt; @@ -380,7 +376,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) goto err_dev; } - tqueue = blk_alloc_queue(tt->make_rq, dev->q->node); + tqueue = blk_alloc_queue(dev->q->node); if (!tqueue) { ret = -ENOMEM; goto err_disk; @@ -390,7 +386,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) tdisk->flags = GENHD_FL_EXT_DEVT; tdisk->major = 0; tdisk->first_minor = 0; - tdisk->fops = &nvm_fops; + tdisk->fops = tt->bops; tdisk->queue = tqueue; targetdata = tt->init(tgt_dev, tdisk, create->flags); diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 6e677ff62cc9..b6246f73895c 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -47,9 +47,9 @@ static struct pblk_global_caches pblk_caches = { struct bio_set pblk_bio_set; -static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) +static blk_qc_t pblk_submit_bio(struct bio *bio) { - struct pblk *pblk = q->queuedata; + struct pblk *pblk = bio->bi_disk->queue->queuedata; if (bio_op(bio) == REQ_OP_DISCARD) { pblk_discard(pblk, bio); @@ -63,7 +63,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) * constraint. Writes can be of arbitrary size. 
*/ if (bio_data_dir(bio) == READ) { - blk_queue_split(q, &bio); + blk_queue_split(&bio); pblk_submit_read(pblk, bio); } else { /* Prevent deadlock in the case of a modest LUN configuration @@ -71,7 +71,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) * leaves at least 256KB available for user I/O. */ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl)) - blk_queue_split(q, &bio); + blk_queue_split(&bio); pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER); } @@ -79,6 +79,12 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } +static const struct block_device_operations pblk_bops = { + .owner = THIS_MODULE, + .submit_bio = pblk_submit_bio, +}; + + static size_t pblk_trans_map_size(struct pblk *pblk) { int entry_size = 8; @@ -1280,7 +1286,7 @@ static struct nvm_tgt_type tt_pblk = { .name = "pblk", .version = {1, 0, 0}, - .make_rq = pblk_make_rq, + .bops = &pblk_bops, .capacity = pblk_capacity, .init = pblk_init, diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 140927ebf41e..c28537a489bc 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -320,7 +320,7 @@ split_retry: split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL, &pblk_bio_set); bio_chain(split_bio, bio); - generic_make_request(bio); + submit_bio_noacct(bio); /* New bio contains first N sectors of the previous one, so * we can continue to use existing rqd, but we need to shrink diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 221e0191b687..3c708e8b5e2d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -929,7 +929,7 @@ static inline void closure_bio_submit(struct cache_set *c, bio_endio(bio); return; } - generic_make_request(bio); + submit_bio_noacct(bio); } /* diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 6548a601edf0..d5c51e332046 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -959,7 +959,7 @@ err: * bch_btree_node_get - find a btree node in the cache and lock it, reading it * in from disk if necessary. * - * If IO is necessary and running under generic_make_request, returns -EAGAIN. + * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN. * * The btree node will have either a read or a write lock held, depending on * level and op->lock. 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 7acf024e99f3..a190bf47076d 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) !blk_queue_discard(bdev_get_queue(dc->bdev))) bio->bi_end_io(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); } static void quit_max_writeback_rate(struct cache_set *c, @@ -1158,7 +1158,7 @@ static void quit_max_writeback_rate(struct cache_set *c, /* Cached devices - read & write stuff */ -blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t cached_dev_submit_bio(struct bio *bio) { struct search *s; struct bcache_device *d = bio->bi_disk->private_data; @@ -1197,7 +1197,7 @@ blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio) if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under - * generic_make_request + * submit_bio_noacct */ continue_at_nobarrier(&s->cl, cached_dev_nodata, @@ -1228,36 +1228,8 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); } -static int cached_dev_congested(void *data, int bits) -{ - struct bcache_device *d = data; - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - struct request_queue *q = bdev_get_queue(dc->bdev); - int ret = 0; - - if (bdi_congested(q->backing_dev_info, bits)) - return 1; - - if (cached_dev_get(dc)) { - unsigned int i; - struct cache *ca; - - for_each_cache(ca, d->c, i) { - q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(q->backing_dev_info, bits); - } - - cached_dev_put(dc); - } - - return ret; -} - void bch_cached_dev_request_init(struct cached_dev *dc) { - struct gendisk *g = dc->disk.disk; - - g->queue->backing_dev_info->congested_fn = cached_dev_congested; dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.ioctl = cached_dev_ioctl; } @@ -1291,7 +1263,7 @@ static void flash_dev_nodata(struct closure *cl) continue_at(cl, search_free, NULL); } -blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t flash_dev_submit_bio(struct bio *bio) { struct search *s; struct closure *cl; @@ -1311,8 +1283,7 @@ blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio) if (!bio->bi_iter.bi_size) { /* - * can't call bch_journal_meta from under - * generic_make_request + * can't call bch_journal_meta from under submit_bio_noacct */ continue_at_nobarrier(&s->cl, flash_dev_nodata, @@ -1342,27 +1313,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, return -ENOTTY; } -static int flash_dev_congested(void *data, int bits) -{ - struct bcache_device *d = data; - struct request_queue *q; - struct cache *ca; - unsigned int i; - int ret = 0; - - for_each_cache(ca, d->c, i) { - q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(q->backing_dev_info, bits); - } - - return ret; -} - void bch_flash_dev_request_init(struct bcache_device *d) { - struct gendisk *g = d->disk; - - g->queue->backing_dev_info->congested_fn = flash_dev_congested; d->cache_miss = flash_dev_cache_miss; d->ioctl = flash_dev_ioctl; } diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index bb005c93dd72..82b38366a95d 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev 
*dc); -blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio); +blk_qc_t cached_dev_submit_bio(struct bio *bio); void bch_flash_dev_request_init(struct bcache_device *d); -blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio); +blk_qc_t flash_dev_submit_bio(struct bio *bio); extern struct kmem_cache *bch_search_cache; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2014016f9a60..9e45faa054b6 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -680,7 +680,16 @@ static int ioctl_dev(struct block_device *b, fmode_t mode, return d->ioctl(d, mode, cmd, arg); } -static const struct block_device_operations bcache_ops = { +static const struct block_device_operations bcache_cached_ops = { + .submit_bio = cached_dev_submit_bio, + .open = open_dev, + .release = release_dev, + .ioctl = ioctl_dev, + .owner = THIS_MODULE, +}; + +static const struct block_device_operations bcache_flash_ops = { + .submit_bio = flash_dev_submit_bio, .open = open_dev, .release = release_dev, .ioctl = ioctl_dev, @@ -820,8 +829,8 @@ static void bcache_device_free(struct bcache_device *d) } static int bcache_device_init(struct bcache_device *d, unsigned int block_size, - sector_t sectors, make_request_fn make_request_fn, - struct block_device *cached_bdev) + sector_t sectors, struct block_device *cached_bdev, + const struct block_device_operations *ops) { struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, @@ -868,16 +877,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->major = bcache_major; d->disk->first_minor = idx_to_first_minor(idx); - d->disk->fops = &bcache_ops; + d->disk->fops = ops; d->disk->private_data = d; - q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE); + q = blk_alloc_queue(NUMA_NO_NODE); if (!q) return -ENOMEM; d->disk->queue = q; - q->queuedata = d; - q->backing_dev_info->congested_data = d; q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; @@ -1356,7 +1363,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) ret = bcache_device_init(&dc->disk, block_size, dc->bdev->bd_part->nr_sects - dc->sb.data_offset, - cached_dev_make_request, dc->bdev); + dc->bdev, &bcache_cached_ops); if (ret) return ret; @@ -1469,7 +1476,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) kobject_init(&d->kobj, &bch_flash_dev_ktype); if (bcache_device_init(d, block_bytes(c), u->sectors, - flash_dev_make_request, NULL)) + NULL, &bcache_flash_ops)) goto err; bcache_device_attach(d, c, u - c->uuids); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index d3bb355819a4..96c93802ee4d 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -421,8 +421,6 @@ struct cache { struct rw_semaphore quiesce_lock; - struct dm_target_callbacks callbacks; - /* * origin_blocks entries, discarded if set. 
*/ @@ -886,7 +884,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio) static void accounted_request(struct cache *cache, struct bio *bio) { accounted_begin(cache, bio); - generic_make_request(bio); + submit_bio_noacct(bio); } static void issue_op(struct bio *bio, void *context) @@ -1792,7 +1790,7 @@ static bool process_bio(struct cache *cache, struct bio *bio) bool commit_needed; if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) - generic_make_request(bio); + submit_bio_noacct(bio); return commit_needed; } @@ -1858,7 +1856,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio) if (cache->features.discard_passdown) { remap_to_origin(cache, bio); - generic_make_request(bio); + submit_bio_noacct(bio); } else bio_endio(bio); @@ -2423,20 +2421,6 @@ static void set_cache_size(struct cache *cache, dm_cblock_t size) cache->cache_size = size; } -static int is_congested(struct dm_dev *dev, int bdi_bits) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - -static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct cache *cache = container_of(cb, struct cache, callbacks); - - return is_congested(cache->origin_dev, bdi_bits) || - is_congested(cache->cache_dev, bdi_bits); -} - #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -2471,9 +2455,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) goto bad; } - cache->callbacks.congested_fn = cache_is_congested; - dm_table_add_target_callbacks(ti->table, &cache->callbacks); - cache->metadata_dev = ca->metadata_dev; cache->origin_dev = ca->origin_dev; cache->cache_dev = ca->cache_dev; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 5ce96ddf1ce1..bdb255edc200 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -68,7 +68,6 @@ struct hash_table_bucket; struct clone { struct dm_target *ti; - struct dm_target_callbacks callbacks; struct dm_dev *metadata_dev; struct dm_dev *dest_dev; @@ -330,7 +329,7 @@ static void submit_bios(struct bio_list *bios) blk_start_plug(&plug); while ((bio = bio_list_pop(bios))) - generic_make_request(bio); + submit_bio_noacct(bio); blk_finish_plug(&plug); } @@ -346,7 +345,7 @@ static void submit_bios(struct bio_list *bios) static void issue_bio(struct clone *clone, struct bio *bio) { if (!bio_triggers_commit(clone, bio)) { - generic_make_request(bio); + submit_bio_noacct(bio); return; } @@ -473,7 +472,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ bio_region_range(clone, bio, &rs, &nr_regions); trim_bio(bio, region_to_sector(clone, rs), nr_regions << clone->region_shift); - generic_make_request(bio); + submit_bio_noacct(bio); } else bio_endio(bio); } @@ -865,7 +864,7 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio bio->bi_private = hd; atomic_inc(&hd->clone->hydrations_in_flight); - generic_make_request(bio); + submit_bio_noacct(bio); } /* @@ -1281,7 +1280,7 @@ static void process_deferred_flush_bios(struct clone *clone) */ bio_endio(bio); } else { - generic_make_request(bio); + submit_bio_noacct(bio); } } } @@ -1518,18 +1517,6 @@ error: DMEMIT("Error"); } -static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct request_queue *dest_q, *source_q; - struct clone *clone = container_of(cb, struct clone, callbacks); - - 
source_q = bdev_get_queue(clone->source_dev->bdev); - dest_q = bdev_get_queue(clone->dest_dev->bdev); - - return (bdi_congested(dest_q->backing_dev_info, bdi_bits) | - bdi_congested(source_q->backing_dev_info, bdi_bits)); -} - static sector_t get_dev_size(struct dm_dev *dev) { return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; @@ -1930,8 +1917,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto out_with_mempool; mutex_init(&clone->commit_lock); - clone->callbacks.congested_fn = clone_is_congested; - dm_table_add_target_callbacks(ti->table, &clone->callbacks); /* Enable flushes */ ti->num_flush_bios = 1; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 000ddfab5ba0..ad324abb8c49 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1789,7 +1789,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) return 1; } - generic_make_request(clone); + submit_bio_noacct(clone); return 0; } @@ -1815,7 +1815,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; - generic_make_request(clone); + submit_bio_noacct(clone); } #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) @@ -1893,7 +1893,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) clone->bi_iter.bi_sector = cc->start + io->sector; if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { - generic_make_request(clone); + submit_bio_noacct(clone); return; } diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index f496213f8b67..2628a832787b 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - generic_make_request(bio); + submit_bio_noacct(bio); bio = n; } } diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index bdb84b8e7162..b24e3839bb3a 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1137,7 +1137,6 @@ static int metadata_get_stats(struct era_metadata *md, void *ptr) struct era { struct dm_target *ti; - struct dm_target_callbacks callbacks; struct dm_dev *metadata_dev; struct dm_dev *origin_dev; @@ -1265,7 +1264,7 @@ static void process_deferred_bios(struct era *era) bio_io_error(bio); else while ((bio = bio_list_pop(&marked_bios))) - generic_make_request(bio); + submit_bio_noacct(bio); } static void process_rpc_calls(struct era *era) @@ -1375,18 +1374,6 @@ static void stop_worker(struct era *era) /*---------------------------------------------------------------- * Target methods *--------------------------------------------------------------*/ -static int dev_is_congested(struct dm_dev *dev, int bdi_bits) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - -static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct era *era = container_of(cb, struct era, callbacks); - return dev_is_congested(era->origin_dev, bdi_bits); -} - static void era_destroy(struct era *era) { if (era->md) @@ -1514,8 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->flush_supported = true; ti->num_discard_bios = 1; - era->callbacks.congested_fn = era_is_congested; - dm_table_add_target_callbacks(ti->table, &era->callbacks); return 0; } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 81dc5ff08909..ae866e469e1b 100644 --- a/drivers/md/dm-integrity.c +++ 
b/drivers/md/dm-integrity.c @@ -2115,12 +2115,12 @@ offload_to_thread: dio->in_flight = (atomic_t)ATOMIC_INIT(1); dio->completion = NULL; - generic_make_request(bio); + submit_bio_noacct(bio); return; } - generic_make_request(bio); + submit_bio_noacct(bio); if (need_sync_io) { wait_for_completion_io(&read_comp); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 78cff42d987e..73bb23de6336 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -677,7 +677,7 @@ static void process_queued_bios(struct work_struct *work) bio_endio(bio); break; case DM_MAPIO_REMAPPED: - generic_make_request(bio); + submit_bio_noacct(bio); break; case DM_MAPIO_SUBMITTED: break; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 10e8b2fe787b..d9e270957e18 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -242,7 +242,6 @@ struct raid_set { struct mddev md; struct raid_type *raid_type; - struct dm_target_callbacks callbacks; sector_t array_sectors; sector_t dev_sectors; @@ -1705,13 +1704,6 @@ static void do_table_event(struct work_struct *ws) dm_table_event(rs->ti->table); } -static int raid_is_congested(struct dm_target_callbacks *cb, int bits) -{ - struct raid_set *rs = container_of(cb, struct raid_set, callbacks); - - return mddev_congested(&rs->md, bits); -} - /* * Make sure a valid takover (level switch) is being requested on @rs * @@ -3248,9 +3240,6 @@ size_check: goto bad_md_start; } - rs->callbacks.congested_fn = raid_is_congested; - dm_table_add_target_callbacks(ti->table, &rs->callbacks); - /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); @@ -3310,7 +3299,6 @@ static void raid_dtr(struct dm_target *ti) { struct raid_set *rs = ti->private; - list_del_init(&rs->callbacks.list); md_stop(&rs->md); raid_set_free(rs); } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 2f655d9f4200..fa09bc4e4c54 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -779,7 +779,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) wakeup_mirrord(ms); } else { map_bio(get_default_mirror(ms), bio); - generic_make_request(bio); + submit_bio_noacct(bio); } } } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 85e0daabad49..7ce387a1cc6a 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -284,7 +284,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error) struct dm_rq_target_io *tio = tio_from_request(rq); tio->error = error; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } /* diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 963d3774c93e..2d1d4a4c399c 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -252,7 +252,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op, /* * Issue the synchronous I/O from a different thread - * to avoid generic_make_request recursion. + * to avoid submit_bio_noacct recursion. 
*/ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 6b11a266299f..4668b2cd98f4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1568,7 +1568,7 @@ static void flush_bios(struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - generic_make_request(bio); + submit_bio_noacct(bio); bio = n; } } @@ -1588,7 +1588,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) bio->bi_next = NULL; r = do_origin(s->origin, bio, false); if (r == DM_MAPIO_REMAPPED) - generic_make_request(bio); + submit_bio_noacct(bio); bio = n; } } @@ -1829,7 +1829,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, bio->bi_end_io = full_bio_end_io; bio->bi_private = callback_data; - generic_make_request(bio); + submit_bio_noacct(bio); } static struct dm_snap_pending_exception * diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 8277b959e00b..0ea5b7367179 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -64,8 +64,6 @@ struct dm_table { void *event_context; struct dm_md_mempools *mempools; - - struct list_head target_callbacks; }; /* @@ -190,7 +188,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, return -ENOMEM; INIT_LIST_HEAD(&t->devices); - INIT_LIST_HEAD(&t->target_callbacks); if (!num_targets) num_targets = KEYS_PER_NODE; @@ -361,7 +358,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * This upgrades the mode on an already open dm_dev, being * careful to leave things as they were if we fail to reopen the * device and not to touch the existing bdev field in case - * it is accessed concurrently inside dm_table_any_congested(). + * it is accessed concurrently. 
*/ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) @@ -2052,38 +2049,6 @@ int dm_table_resume_targets(struct dm_table *t) return 0; } -void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) -{ - list_add(&cb->list, &t->target_callbacks); -} -EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); - -int dm_table_any_congested(struct dm_table *t, int bdi_bits) -{ - struct dm_dev_internal *dd; - struct list_head *devices = dm_table_get_devices(t); - struct dm_target_callbacks *cb; - int r = 0; - - list_for_each_entry(dd, devices, list) { - struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); - char b[BDEVNAME_SIZE]; - - if (likely(q)) - r |= bdi_congested(q->backing_dev_info, bdi_bits); - else - DMWARN_LIMIT("%s: any_congested: nonexistent device %s", - dm_device_name(t->md), - bdevname(dd->dm_dev->bdev, b)); - } - - list_for_each_entry(cb, &t->target_callbacks, list) - if (cb->congested_fn) - r |= cb->congested_fn(cb, bdi_bits); - - return r; -} - struct mapped_device *dm_table_get_md(struct dm_table *t) { return t->md; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fa8d5464c1fb..fff4c50df74d 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -326,7 +326,6 @@ struct pool_c { struct pool *pool; struct dm_dev *data_dev; struct dm_dev *metadata_dev; - struct dm_target_callbacks callbacks; dm_block_t low_water_blocks; struct pool_features requested_pf; /* Features requested during table load */ @@ -758,7 +757,7 @@ static void issue(struct thin_c *tc, struct bio *bio) struct pool *pool = tc->pool; if (!bio_triggers_commit(tc, bio)) { - generic_make_request(bio); + submit_bio_noacct(bio); return; } @@ -2394,7 +2393,7 @@ static void process_deferred_bios(struct pool *pool) if (bio->bi_opf & REQ_PREFLUSH) bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); } } @@ -2796,18 +2795,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) } } -static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct pool_c *pt = container_of(cb, struct pool_c, callbacks); - struct request_queue *q; - - if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) - return 1; - - q = bdev_get_queue(pt->data_dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - static void requeue_bios(struct pool *pool) { struct thin_c *tc; @@ -3420,9 +3407,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) dm_pool_register_pre_commit_callback(pool->pmd, metadata_pre_commit_callback, pool); - pt->callbacks.congested_fn = pool_is_congested; - dm_table_add_target_callbacks(ti->table, &pt->callbacks); - mutex_unlock(&dm_thin_pool_table.mutex); return 0; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index eec9f252e935..75fa4d9b7617 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -681,7 +681,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) verity_submit_prefetch(v, io); - generic_make_request(bio); + submit_bio_noacct(bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 5358894bb9fd..8aa306ebc2ab 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1244,7 +1244,7 @@ static int writecache_flush_thread(void *data) bio_end_sector(bio)); wc_unlock(wc); bio_set_dev(bio, wc->dev->bdev); - generic_make_request(bio); + submit_bio_noacct(bio); } else { writecache_flush(wc); wc_unlock(wc); 
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 42aa5139df7c..697f9de37355 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -140,7 +140,7 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, bio_advance(bio, clone->bi_iter.bi_size); refcount_inc(&bioctx->ref); - generic_make_request(clone); + submit_bio_noacct(clone); if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) zone->wp_block += nr_blocks; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 52449afd58eb..b2b07d954547 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1272,7 +1272,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) sector_t sector; struct bio *clone = &tio->clone; struct dm_io *io = tio->io; - struct mapped_device *md = io->md; struct dm_target *ti = tio->ti; blk_qc_t ret = BLK_QC_T_NONE; @@ -1294,10 +1293,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) /* the bio has been remapped so dispatch it */ trace_block_bio_remap(clone->bi_disk->queue, clone, bio_dev(io->orig_bio), sector); - if (md->type == DM_TYPE_NVME_BIO_BASED) - ret = direct_make_request(clone); - else - ret = generic_make_request(clone); + ret = submit_bio_noacct(clone); break; case DM_MAPIO_KILL: free_tio(tio); @@ -1644,7 +1640,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, error = __split_and_process_non_flush(&ci); if (current->bio_list && ci.sector_count && !error) { /* - * Remainder must be passed to generic_make_request() + * Remainder must be passed to submit_bio_noacct() * so that it gets handled *after* bios already submitted * have been completely processed. * We take a clone of the original to store in @@ -1669,7 +1665,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, bio_chain(b, bio); trace_block_split(md->queue, b, bio->bi_iter.bi_sector); - ret = generic_make_request(bio); + ret = submit_bio_noacct(bio); break; } } @@ -1737,7 +1733,7 @@ static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struc bio_chain(split, *bio); trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); - generic_make_request(*bio); + submit_bio_noacct(*bio); *bio = split; } } @@ -1762,13 +1758,13 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } /* - * If in ->make_request_fn we need to use blk_queue_split(), otherwise + * If in ->queue_bio we need to use blk_queue_split(), otherwise * queue_limits for abnormal requests (e.g. discard, writesame, etc) * won't be imposed. */ if (current->bio_list) { if (is_abnormal_io(bio)) - blk_queue_split(md->queue, &bio); + blk_queue_split(&bio); else dm_queue_split(md, ti, &bio); } @@ -1779,9 +1775,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, return __split_and_process_bio(md, map, bio); } -static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t dm_submit_bio(struct bio *bio) { - struct mapped_device *md = q->queuedata; + struct mapped_device *md = bio->bi_disk->private_data; blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; struct dm_table *map; @@ -1790,12 +1786,12 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) /* * We are called with a live reference on q_usage_counter, but * that one will be released as soon as we return. Grab an - * extra one as blk_mq_make_request expects to be able to - * consume a reference (which lives until the request is freed - * in case a request is allocated). 
+ * extra one as blk_mq_submit_bio expects to be able to consume + * a reference (which lives until the request is freed in case a + * request is allocated). */ - percpu_ref_get(&q->q_usage_counter); - return blk_mq_make_request(q, bio); + percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); + return blk_mq_submit_bio(bio); } map = dm_get_live_table(md, &srcu_idx); @@ -1817,31 +1813,6 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) return ret; } -static int dm_any_congested(void *congested_data, int bdi_bits) -{ - int r = bdi_bits; - struct mapped_device *md = congested_data; - struct dm_table *map; - - if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { - if (dm_request_based(md)) { - /* - * With request-based DM we only need to check the - * top-level queue for congestion. - */ - struct backing_dev_info *bdi = md->queue->backing_dev_info; - r = bdi->wb.congested->state & bdi_bits; - } else { - map = dm_get_live_table_fast(md); - if (map) - r = dm_table_any_congested(map, bdi_bits); - dm_put_live_table_fast(md); - } - } - - return r; -} - /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. *---------------------------------------------------------------*/ @@ -1980,14 +1951,13 @@ static struct mapped_device *alloc_dev(int minor) spin_lock_init(&md->uevent_lock); /* - * default to bio-based required ->make_request_fn until DM - * table is loaded and md->type established. If request-based - * table is loaded: blk-mq will override accordingly. + * default to bio-based until DM table is loaded and md->type + * established. If request-based table is loaded: blk-mq will + * override accordingly. */ - md->queue = blk_alloc_queue(dm_make_request, numa_node_id); + md->queue = blk_alloc_queue(numa_node_id); if (!md->queue) goto bad; - md->queue->queuedata = md; md->disk = alloc_disk_node(1, md->numa_node_id); if (!md->disk) @@ -2281,12 +2251,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_get_queue_limits); -static void dm_init_congested_fn(struct mapped_device *md) -{ - md->queue->backing_dev_info->congested_data = md; - md->queue->backing_dev_info->congested_fn = dm_any_congested; -} - /* * Setup the DM device's queue based on md's type */ @@ -2303,12 +2267,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) DMERR("Cannot initialize queue for request-based dm-mq mapped device"); return r; } - dm_init_congested_fn(md); break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_NVME_BIO_BASED: - dm_init_congested_fn(md); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2529,7 +2491,7 @@ static void dm_wq_work(struct work_struct *work) break; if (dm_request_based(md)) - (void) generic_make_request(c); + (void) submit_bio_noacct(c); else (void) dm_process_bio(md, map, c); } @@ -3269,6 +3231,7 @@ static const struct pr_ops dm_pr_ops = { }; static const struct block_device_operations dm_blk_dops = { + .submit_bio = dm_submit_bio, .open = dm_blk_open, .release = dm_blk_close, .ioctl = dm_blk_ioctl, diff --git a/drivers/md/dm.h b/drivers/md/dm.h index d7c4f6606b5f..4f5fe664d05a 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -63,7 +63,6 @@ void dm_table_presuspend_targets(struct dm_table *t); void dm_table_presuspend_undo_targets(struct dm_table *t); void dm_table_postsuspend_targets(struct dm_table *t); int dm_table_resume_targets(struct dm_table *t); -int dm_table_any_congested(struct dm_table *t, int 
bdi_bits); enum dm_queue_mode dm_table_get_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); struct dm_target *dm_table_get_immutable_target(struct dm_table *t); diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c index 50ad4ba86f0e..fda4cb3f936f 100644 --- a/drivers/md/md-faulty.c +++ b/drivers/md/md-faulty.c @@ -169,7 +169,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == WRITE) { /* write request */ if (atomic_read(&conf->counters[WriteAll])) { - /* special case - don't decrement, don't generic_make_request, + /* special case - don't decrement, don't submit_bio_noacct, * just fail immediately */ bio_io_error(bio); @@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) } else bio_set_dev(bio, conf->rdev->bdev); - generic_make_request(bio); + submit_bio_noacct(bio); return true; } diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 26c75c0199fa..c2ae9125c4c3 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -46,29 +46,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) return conf->disks + lo; } -/* - * In linear_congested() conf->raid_disks is used as a copy of - * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks - * and conf->disks[] are created in linear_conf(), they are always - * consitent with each other, but mddev->raid_disks does not. - */ -static int linear_congested(struct mddev *mddev, int bits) -{ - struct linear_conf *conf; - int i, ret = 0; - - rcu_read_lock(); - conf = rcu_dereference(mddev->private); - - for (i = 0; i < conf->raid_disks && !ret ; i++) { - struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); - ret |= bdi_congested(q->backing_dev_info, bits); - } - - rcu_read_unlock(); - return ret; -} - static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) { struct linear_conf *conf; @@ -267,7 +244,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) struct bio *split = bio_split(bio, end_sector - bio_sector, GFP_NOIO, &mddev->bio_set); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; } @@ -286,7 +263,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) bio_sector); mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); - generic_make_request(bio); + submit_bio_noacct(bio); } return true; @@ -322,7 +299,6 @@ static struct md_personality linear_personality = .hot_add_disk = linear_add, .size = linear_size, .quiesce = linear_quiesce, - .congested = linear_congested, }; static int __init linear_init (void) diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 152f9e65a226..776bbe542db5 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -131,7 +131,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) mp_bh->bio.bi_private = mp_bh; mddev_check_writesame(mddev, &mp_bh->bio); mddev_check_write_zeroes(mddev, &mp_bh->bio); - generic_make_request(&mp_bh->bio); + submit_bio_noacct(&mp_bh->bio); return true; } @@ -151,28 +151,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev) seq_putc(seq, ']'); } -static int multipath_congested(struct mddev *mddev, int bits) -{ - struct mpconf *conf = mddev->private; - int i, ret = 0; - - rcu_read_lock(); - for (i = 0; i < mddev->raid_disks ; i++) { - struct md_rdev *rdev = 
rcu_dereference(conf->multipaths[i].rdev); - if (rdev && !test_bit(Faulty, &rdev->flags)) { - struct request_queue *q = bdev_get_queue(rdev->bdev); - - ret |= bdi_congested(q->backing_dev_info, bits); - /* Just like multipath_map, we just check the - * first available device - */ - break; - } - } - rcu_read_unlock(); - return ret; -} - /* * Careful, this can execute in IRQ contexts as well! */ @@ -348,7 +326,7 @@ static void multipathd(struct md_thread *thread) bio->bi_opf |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; bio->bi_private = mp_bh; - generic_make_request(bio); + submit_bio_noacct(bio); } } spin_unlock_irqrestore(&conf->device_lock, flags); @@ -478,7 +456,6 @@ static struct md_personality multipath_personality = .hot_add_disk = multipath_add_disk, .hot_remove_disk= multipath_remove_disk, .size = multipath_size, - .congested = multipath_congested, }; static int __init multipath_init (void) diff --git a/drivers/md/md.c b/drivers/md/md.c index f567f536b529..96b28f6d025c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -199,7 +199,7 @@ static int rdevs_init_serial(struct mddev *mddev) static int rdev_need_serial(struct md_rdev *rdev) { return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && - rdev->bdev->bd_queue->nr_hw_queues != 1 && + rdev->bdev->bd_disk->queue->nr_hw_queues != 1 && test_bit(WriteMostly, &rdev->flags)); } @@ -463,7 +463,7 @@ check_suspended: } EXPORT_SYMBOL(md_handle_request); -static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t md_submit_bio(struct bio *bio) { const int rw = bio_data_dir(bio); const int sgrp = op_stat_group(bio_op(bio)); @@ -475,7 +475,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } - blk_queue_split(q, &bio); + blk_queue_split(&bio); if (mddev == NULL || mddev->pers == NULL) { bio_io_error(bio); @@ -549,26 +549,6 @@ void mddev_resume(struct mddev *mddev) } EXPORT_SYMBOL_GPL(mddev_resume); -int mddev_congested(struct mddev *mddev, int bits) -{ - struct md_personality *pers = mddev->pers; - int ret = 0; - - rcu_read_lock(); - if (mddev->suspended) - ret = 1; - else if (pers && pers->congested) - ret = pers->congested(mddev, bits); - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(mddev_congested); -static int md_congested(void *data, int bits) -{ - struct mddev *mddev = data; - return mddev_congested(mddev, bits); -} - /* * Generic flush handling for md */ @@ -5641,7 +5621,7 @@ static int md_alloc(dev_t dev, char *name) mddev->hold_active = UNTIL_STOP; error = -ENOMEM; - mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE); + mddev->queue = blk_alloc_queue(NUMA_NO_NODE); if (!mddev->queue) goto abort; @@ -5670,6 +5650,7 @@ static int md_alloc(dev_t dev, char *name) * remove it now. 
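md_alloc() above makes the same switch as brd and dm: blk_alloc_queue() no longer takes a submission function, because the queue no longer owns one; the disk's fops do. The allocation side of the earlier sketch would look roughly like this (error paths kept minimal):

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* sketch, reusing example_dev/example_fops from the earlier sketch */
static int example_alloc_disk(struct example_dev *dev)
{
        struct gendisk *disk;

        dev->queue = blk_alloc_queue(NUMA_NO_NODE); /* no make_request fn */
        if (!dev->queue)
                return -ENOMEM;

        disk = alloc_disk(1);
        if (!disk) {
                blk_cleanup_queue(dev->queue);
                return -ENOMEM;
        }
        disk->queue = dev->queue;
        disk->fops = &example_fops;     /* ->submit_bio is found via this */
        disk->private_data = dev;       /* what ->submit_bio reads back */
        add_disk(disk);
        return 0;
}
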
*/ disk->flags |= GENHD_FL_EXT_DEVT; + disk->events |= DISK_EVENT_MEDIA_CHANGE; mddev->gendisk = disk; /* As soon as we call add_disk(), another thread could get * through to md_open, so make sure it doesn't get too far @@ -5964,8 +5945,6 @@ int md_run(struct mddev *mddev) blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); else blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); - mddev->queue->backing_dev_info->congested_data = mddev; - mddev->queue->backing_dev_info->congested_fn = md_congested; } if (pers->sync_request) { if (mddev->kobj.sd && @@ -6350,7 +6329,6 @@ static int do_md_stop(struct mddev *mddev, int mode, __md_stop_writes(mddev); __md_stop(mddev); - mddev->queue->backing_dev_info->congested_fn = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); @@ -7806,23 +7784,21 @@ static void md_release(struct gendisk *disk, fmode_t mode) mddev_put(mddev); } -static int md_media_changed(struct gendisk *disk) -{ - struct mddev *mddev = disk->private_data; - - return mddev->changed; -} - -static int md_revalidate(struct gendisk *disk) +static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) { struct mddev *mddev = disk->private_data; + unsigned int ret = 0; + if (mddev->changed) + ret = DISK_EVENT_MEDIA_CHANGE; mddev->changed = 0; - return 0; + return ret; } + static const struct block_device_operations md_fops = { .owner = THIS_MODULE, + .submit_bio = md_submit_bio, .open = md_open, .release = md_release, .ioctl = md_ioctl, @@ -7830,8 +7806,7 @@ static const struct block_device_operations md_fops = .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, - .media_changed = md_media_changed, - .revalidate_disk= md_revalidate, + .check_events = md_check_events, }; static int md_thread(void *arg) diff --git a/drivers/md/md.h b/drivers/md/md.h index 612814d07d35..e2f1ad9afc48 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -597,9 +597,6 @@ struct md_personality * array. */ void *(*takeover) (struct mddev *mddev); - /* congested implements bdi.congested_fn(). - * Will not be called while array is 'suspended' */ - int (*congested)(struct mddev *mddev, int bits); /* Changes the consistency policy of an active array. 
*/ int (*change_consistency_policy)(struct mddev *mddev, const char *buf); }; @@ -710,7 +707,6 @@ extern void md_done_sync(struct mddev *mddev, int blocks, int ok); extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); -extern int mddev_congested(struct mddev *mddev, int bits); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 322386ff5d22..f54a449f97aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -29,21 +29,6 @@ module_param(default_layout, int, 0644); (1L << MD_HAS_PPL) | \ (1L << MD_HAS_MULTIPLE_PPLS)) -static int raid0_congested(struct mddev *mddev, int bits) -{ - struct r0conf *conf = mddev->private; - struct md_rdev **devlist = conf->devlist; - int raid_disks = conf->strip_zone[0].nb_dev; - int i, ret = 0; - - for (i = 0; i < raid_disks && !ret ; i++) { - struct request_queue *q = bdev_get_queue(devlist[i]->bdev); - - ret |= bdi_congested(q->backing_dev_info, bits); - } - return ret; -} - /* * inform the user of the raid configuration */ @@ -495,7 +480,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, &mddev->bio_set); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; end = zone->zone_end; } else @@ -559,7 +544,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), bio->bi_iter.bi_sector); - generic_make_request(discard_bio); + submit_bio_noacct(discard_bio); } bio_endio(bio); } @@ -600,7 +585,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) struct bio *split = bio_split(bio, sectors, GFP_NOIO, &mddev->bio_set); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; } @@ -633,7 +618,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) disk_devt(mddev->gendisk), bio_sector); mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); - generic_make_request(bio); + submit_bio_noacct(bio); return true; } @@ -818,7 +803,6 @@ static struct md_personality raid0_personality= .size = raid0_size, .takeover = raid0_takeover, .quiesce = raid0_quiesce, - .congested = raid0_congested, }; static int __init raid0_init (void) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index dcd27f3da84e..960d854c07f8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -786,36 +786,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect return best_disk; } -static int raid1_congested(struct mddev *mddev, int bits) -{ - struct r1conf *conf = mddev->private; - int i, ret = 0; - - if ((bits & (1 << WB_async_congested)) && - conf->pending_count >= max_queued_requests) - return 1; - - rcu_read_lock(); - for (i = 0; i < conf->raid_disks * 2; i++) { - struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !test_bit(Faulty, &rdev->flags)) { - struct request_queue *q = bdev_get_queue(rdev->bdev); - - BUG_ON(!q); - - /* Note the '|| 1' - when read_balance prefers - * non-congested targets, it can be removed - */ - if ((bits & (1 << WB_async_congested)) || 1) - ret |= bdi_congested(q->backing_dev_info, bits); - else - ret &= bdi_congested(q->backing_dev_info, 
bits); - } - } - rcu_read_unlock(); - return ret; -} - static void flush_bio_list(struct r1conf *conf, struct bio *bio) { /* flush any pending bitmap writes to disk before proceeding w/ I/O */ @@ -834,7 +804,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) /* Just ignore it */ bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); bio = next; cond_resched(); } @@ -1312,7 +1282,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, max_sectors, gfp, &conf->bio_split); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; r1_bio->master_bio = bio; r1_bio->sectors = max_sectors; @@ -1338,7 +1308,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, disk_devt(mddev->gendisk), r1_bio->sector); - generic_make_request(read_bio); + submit_bio_noacct(read_bio); } static void raid1_write_request(struct mddev *mddev, struct bio *bio, @@ -1483,7 +1453,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; r1_bio->master_bio = bio; r1_bio->sectors = max_sectors; @@ -2240,7 +2210,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); - generic_make_request(wbio); + submit_bio_noacct(wbio); } put_sync_write_buf(r1_bio, 1); @@ -2926,7 +2896,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; - generic_make_request(bio); + submit_bio_noacct(bio); } } } else { @@ -2935,7 +2905,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; - generic_make_request(bio); + submit_bio_noacct(bio); } return nr_sectors; } @@ -3396,7 +3366,6 @@ static struct md_personality raid1_personality = .check_reshape = raid1_reshape, .quiesce = raid1_quiesce, .takeover = raid1_takeover, - .congested = raid1_congested, }; static int __init raid_init(void) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ec136e44aef7..353288bc4cb7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -848,31 +848,6 @@ static struct md_rdev *read_balance(struct r10conf *conf, return rdev; } -static int raid10_congested(struct mddev *mddev, int bits) -{ - struct r10conf *conf = mddev->private; - int i, ret = 0; - - if ((bits & (1 << WB_async_congested)) && - conf->pending_count >= max_queued_requests) - return 1; - - rcu_read_lock(); - for (i = 0; - (i < conf->geo.raid_disks || i < conf->prev.raid_disks) - && ret == 0; - i++) { - struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !test_bit(Faulty, &rdev->flags)) { - struct request_queue *q = bdev_get_queue(rdev->bdev); - - ret |= bdi_congested(q->backing_dev_info, bits); - } - } - rcu_read_unlock(); - return ret; -} - static void flush_pending_writes(struct r10conf *conf) { /* Any writes that have been queued but are awaiting @@ -917,7 +892,7 @@ static void flush_pending_writes(struct r10conf *conf) /* Just ignore it */ bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); bio = next; } 
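The raid1 hunks above (and the raid0/raid10 ones around them) all share one splitting idiom: carve off the prefix this pass can handle, chain it to the parent, resubmit the parent so the remainder is processed after the bios already queued on current->bio_list, and continue with the prefix. Condensed into a hedged helper:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Sketch of the md splitting idiom; bs stands in for the per-array
 * bio_set (mddev->bio_set or conf->bio_split in the hunks above).
 */
static struct bio *example_split_off(struct bio *bio, int sectors,
                                     struct bio_set *bs)
{
        struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

        bio_chain(split, bio);  /* parent completes only after split does */
        submit_bio_noacct(bio); /* requeue the remainder */
        return split;           /* caller keeps working on the prefix */
}
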
blk_finish_plug(&plug); @@ -1102,7 +1077,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) /* Just ignore it */ bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); bio = next; } kfree(plug); @@ -1194,7 +1169,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, gfp, &conf->bio_split); bio_chain(split, bio); allow_barrier(conf); - generic_make_request(bio); + submit_bio_noacct(bio); wait_barrier(conf); bio = split; r10_bio->master_bio = bio; @@ -1221,7 +1196,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, disk_devt(mddev->gendisk), r10_bio->sector); - generic_make_request(read_bio); + submit_bio_noacct(read_bio); return; } @@ -1479,7 +1454,7 @@ retry_write: GFP_NOIO, &conf->bio_split); bio_chain(split, bio); allow_barrier(conf); - generic_make_request(bio); + submit_bio_noacct(bio); wait_barrier(conf); bio = split; r10_bio->master_bio = bio; @@ -2099,7 +2074,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) tbio->bi_opf |= MD_FAILFAST; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); - generic_make_request(tbio); + submit_bio_noacct(tbio); } /* Now write out to any replacement devices @@ -2118,7 +2093,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&r10_bio->remaining); md_sync_acct(conf->mirrors[d].replacement->bdev, bio_sectors(tbio)); - generic_make_request(tbio); + submit_bio_noacct(tbio); } done: @@ -2241,7 +2216,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) wbio = r10_bio->devs[1].bio; wbio2 = r10_bio->devs[1].repl_bio; /* Need to test wbio2->bi_end_io before we call - * generic_make_request as if the former is NULL, + * submit_bio_noacct as if the former is NULL, * the latter is free to free wbio2. */ if (wbio2 && !wbio2->bi_end_io) @@ -2249,13 +2224,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) if (wbio->bi_end_io) { atomic_inc(&conf->mirrors[d].rdev->nr_pending); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); - generic_make_request(wbio); + submit_bio_noacct(wbio); } if (wbio2) { atomic_inc(&conf->mirrors[d].replacement->nr_pending); md_sync_acct(conf->mirrors[d].replacement->bdev, bio_sectors(wbio2)); - generic_make_request(wbio2); + submit_bio_noacct(wbio2); } } @@ -2889,7 +2864,7 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf) * a number of r10_bio structures, one for each out-of-sync device. * As we setup these structures, we collect all bio's together into a list * which we then process collectively to add pages, and then process again - * to pass to generic_make_request. + * to pass to submit_bio_noacct. * * The r10_bio structures are linked using a borrowed master_bio pointer. * This link is counted in ->remaining. 
When the r10_bio that points to NULL @@ -3496,7 +3471,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (bio->bi_end_io == end_sync_read) { md_sync_acct_bio(bio, nr_sectors); bio->bi_status = 0; - generic_make_request(bio); + submit_bio_noacct(bio); } } @@ -4654,7 +4629,7 @@ read_more: md_sync_acct_bio(read_bio, r10_bio->sectors); atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; - generic_make_request(read_bio); + submit_bio_noacct(read_bio); sectors_done += nr_sectors; if (sector_nr <= last) goto read_more; @@ -4717,7 +4692,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) md_sync_acct_bio(b, r10_bio->sectors); atomic_inc(&r10_bio->remaining); b->bi_next = NULL; - generic_make_request(b); + submit_bio_noacct(b); } end_reshape_request(r10_bio); } @@ -4929,7 +4904,6 @@ static struct md_personality raid10_personality = .start_reshape = raid10_start_reshape, .finish_reshape = raid10_finish_reshape, .update_reshape_pos = raid10_update_reshape_pos, - .congested = raid10_congested, }; static int __init raid_init(void) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ab8067f9ce8c..774ea893d47e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -873,7 +873,7 @@ static void dispatch_bio_list(struct bio_list *tmp) struct bio *bio; while ((bio = bio_list_pop(tmp))) - generic_make_request(bio); + submit_bio_noacct(bio); } static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) @@ -1151,7 +1151,7 @@ again: if (should_defer && op_is_write(op)) bio_list_add(&pending_bios, bi); else - generic_make_request(bi); + submit_bio_noacct(bi); } if (rrdev) { if (s->syncing || s->expanding || s->expanded @@ -1201,7 +1201,7 @@ again: if (should_defer && op_is_write(op)) bio_list_add(&pending_bios, rbi); else - generic_make_request(rbi); + submit_bio_noacct(rbi); } if (!rdev && !rrdev) { if (op_is_write(op)) @@ -5099,28 +5099,6 @@ static void activate_bit_delay(struct r5conf *conf, } } -static int raid5_congested(struct mddev *mddev, int bits) -{ - struct r5conf *conf = mddev->private; - - /* No difference between reads and writes. 
Just check - * how busy the stripe_cache is - */ - - if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) - return 1; - - /* Also checks whether there is pressure on r5cache log space */ - if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) - return 1; - if (conf->quiesce) - return 1; - if (atomic_read(&conf->empty_inactive_list_nr)) - return 1; - - return 0; -} - static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) { struct r5conf *conf = mddev->private; @@ -5289,7 +5267,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) trace_block_bio_remap(align_bi->bi_disk->queue, align_bi, disk_devt(mddev->gendisk), raid_bio->bi_iter.bi_sector); - generic_make_request(align_bi); + submit_bio_noacct(align_bi); return 1; } else { rcu_read_unlock(); @@ -5309,7 +5287,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) struct r5conf *conf = mddev->private; split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, raid_bio); - generic_make_request(raid_bio); + submit_bio_noacct(raid_bio); raid_bio = split; } @@ -8427,7 +8405,6 @@ static struct md_personality raid6_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid6_takeover, - .congested = raid5_congested, .change_consistency_policy = raid5_change_consistency_policy, }; static struct md_personality raid5_personality = @@ -8452,7 +8429,6 @@ static struct md_personality raid5_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid5_takeover, - .congested = raid5_congested, .change_consistency_policy = raid5_change_consistency_policy, }; @@ -8478,7 +8454,6 @@ static struct md_personality raid4_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid4_takeover, - .congested = raid5_congested, .change_consistency_policy = raid5_change_consistency_policy, }; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7896952de1ac..fa313b634135 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -312,10 +312,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) mutex_lock(&block_mutex); if (md) { - if (md->usage == 2) - check_disk_change(bdev); ret = 0; - if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; @@ -1446,7 +1443,7 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq) */ if (mq->in_recovery) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); } @@ -1926,7 +1923,7 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq) */ if (mq->in_recovery) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); } @@ -1936,7 +1933,7 @@ void mmc_blk_mq_complete(struct request *req) if (mq->use_cqe) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) mmc_blk_mq_complete_rq(mq, req); } @@ -1988,7 +1985,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) */ if (mq->in_recovery) mmc_blk_mq_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); mmc_blk_mq_dec_in_flight(mq, req); diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 39030a324d7f..1f718381a045 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk, 
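The mmc hunks above introduce the series' other recurring pattern: blk_mq_complete_request() no longer checks the timeout fault-injection state itself, so each driver completion path that can race with injected timeouts now guards itself with blk_should_fake_timeout(). The guard always has the same shape:

#include <linux/blk-mq.h>

/* sketch: complete a request unless fault injection wants it to be
 * seen as timed out, so the timeout handler fires instead */
static void example_complete(struct request *req)
{
        if (unlikely(blk_should_fake_timeout(req->q)))
                return;
        blk_mq_complete_request(req);
}
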
return err; } -static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t nd_blk_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip; struct nd_namespace_blk *nsblk = bio->bi_disk->private_data; @@ -225,6 +225,7 @@ static int nsblk_rw_bytes(struct nd_namespace_common *ndns, static const struct block_device_operations nd_blk_fops = { .owner = THIS_MODULE, + .submit_bio = nd_blk_submit_bio, .revalidate_disk = nvdimm_revalidate_disk, }; @@ -250,7 +251,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk)); available_disk_size = internal_nlba * nsblk_sector_size(nsblk); - q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE); + q = blk_alloc_queue(NUMA_NO_NODE); if (!q) return -ENOMEM; if (devm_add_action_or_reset(dev, nd_blk_release_queue, q)) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 48e9d169b6f9..412d21d8f643 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1439,7 +1439,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, return ret; } -static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t btt_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); struct btt *btt = bio->bi_disk->private_data; @@ -1512,6 +1512,7 @@ static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo) static const struct block_device_operations btt_fops = { .owner = THIS_MODULE, + .submit_bio = btt_submit_bio, .rw_page = btt_rw_page, .getgeo = btt_getgeo, .revalidate_disk = nvdimm_revalidate_disk, @@ -1523,7 +1524,7 @@ static int btt_blk_init(struct btt *btt) struct nd_namespace_common *ndns = nd_btt->ndns; /* create a new disk and request queue for btt */ - btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE); + btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE); if (!btt->btt_queue) return -ENOMEM; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index d25e66fd942d..94790e6e0e4c 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -189,7 +189,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, return rc; } -static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t pmem_submit_bio(struct bio *bio) { int ret = 0; blk_status_t rc = 0; @@ -281,6 +281,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, static const struct block_device_operations pmem_fops = { .owner = THIS_MODULE, + .submit_bio = pmem_submit_bio, .rw_page = pmem_rw_page, .revalidate_disk = nvdimm_revalidate_disk, }; @@ -423,7 +424,7 @@ static int pmem_attach_disk(struct device *dev, return -EBUSY; } - q = blk_alloc_queue(pmem_make_request, dev_to_node(dev)); + q = blk_alloc_queue(dev_to_node(dev)); if (!q) return -ENOMEM; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index add040168e67..71c2c1bf3cc1 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -304,7 +304,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; - blk_mq_force_complete_rq(req); + blk_mq_complete_request(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); @@ -2184,6 +2184,7 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) const struct block_device_operations nvme_ns_head_ops = { .owner = THIS_MODULE, + .submit_bio = nvme_ns_head_submit_bio, .open = 
nvme_ns_head_open, .release = nvme_ns_head_release, .ioctl = nvme_ioctl, diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e999a8c4b7e8..6aa30bb5a762 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -227,6 +227,7 @@ static DECLARE_COMPLETION(nvme_fc_unload_proceed); */ static struct device *fc_udev_device; +static void nvme_fc_complete_rq(struct request *rq); /* *********************** FC-NVME Port Management ************************ */ @@ -2033,7 +2034,8 @@ done: } __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); - nvme_end_request(rq, status, result); + if (!nvme_end_request(rq, status, result)) + nvme_fc_complete_rq(rq); check_error: if (terminate_assoc) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 66509472fe06..5a37a595411e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -291,8 +291,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) return false; } -static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, - struct bio *bio) +blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) { struct nvme_ns_head *head = bio->bi_disk->private_data; struct device *dev = disk_to_dev(head->disk); @@ -301,12 +300,11 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, int srcu_idx; /* - * The namespace might be going away and the bio might - * be moved to a different queue via blk_steal_bios(), - * so we need to use the bio_split pool from the original - * queue to allocate the bvecs from. + * The namespace might be going away and the bio might be moved to a + * different queue via blk_steal_bios(), so we need to use the bio_split + * pool from the original queue to allocate the bvecs from. */ - blk_queue_split(q, &bio); + blk_queue_split(&bio); srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); @@ -316,7 +314,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, trace_block_bio_remap(bio->bi_disk->queue, bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); - ret = direct_make_request(bio); + ret = submit_bio_noacct(bio); } else if (nvme_available_path(head)) { dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); @@ -353,7 +351,7 @@ static void nvme_requeue_work(struct work_struct *work) * path. 
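The fc hunk above and the nvme.h hunk below show how both themes meet in nvme: nvme_end_request() now returns a bool, true when the completion was either deferred to another CPU or swallowed by fake-timeout injection, false when the transport must complete the request synchronously in the current context. Every transport therefore grows the same two-line tail, sketched here with a hypothetical transport callback standing in for nvme_pci_complete_rq(), nvme_fc_complete_rq(), and friends:

#include <linux/blk-mq.h>
#include "nvme.h"       /* driver-internal header, for nvme_end_request() */

static void example_transport_complete_rq(struct request *rq)
{
        nvme_complete_rq(rq);   /* direct completion on this CPU */
}

/* sketch of the per-transport completion tail after this series */
static void example_handle_cqe(struct request *rq, __le16 status,
                               union nvme_result result)
{
        if (!nvme_end_request(rq, status, result))
                example_transport_complete_rq(rq);
}
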
*/ bio->bi_disk = head->disk; - generic_make_request(bio); + submit_bio_noacct(bio); } } @@ -375,7 +373,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) return 0; - q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node); + q = blk_alloc_queue(ctrl->numa_node); if (!q) goto out; blk_queue_flag_set(QUEUE_FLAG_NONROT, q); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1de3f9b827aa..26099eaf1b1c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -474,7 +474,7 @@ static inline u32 nvme_bytes_to_numd(size_t len) return (len >> 2) - 1; } -static inline void nvme_end_request(struct request *req, __le16 status, +static inline bool nvme_end_request(struct request *req, __le16 status, union nvme_result result) { struct nvme_request *rq = nvme_req(req); @@ -483,7 +483,9 @@ static inline void nvme_end_request(struct request *req, __le16 status, rq->result = result; /* inject error when permitted by fault injection framework */ nvme_should_fail(req); - blk_mq_complete_request(req); + if (unlikely(blk_should_fake_timeout(req->q))) + return true; + return blk_mq_complete_request_remote(req); } static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl) @@ -586,6 +588,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); +blk_qc_t nvme_ns_head_submit_bio(struct bio *bio); static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b1d18f0633c7..74a2e2e00794 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -963,7 +963,8 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); - nvme_end_request(req, cqe->status, cqe->result); + if (!nvme_end_request(req, cqe->status, cqe->result)) + nvme_pci_complete_rq(req); } static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 13506a87a444..e881f879ac63 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -149,6 +149,7 @@ MODULE_PARM_DESC(register_always, static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); +static void nvme_rdma_complete_rq(struct request *rq); static const struct blk_mq_ops nvme_rdma_mq_ops; static const struct blk_mq_ops nvme_rdma_admin_mq_ops; @@ -1149,6 +1150,16 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) queue_work(nvme_reset_wq, &ctrl->err_work); } +static void nvme_rdma_end_request(struct nvme_rdma_request *req) +{ + struct request *rq = blk_mq_rq_from_pdu(req); + + if (!refcount_dec_and_test(&req->ref)) + return; + if (!nvme_end_request(rq, req->status, req->result)) + nvme_rdma_complete_rq(rq); +} + static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, const char *op) { @@ -1173,16 +1184,11 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvme_rdma_request *req = container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); - struct request *rq = blk_mq_rq_from_pdu(req); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if 
(unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); - return; - } - - if (refcount_dec_and_test(&req->ref)) - nvme_end_request(rq, req->status, req->result); - + else + nvme_rdma_end_request(req); } static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, @@ -1547,15 +1553,11 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); struct nvme_rdma_request *req = container_of(qe, struct nvme_rdma_request, sqe); - struct request *rq = blk_mq_rq_from_pdu(req); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "SEND"); - return; - } - - if (refcount_dec_and_test(&req->ref)) - nvme_end_request(rq, req->status, req->result); + else + nvme_rdma_end_request(req); } static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, @@ -1697,8 +1699,7 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, return; } - if (refcount_dec_and_test(&req->ref)) - nvme_end_request(rq, req->status, req->result); + nvme_rdma_end_request(req); } static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 79ef2b8e2b3c..7006aca89456 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -464,7 +464,8 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, return -EINVAL; } - nvme_end_request(rq, cqe->status, cqe->result); + if (!nvme_end_request(rq, cqe->status, cqe->result)) + nvme_complete_rq(rq); queue->nr_cqe++; return 0; @@ -654,7 +655,8 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status) { union nvme_result res = {}; - nvme_end_request(rq, cpu_to_le16(status << 1), res); + if (!nvme_end_request(rq, cpu_to_le16(status << 1), res)) + nvme_complete_rq(rq); } static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 6e2f623e472e..6816507fba58 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -467,7 +467,7 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns) return -EINVAL; } - if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) { + if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) { pr_err("peer-to-peer DMA is not supported by the driver of %s\n", ns->device_path); return -EINVAL; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 6344e73c9354..8a0d4fe7bc18 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -116,7 +116,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req) return; } - nvme_end_request(rq, cqe->status, cqe->result); + if (!nvme_end_request(rq, cqe->status, cqe->result)) + nvme_loop_complete_rq(rq); } } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index cf87eb27879f..eb17fea8075c 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2802,7 +2802,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) blk_update_request(req, BLK_STS_OK, blk_rq_bytes(req) - proc_bytes); blk_mq_requeue_request(req, true); - } else { + } else if (likely(!blk_should_fake_timeout(req->q))) { blk_mq_complete_request(req); } } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 384edffe5cb4..299e77ec2c41 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -31,8 +31,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t 
mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); -static blk_qc_t dcssblk_make_request(struct request_queue *q, - struct bio *bio); +static blk_qc_t dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); @@ -41,6 +40,7 @@ static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; static int dcssblk_major; static const struct block_device_operations dcssblk_devops = { .owner = THIS_MODULE, + .submit_bio = dcssblk_submit_bio, .open = dcssblk_open, .release = dcssblk_release, }; @@ -651,8 +651,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char } dev_info->gd->major = dcssblk_major; dev_info->gd->fops = &dcssblk_devops; - dev_info->dcssblk_queue = - blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE); + dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); @@ -833,7 +832,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode) goto out; } atomic_inc(&dev_info->use_count); - bdev->bd_block_size = 4096; rc = 0; out: return rc; @@ -868,7 +866,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) } static blk_qc_t -dcssblk_make_request(struct request_queue *q, struct bio *bio) +dcssblk_submit_bio(struct bio *bio) { struct dcssblk_dev_info *dev_info; struct bio_vec bvec; @@ -878,7 +876,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) unsigned long source_addr; unsigned long bytes_done; - blk_queue_split(q, &bio); + blk_queue_split(&bio); bytes_done = 0; dev_info = bio->bi_disk->private_data; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index e01889394c84..a4f6f2e62b1d 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -256,7 +256,8 @@ static void scm_request_finish(struct scm_request *scmrq) for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { error = blk_mq_rq_to_pdu(scmrq->request[i]); *error = scmrq->error; - blk_mq_complete_request(scmrq->request[i]); + if (likely(!blk_should_fake_timeout(scmrq->request[i]->q))) + blk_mq_complete_request(scmrq->request[i]); } atomic_dec(&bdev->queued_reqs); diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 45a04daec89e..c2536f7767b3 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -182,7 +182,7 @@ static unsigned long xpram_highest_page_index(void) /* * Block device make request function. 
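dcssblk and xpram here pick up the same one-argument blk_queue_split() that dm and md switched to earlier: the function now derives the queue from the bio itself. Revisiting the earlier ->submit_bio sketch, the usual prologue becomes:

static blk_qc_t example_submit_bio(struct bio *bio)
{
        blk_queue_split(&bio);  /* the queue is taken from bio->bi_disk */
        /* ... per-driver handling as in the earlier sketch ... */
        return BLK_QC_T_NONE;
}
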
*/ -static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t xpram_submit_bio(struct bio *bio) { xpram_device_t *xdev = bio->bi_disk->private_data; struct bio_vec bvec; @@ -191,7 +191,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) unsigned long page_addr; unsigned long bytes; - blk_queue_split(q, &bio); + blk_queue_split(&bio); if ((bio->bi_iter.bi_sector & 7) != 0 || (bio->bi_iter.bi_size & 4095) != 0) @@ -250,6 +250,7 @@ static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations xpram_devops = { .owner = THIS_MODULE, + .submit_bio = xpram_submit_bio, .getgeo = xpram_getgeo, }; @@ -343,8 +344,7 @@ static int __init xpram_setup_blkdev(void) xpram_disks[i] = alloc_disk(1); if (!xpram_disks[i]) goto out; - xpram_queues[i] = blk_alloc_queue(xpram_make_request, - NUMA_NO_NODE); + xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE); if (!xpram_queues[i]) { put_disk(xpram_disks[i]); goto out; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d..534b85e87c80 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1589,31 +1589,23 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { + if (unlikely(blk_should_fake_timeout(cmd->request->q))) + return; if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) return; trace_scsi_dispatch_cmd_done(cmd); - - /* - * If the block layer didn't complete the request due to a timeout - * injection, scsi must clear its internal completed state so that the - * timeout handler will see it needs to escalate its own error - * recovery. - */ - if (unlikely(!blk_mq_complete_request(cmd->request))) - clear_bit(SCMD_STATE_COMPLETE, &cmd->state); + blk_mq_complete_request(cmd->request); } -static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) +static void scsi_mq_put_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; struct scsi_device *sdev = q->queuedata; atomic_dec(&sdev->device_busy); } -static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) +static bool scsi_mq_get_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; struct scsi_device *sdev = q->queuedata; return scsi_dev_queue_ready(q, sdev); @@ -1680,7 +1672,7 @@ out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); out_put_budget: - scsi_mq_put_budget(hctx); + scsi_mq_put_budget(q); switch (ret) { case BLK_STS_OK: break; diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 7c95afa905a0..a8e39b2cdd55 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -403,7 +403,6 @@ static const struct sysrq_key_op sysrq_moom_op = { .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -#ifdef CONFIG_BLOCK static void sysrq_handle_thaw(int key) { emergency_thaw_all(); @@ -414,7 +413,6 @@ static const struct sysrq_key_op sysrq_thaw_op = { .action_msg = "Emergency Thaw of all frozen filesystems", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -#endif static void sysrq_handle_kill(int key) { |
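The scsi_lib hunk also captures a blk_mq_ops interface change from this series: .get_budget and .put_budget now take the request_queue directly instead of a hardware context, since the budget is a per-device resource and the callbacks only ever used hctx->queue anyway. Sketched signatures, with hypothetical driver state and an arbitrary depth limit:

#include <linux/atomic.h>
#include <linux/blk-mq.h>

struct example_host {
        atomic_t busy;          /* hypothetical per-device budget counter */
};

static bool example_get_budget(struct request_queue *q)
{
        struct example_host *host = q->queuedata;

        if (atomic_inc_return(&host->busy) <= 8)        /* arbitrary depth */
                return true;
        atomic_dec(&host->busy);                        /* undo on failure */
        return false;
}

static void example_put_budget(struct request_queue *q)
{
        struct example_host *host = q->queuedata;

        atomic_dec(&host->busy);
}
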