author		Linus Torvalds	2022-10-07 09:19:14 -0700
committer	Linus Torvalds	2022-10-07 09:19:14 -0700
commit		513389809e138ae903b6ef43c1d5d2ffaf4dca17 (patch)
tree		c71e478fab1568da4706868b14eb67a75c148a8b /block/blk-core.c
parent		0a78a376ef3c2f3d397df48909f00cd75f92137a (diff)
parent		30514bd2dd4e86a3ecfd6a93a3eadf7b9ea164a0 (diff)
Merge tag 'for-6.1/block-2022-10-03' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:

 - NVMe pull requests via Christoph:
     - handle number of queue changes in the TCP and RDMA drivers (Daniel Wagner)
     - allow changing the number of queues in nvmet (Daniel Wagner)
     - also consider host_iface when checking ip options (Daniel Wagner)
     - don't map pages which can't come from HIGHMEM (Fabio M. De Francesco)
     - avoid unnecessary flush bios in nvmet (Guixin Liu)
     - shrink and better pack the nvme_iod structure (Keith Busch)
     - add comment for unaligned "fake" nqn (Linjun Bao)
     - print actual source IP address through sysfs "address" attr (Martin Belanger)
     - various cleanups (Jackie Liu, Wolfram Sang, Genjian Zhang)
     - handle effects after freeing the request (Keith Busch)
     - copy firmware_rev on each init (Keith Busch)
     - restrict management ioctls to admin (Keith Busch)
     - ensure subsystem reset is single threaded (Keith Busch)
     - report the actual number of tagset maps in nvme-pci (Keith Busch)
     - small fabrics authentication fixups (Christoph Hellwig)
     - add common code for tagset allocation and freeing (Christoph Hellwig)
     - stop using the request_queue in nvmet (Christoph Hellwig)
     - set min_align_mask before calculating max_hw_sectors (Rishabh Bhatnagar)
     - send a rediscover uevent when a persistent discovery controller reconnects (Sagi Grimberg)
     - misc nvmet-tcp fixes (Varun Prakash, zhenwei pi)

 - MD pull request via Song:
     - various raid5 fixes and cleanups, by Logan Gunthorpe and David Sloan
     - raid10 performance optimization, by Yu Kuai

 - sbitmap wakeup hang fixes (Hugh, Keith, Jan, Yu)

 - IO scheduler switching quiesce fix (Keith)

 - s390/dasd block driver updates (Stefan)

 - support for recovery for the ublk driver (ZiyangZhang)

 - rnbd drivers fixes and updates (Guoqing, Santosh, ye, Christoph)

 - blk-mq and null_blk map fixes (Bart)

 - various bcache fixes (Coly, Jilin, Jules)

 - nbd signal hang fix (Shigeru)

 - block writeback throttling fix (Yu)

 - optimize the passthrough mapping handling (me)

 - prepare block cgroups to be gendisk based (Christoph)

 - get rid of an old PSI hack in the block layer, moving it to the
   callers instead where it belongs (Christoph)

 - blk-throttle fixes and cleanups (Yu)

 - misc fixes and cleanups (Liu Shixin, Liu Song, Miaohe, Pankaj,
   Ping-Xiang, Wolfram, Saurabh, Li Jinlin, Li Lei, Lin, Li zeming,
   Miaohe, Bart, Coly, Gaosheng)

* tag 'for-6.1/block-2022-10-03' of git://git.kernel.dk/linux: (162 commits)
  sbitmap: fix lockup while swapping
  block: add rationale for not using blk_mq_plug() when applicable
  block: adapt blk_mq_plug() to not plug for writes that require a zone lock
  s390/dasd: use blk_mq_alloc_disk
  blk-cgroup: don't update the blkg lookup hint in blkg_conf_prep
  nvmet: don't look at the request_queue in nvmet_bdev_set_limits
  nvmet: don't look at the request_queue in nvmet_bdev_zone_mgmt_emulate_all
  blk-mq: use quiesced elevator switch when reinitializing queues
  block: replace blk_queue_nowait with bdev_nowait
  nvme: remove nvme_ctrl_init_connect_q
  nvme-loop: use the tagset alloc/free helpers
  nvme-loop: store the generic nvme_ctrl in set->driver_data
  nvme-loop: initialize sqsize later
  nvme-fc: use the tagset alloc/free helpers
  nvme-fc: store the generic nvme_ctrl in set->driver_data
  nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  nvme-rdma: use the tagset alloc/free helpers
  nvme-rdma: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: use the tagset alloc/free helpers
  nvme-tcp: store the generic nvme_ctrl in set->driver_data
  ...
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	37
1 file changed, 11 insertions(+), 26 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 651057c4146b..17667159482e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -37,7 +37,6 @@
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
-#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
@@ -487,18 +486,15 @@ static int __init fail_make_request_debugfs(void)
late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline bool bio_check_ro(struct bio *bio)
+static inline void bio_check_ro(struct bio *bio)
{
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
- return false;
+ return;
pr_warn("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
/* Older lvm-tools actually trigger this */
- return false;
}
-
- return false;
}
static noinline int should_fail_bio(struct bio *bio)
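For readability, here is bio_check_ro() as it stands after the hunk above, reconstructed directly from the diff: the helper now only warns on a write to a read-only device (older lvm-tools are known to trigger this case) and returns nothing, since every path previously returned false anyway.

	static inline void bio_check_ro(struct bio *bio)
	{
		if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
			if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
				return;
			pr_warn("Trying to write to read-only block-device %pg\n",
				bio->bi_bdev);
			/* Older lvm-tools actually trigger this */
		}
	}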
@@ -717,13 +713,12 @@ void submit_bio_noacct(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue does not support NOWAIT.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
+ if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
goto not_supported;
if (should_fail_bio(bio))
goto end_io;
- if (unlikely(bio_check_ro(bio)))
- goto end_io;
+ bio_check_ro(bio);
if (!bio_flagged(bio, BIO_REMAPPED)) {
if (unlikely(bio_check_eod(bio)))
goto end_io;
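The bdev_nowait() helper that replaces blk_queue_nowait() here is introduced elsewhere in this series ("block: replace blk_queue_nowait with bdev_nowait" in the shortlog above). A sketch of its shape, hedged as a reconstruction rather than code quoted from this patch:

	/*
	 * Sketch of the bdev_nowait() helper added by this series
	 * (include/linux/blkdev.h): the same NOWAIT queue-flag test as
	 * before, but taken from the bio's block_device, so
	 * submit_bio_noacct() no longer needs a request_queue reference
	 * just for this check.
	 */
	static inline bool bdev_nowait(struct block_device *bdev)
	{
		return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
	}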
@@ -814,7 +809,7 @@ EXPORT_SYMBOL(submit_bio_noacct);
*
* The success/failure status of the request, along with notification of
* completion, is delivered asynchronously through the ->bi_end_io() callback
- * in @bio. The bio must NOT be touched by thecaller until ->bi_end_io() has
+ * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
* been called.
*/
void submit_bio(struct bio *bio)
@@ -829,22 +824,6 @@ void submit_bio(struct bio *bio)
count_vm_events(PGPGOUT, bio_sectors(bio));
}
- /*
- * If we're reading data that is part of the userspace workingset, count
- * submission time as memory stall. When the device is congested, or
- * the submitting cgroup IO-throttled, submission can be a significant
- * part of overall IO time.
- */
- if (unlikely(bio_op(bio) == REQ_OP_READ &&
- bio_flagged(bio, BIO_WORKINGSET))) {
- unsigned long pflags;
-
- psi_memstall_enter(&pflags);
- submit_bio_noacct(bio);
- psi_memstall_leave(&pflags);
- return;
- }
-
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
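With the BIO_WORKINGSET special case deleted, pressure-stall accounting is left to the callers that actually know a read is part of the userspace workingset (the "old PSI hack" item in the pull summary above). A hedged sketch of the caller-side pattern using the unchanged psi_memstall API; the bio variable is a stand-in, not code from this patch:

	unsigned long pflags;

	/*
	 * Illustrative only: a caller that knows this read refaults
	 * workingset data brackets the submission itself, now that
	 * submit_bio() no longer checks BIO_WORKINGSET.
	 */
	psi_memstall_enter(&pflags);
	submit_bio(bio);
	psi_memstall_leave(&pflags);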
@@ -871,6 +850,12 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;
+ /*
+ * As the requests that require a zone lock are not plugged in the
+ * first place, directly accessing the plug instead of using
+ * blk_mq_plug() should not have any consequences during flushing for
+ * zoned devices.
+ */
blk_flush_plug(current->plug, false);
if (bio_queue_enter(bio))
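The new comment relies on blk_mq_plug() never plugging writes that need a zone write lock; that behavior comes from "block: adapt blk_mq_plug() to not plug for writes that require a zone lock" in this same series. A hedged sketch of what that helper looks like after the series, reconstructed rather than quoted from this page:

	/*
	 * Sketch of blk_mq_plug() (block/blk-mq.h) after this series:
	 * zoned-device writes are never plugged, which is why bio_poll()
	 * above may flush current->plug directly without risking an early
	 * zone write lock release.
	 */
	static inline struct blk_plug *blk_mq_plug(struct bio *bio)
	{
		/* Zoned block device write operation case: do not plug the BIO */
		if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
			return NULL;

		/*
		 * For regular block devices or read operations, use the
		 * context plug, which may be NULL if blk_start_plug() was
		 * not executed.
		 */
		return current->plug;
	}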