author    Jens Axboe    2019-08-15 11:09:16 -0600
committer Jens Axboe    2019-08-15 11:09:16 -0600
commit    7b6620d7db566a46f49b4b9deab9fa061fd4b59b (patch)
tree      ffa0da802600d0550043e195c102ea99c75637b5
parent    99c79f6692ccdc42e04deea8a36e22bb48168a62 (diff)
block: remove REQ_NOWAIT_INLINE
We had a few issues with this code, and there's still a problem around
how we deal with error handling for chained/split bios. For now, just
revert the code and we'll try again with a thorough solution.

This reverts commits:

e15c2ffa1091 ("block: fix O_DIRECT error handling for bio fragments")
0eb6ddfb865c ("block: Fix __blkdev_direct_IO() for bio fragments")
6a43074e2f46 ("block: properly handle IOCB_NOWAIT for async O_DIRECT IO")
893a1c97205a ("blk-mq: allow REQ_NOWAIT to return an error inline")

Signed-off-by: Jens Axboe <axboe@kernel.dk>
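For context, and not part of this commit: REQ_NOWAIT_INLINE only changed how the
block layer reported a would-block condition internally (an inline cookie instead
of the bio completion path). The intended userspace contract for nowait O_DIRECT
is unchanged by the revert: an RWF_NOWAIT read that cannot proceed without
blocking fails with -EAGAIN and the caller retries. A minimal userspace sketch of
that caller side, assuming a hypothetical /dev/nvme0n1 device and a 4096-byte
logical block size:

/*
 * Illustrative sketch (not from this commit): issue an O_DIRECT read with
 * RWF_NOWAIT and fall back to a blocking read on -EAGAIN.  The device path
 * and the 4096-byte alignment are assumptions for the example.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* hypothetical device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT needs the buffer, length and offset block-aligned. */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* Try without blocking first; -EAGAIN means the I/O would block. */
	ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		ret = preadv2(fd, &iov, 1, 0, 0);	/* blocking retry */

	if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes\n", ret);

	free(buf);
	close(fd);
	return ret < 0;
}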
-rw-r--r--  block/blk-mq.c             |  8
-rw-r--r--  fs/block_dev.c             | 49
-rw-r--r--  include/linux/blk_types.h  |  5
3 files changed, 8 insertions, 54 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8e6a58f5f28..0835f4d8d42e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index eb657ab94060..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct bio *bio;
 	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
-	bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
-	gfp_t gfp;
-	int ret;
+	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	if (nowait)
-		gfp = GFP_NOWAIT;
-	else
-		gfp = GFP_KERNEL;
-
-	bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
-	if (!bio)
-		return -EAGAIN;
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	if (!is_poll)
 		blk_start_plug(&plug);
 
-	ret = 0;
 	for (;;) {
 		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
 
-		/*
-		 * Tell underlying layer to not block for resource shortage.
-		 * And if we would have blocked, return error inline instead
-		 * of through the bio->bi_end_io() callback.
-		 */
-		if (nowait)
-			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;
 
 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				polled = true;
 			}
 
-			dio->size += bio->bi_iter.bi_size;
 			qc = submit_bio(bio);
-			if (qc == BLK_QC_T_EAGAIN) {
-				dio->size -= bio->bi_iter.bi_size;
-				ret = -EAGAIN;
-				goto error;
-			}
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}
 
-		dio->size += bio->bi_iter.bi_size;
-		qc = submit_bio(bio);
-		if (qc == BLK_QC_T_EAGAIN) {
-			dio->size -= bio->bi_iter.bi_size;
-			ret = -EAGAIN;
-			goto error;
-		}
-
-		bio = bio_alloc(gfp, nr_pages);
-		if (!bio) {
-			ret = -EAGAIN;
-			goto error;
-		}
+		submit_bio(bio);
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
 	if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	__set_current_state(TASK_RUNNING);
 
-out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
@@ -495,10 +460,6 @@ out:
 
 	bio_put(&dio->bio);
 	return ret;
-error:
-	if (!is_poll)
-		blk_finish_plug(&plug);
-	goto out;
 }
 
 static ssize_t
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1b1fa1557e68..feff3fe4467e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,7 +311,6 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,		/* Don't wait if request will block */
-	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -346,7 +345,6 @@ enum req_flag_bits {
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)