path: root/block/blk-core.c
author     Konstantin Khlebnikov <khlebnikov@yandex-team.ru>  2020-05-27 07:24:16 +0200
committer  Jens Axboe <axboe@kernel.dk>  2020-05-27 05:21:23 -0600
commit     b5af37ab3a2b143e278340d2c6fa5790d53817e7 (patch)
tree       ff8c29b292e97a0ae4ceaa79e37bdb0bb4f8c8d7  /block/blk-core.c
parent     b9c54f5660e7eff10dd2ddd1eae554573105b15d (diff)
block: add a blk_account_io_merge_bio helper
Move the non-"new_io" branch of blk_account_io_start() into a separate
function. This also fixes merge accounting for discards, which were
previously counted as write merges.

The new blk_account_io_merge_bio() does not call update_io_ticks(),
unlike blk_account_io_start(), as there is no reason to do so there.

[hch: rebased]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
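For context, a rough standalone illustration of why the old merges[rq_data_dir(rq)] bucket misfiled discard merges, while the new merges[op_stat_group(req_op(req))] index gives discards their own slot. This is a minimal userspace sketch, not kernel code: the op numbering and the *_like() helpers only mirror the general shape of the kernel's helpers (write-like ops having odd opcodes, op_stat_group() special-casing discards) and should be treated as assumptions for illustration.

    /*
     * Illustration only: why a discard merge used to land in the WRITE
     * bucket when indexed by data direction, but lands in its own bucket
     * when indexed by stat group.
     */
    #include <stdio.h>

    enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3 }; /* assumed layout */
    enum { STAT_READ = 0, STAT_WRITE = 1, STAT_DISCARD = 2 };

    static int op_is_write_like(unsigned int op)   { return op & 1; }

    /* Old indexing: only READ (0) or WRITE (1), so discards count as writes. */
    static int rq_data_dir_like(unsigned int op)   { return op_is_write_like(op) ? 1 : 0; }

    /* New indexing: discards get a dedicated stat group. */
    static int op_stat_group_like(unsigned int op)
    {
            if (op == REQ_OP_DISCARD)
                    return STAT_DISCARD;
            return op_is_write_like(op) ? STAT_WRITE : STAT_READ;
    }

    int main(void)
    {
            printf("discard, old bucket: %d (WRITE)\n",
                   rq_data_dir_like(REQ_OP_DISCARD));
            printf("discard, new bucket: %d (STAT_DISCARD)\n",
                   op_stat_group_like(REQ_OP_DISCARD));
            return 0;
    }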
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c1675d43c2da..bf2f7d4bc0c1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -636,6 +636,16 @@ void blk_put_request(struct request *req)
}
EXPORT_SYMBOL(blk_put_request);
+static void blk_account_io_merge_bio(struct request *req)
+{
+ if (!blk_do_io_stat(req))
+ return;
+
+ part_stat_lock();
+ part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
+ part_stat_unlock();
+}
+
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
unsigned int nr_segs)
{
@@ -656,7 +666,7 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio,
bio_crypt_free_ctx(bio);
- blk_account_io_start(req, false);
+ blk_account_io_merge_bio(req);
return true;
}
@@ -682,7 +692,7 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio,
bio_crypt_do_front_merge(req, bio);
- blk_account_io_start(req, false);
+ blk_account_io_merge_bio(req);
return true;
}
@@ -704,7 +714,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
req->__data_len += bio->bi_iter.bi_size;
req->nr_phys_segments = segments + 1;
- blk_account_io_start(req, false);
+ blk_account_io_merge_bio(req);
return true;
no_merge:
req_set_nomerge(q, req);
@@ -1329,7 +1339,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
return BLK_STS_IOERR;
if (blk_queue_io_stat(q))
- blk_account_io_start(rq, true);
+ blk_account_io_start(rq);
/*
* Since we have a scheduler attached on the top device,
@@ -1433,16 +1443,13 @@ void blk_account_io_done(struct request *req, u64 now)
}
}
-void blk_account_io_start(struct request *rq, bool new_io)
+void blk_account_io_start(struct request *rq)
{
if (!blk_do_io_stat(rq))
return;
part_stat_lock();
- if (!new_io)
- part_stat_inc(rq->part, merges[rq_data_dir(rq)]);
- else
- rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
update_io_ticks(rq->part, jiffies, false);
part_stat_unlock();
}