diff options
author:    Jens Axboe <axboe@kernel.dk>  2022-08-05 16:39:04 -0600
committer: Jens Axboe <axboe@kernel.dk>  2022-08-22 10:07:56 -0600
commit:    f5d632d15e9e0a037339601680d82bb840f85d10 (patch)
tree:      9b13ba0fcdf8da7c7fdd6a93e072517cd79d5b6e
parent:    d322f355e9368045ff89b7a3df9324fe0c33839b (diff)
block: shrink rq_map_data a bit
We don't need full ints for several of these members. Change the
page_order and nr_entries to unsigned shorts, and the true/false from_user
and null_mapped to booleans.
This shrinks the struct from 32 to 24 bytes on 64-bit archs.
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-map.c        | 2 +-
 include/linux/blk-mq.h | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 7196a6b64c80..379c52d2f2d1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -158,7 +158,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

 	if (map_data) {
-		nr_pages = 1 << map_data->page_order;
+		nr_pages = 1U << map_data->page_order;
 		i = map_data->offset / PAGE_SIZE;
 	}
 	while (len) {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c38575209d51..74b99d716b0b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -963,11 +963,11 @@ blk_status_t blk_insert_cloned_request(struct request *rq);

 struct rq_map_data {
 	struct page **pages;
-	int page_order;
-	int nr_entries;
 	unsigned long offset;
-	int null_mapped;
-	int from_user;
+	unsigned short page_order;
+	unsigned short nr_entries;
+	bool null_mapped;
+	bool from_user;
 };

 int blk_rq_map_user(struct request_queue *, struct request *,