author     Christoph Hellwig  2021-11-26 12:58:10 +0100
committer  Jens Axboe         2021-11-29 06:41:29 -0700
commit     87dd1d63dcbd0f508a8b23785752e78d082fd176 (patch)
tree       4a1486a7bda709766ec38214da8adb2850fd2554 /block
parent     3304742562d27fb87a6d8291cc48824dd20f6964 (diff)
block: move blk_mq_sched_assign_ioc to blk-ioc.c
Move blk_mq_sched_assign_ioc so that many interfaces from the file can
be marked static. Rename the function to ioc_find_get_icq as well and
return the icq to simplify the interface.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211126115817.2087431-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c    2
-rw-r--r--  block/blk-ioc.c       39
-rw-r--r--  block/blk-mq-sched.c  31
-rw-r--r--  block/blk-mq-sched.h   2
-rw-r--r--  block/blk.h            6
5 files changed, 37 insertions, 43 deletions
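
The caller-visible effect of the rename is easiest to see side by side. Here is a minimal sketch of the interface before and after this commit, distilled from the hunks below (the elided lines stand for surrounding code that is not part of either call):

/* Before: the scheduler glue assigned rq->elv.icq as a side effect. */
void blk_mq_sched_assign_ioc(struct request *rq);
	...
	blk_mq_sched_assign_ioc(rq);

/* After: the helper returns the icq (or NULL) and the caller stores it,
 * which is what lets get_io_context(), ioc_create_icq() and
 * create_task_io_context() all become static inside blk-ioc.c. */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
	...
	rq->elv.icq = ioc_find_get_icq(rq->q);
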
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index ecc2e57e6863..2d484d3f7f22 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6666,7 +6666,7 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
*/
static void bfq_prepare_request(struct request *rq)
{
- blk_mq_sched_assign_ioc(rq);
+ rq->elv.icq = ioc_find_get_icq(rq->q);
/*
* Regardless of whether we have an icq attached, we have to
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f3ff495756cb..f4f84a2072be 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -24,7 +24,7 @@ static struct kmem_cache *iocontext_cachep;
*
* Increment reference count to @ioc.
*/
-void get_io_context(struct io_context *ioc)
+static void get_io_context(struct io_context *ioc)
{
BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
atomic_long_inc(&ioc->refcount);
@@ -248,7 +248,8 @@ void ioc_clear_queue(struct request_queue *q)
__ioc_clear_queue(&icq_list);
}
-int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
+static int create_task_io_context(struct task_struct *task, gfp_t gfp_flags,
+ int node)
{
struct io_context *ioc;
int ret;
@@ -397,8 +398,8 @@ EXPORT_SYMBOL(ioc_lookup_icq);
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
- gfp_t gfp_mask)
+static struct io_cq *ioc_create_icq(struct io_context *ioc,
+ struct request_queue *q, gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
struct io_cq *icq;
@@ -441,6 +442,36 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
return icq;
}
+struct io_cq *ioc_find_get_icq(struct request_queue *q)
+{
+ struct io_context *ioc;
+ struct io_cq *icq;
+
+ /* create task io_context, if we don't have one already */
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, GFP_ATOMIC, q->node);
+
+ /*
+ * May not have an IO context if it's a passthrough request
+ */
+ ioc = current->io_context;
+ if (!ioc)
+ return NULL;
+
+ spin_lock_irq(&q->queue_lock);
+ icq = ioc_lookup_icq(ioc, q);
+ spin_unlock_irq(&q->queue_lock);
+
+ if (!icq) {
+ icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
+ if (!icq)
+ return NULL;
+ }
+ get_io_context(icq->ioc);
+ return icq;
+}
+EXPORT_SYMBOL_GPL(ioc_find_get_icq);
+
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index b942b38000e5..0d7257848f7e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -18,37 +18,6 @@
#include "blk-mq-tag.h"
#include "blk-wbt.h"
-void blk_mq_sched_assign_ioc(struct request *rq)
-{
- struct request_queue *q = rq->q;
- struct io_context *ioc;
- struct io_cq *icq;
-
- /* create task io_context, if we don't have one already */
- if (unlikely(!current->io_context))
- create_task_io_context(current, GFP_ATOMIC, q->node);
-
- /*
- * May not have an IO context if it's a passthrough request
- */
- ioc = current->io_context;
- if (!ioc)
- return;
-
- spin_lock_irq(&q->queue_lock);
- icq = ioc_lookup_icq(ioc, q);
- spin_unlock_irq(&q->queue_lock);
-
- if (!icq) {
- icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
- if (!icq)
- return;
- }
- get_io_context(icq->ioc);
- rq->elv.icq = icq;
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
-
/*
* Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart.
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 25d1034952b6..025013972453 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -8,8 +8,6 @@
#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
-void blk_mq_sched_assign_ioc(struct request *rq);
-
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk.h b/block/blk.h
index a57c84654d0a..187cb2654ffd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -363,14 +363,10 @@ static inline unsigned int bio_aligned_discard_max_sectors(
/*
* Internal io_context interface
*/
-void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
- gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
-int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
-
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
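
For readers less familiar with the pattern ioc_find_get_icq() follows (look up under the queue lock, create on a miss, take a reference before returning), here is a self-contained user-space sketch. The linked list, pthread mutex and atomic refcount are simplified stand-ins for the kernel's radix tree, q->queue_lock and io_context refcounting, not the actual block-layer API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	int key;
	atomic_long refcount;	/* 0 until a find_get() caller takes it */
	struct entry *next;
};

static struct entry *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lookup step: search under the lock, as ioc_lookup_icq() is called
 * under q->queue_lock above. */
static struct entry *lookup(int key)
{
	struct entry *e;

	pthread_mutex_lock(&table_lock);
	for (e = table; e; e = e->next)
		if (e->key == key)
			break;
	pthread_mutex_unlock(&table_lock);
	return e;
}

/* Create step: allocate outside the lock, then re-check for a racing
 * insertion before linking in, much as ioc_create_icq() falls back to
 * the existing icq when the radix tree insert reports one. */
static struct entry *create(int key)
{
	struct entry *e, *new = calloc(1, sizeof(*new));

	if (!new)
		return NULL;
	new->key = key;
	atomic_init(&new->refcount, 0);

	pthread_mutex_lock(&table_lock);
	for (e = table; e; e = e->next) {
		if (e->key == key) {	/* lost the race: use theirs */
			pthread_mutex_unlock(&table_lock);
			free(new);
			return e;
		}
	}
	new->next = table;
	table = new;
	pthread_mutex_unlock(&table_lock);
	return new;
}

/* find_get step: lookup, create on a miss, and take the reference
 * last, the same shape as ioc_find_get_icq(), where get_io_context()
 * runs after both the lookup and the create paths. */
static struct entry *find_get(int key)
{
	struct entry *e = lookup(key);

	if (!e) {
		e = create(key);
		if (!e)
			return NULL;	/* allocation failed */
	}
	atomic_fetch_add(&e->refcount, 1);
	return e;
}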