Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c     9
-rw-r--r--  block/blk-core.c       3
-rw-r--r--  block/blk-iolatency.c  1
-rw-r--r--  block/blk-merge.c     15
-rw-r--r--  block/blk-mq.c         8
-rw-r--r--  block/blk-mq.h         2
-rw-r--r--  block/blk-sysfs.c     12
7 files changed, 27 insertions(+), 23 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 77f37ef8ef06..617a2b3f7582 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1736,8 +1736,8 @@ out:
/**
* blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
*
* This is called by the IO controller when we know there's delay accumulated
* for the blkg for this task. We do not pass the blkg because there are places
@@ -1769,8 +1769,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
/**
* blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
*
* Charge @delta to the blkg's current delay accumulation. This is used to
* throttle tasks if an IO controller thinks we need more throttling.
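
As a rough illustration of how an IO controller might pair these two calls (a minimal sketch assuming only the signatures documented above; the helper name iolat_account_delay is hypothetical, not part of this commit):

	/* Hypothetical caller: charge a measured stall, then schedule throttling. */
	static void iolat_account_delay(struct blkcg_gq *blkg,
					struct request_queue *q, u64 delay_ns)
	{
		u64 now = ktime_to_ns(ktime_get());

		/* Charge @delay_ns to the blkg's delay accumulation... */
		blkcg_add_delay(blkg, now, delay_ns);

		/* ...then have the current task check for throttling,
		 * charging the stall to memory delay for PSI. */
		blkcg_schedule_throttle(q, true);
	}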
diff --git a/block/blk-core.c b/block/blk-core.c
index 6b78ec56a4f2..4673ebe42255 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -500,8 +500,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q->stats)
goto fail_stats;
- q->backing_dev_info->ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
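
The readahead change is a pure substitution: the open-coded expression and the new constant are meant to be equal. A quick arithmetic check, assuming VM_READAHEAD_PAGES is defined in include/linux/mm.h along these lines:

	/* Assumed definition: #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
	 *
	 * Old expression, with VM_MAX_READAHEAD == 128 (KB) and PAGE_SIZE == 4096:
	 *   (128 * 1024) / 4096 == 32 pages == 128 KiB of readahead,
	 * which is exactly what SZ_128K / PAGE_SIZE yields. */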
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 2620baa1f699..507212d75ee2 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -75,6 +75,7 @@
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
+#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 22467f475ab4..1c9d4f0f96ea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
*/
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
unsigned *nsegs, unsigned *last_seg_size,
- unsigned *front_seg_size, unsigned *sectors)
+ unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
{
unsigned len = bv->bv_len;
unsigned total_len = 0;
@@ -190,7 +190,7 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
* Multi-page bvec may be too big to hold in one segment, so the
* current bvec has to be split into multiple segments.
*/
- while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
+ while (len && new_nsegs + *nsegs < max_segs) {
seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
seg_size = min(seg_size, len);
@@ -240,6 +240,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ const unsigned max_segs = queue_max_segments(q);
bio_for_each_bvec(bv, bio, iter) {
/*
@@ -254,14 +255,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
* Consider this a new segment if we're splitting in
* the middle of this vector.
*/
- if (nsegs < queue_max_segments(q) &&
+ if (nsegs < max_segs &&
sectors < max_sectors) {
/* split in the middle of bvec */
bv.bv_len = (max_sectors - sectors) << 9;
bvec_split_segs(q, &bv, &nsegs,
&seg_size,
&front_seg_size,
- &sectors);
+ &sectors, max_segs);
}
goto split;
}
@@ -283,7 +284,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
continue;
}
new_segment:
- if (nsegs == queue_max_segments(q))
+ if (nsegs == max_segs)
goto split;
bvprv = bv;
@@ -296,7 +297,7 @@ new_segment:
if (nsegs == 1 && seg_size > front_seg_size)
front_seg_size = seg_size;
} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
- &front_seg_size, &sectors)) {
+ &front_seg_size, &sectors, max_segs)) {
goto split;
}
}
@@ -415,7 +416,7 @@ new_segment:
bvprv = bv;
prev = 1;
bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
- &front_seg_size, NULL);
+ &front_seg_size, NULL, UINT_MAX);
}
bbio = bio;
}
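
Taken together, the blk-merge.c hunks hoist the queue_max_segments(q) lookup out of the per-bvec loop and thread it through bvec_split_segs() as a max_segs argument. A minimal sketch of the resulting calling convention (illustrative, not verbatim kernel code):

	const unsigned max_segs = queue_max_segments(q); /* read once, not per bvec */

	/* Splitting path: stop at the queue's segment limit. */
	if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
			    &front_seg_size, &sectors, max_segs))
		goto split;

	/* Counting path: pass UINT_MAX so the segment cap never
	 * cuts the physical-segment count short. */
	bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
			&front_seg_size, NULL, UINT_MAX);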
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a9c181603cbd..70b210a308c4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -782,7 +782,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
if (kick_requeue_list)
blk_mq_kick_requeue_list(q);
}
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
@@ -1093,8 +1092,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
bool ret;
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ blk_mq_sched_mark_restart_hctx(hctx);
/*
* It's possible that a tag was freed in the window between the
@@ -2857,7 +2855,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/*
* Default to classic polling
*/
- q->poll_nsec = -1;
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
@@ -3392,7 +3390,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
{
struct request *rq;
- if (q->poll_nsec == -1)
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
return false;
if (!blk_qc_t_is_internal(cookie))
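
The poll_nsec hunks replace a magic -1 with a named constant. A sketch of the intended semantics, assuming BLK_MQ_POLL_CLASSIC is defined as -1 in include/linux/blkdev.h by this series:

	/* Assumed: #define BLK_MQ_POLL_CLASSIC -1
	 *
	 * q->poll_nsec then selects the polling mode:
	 *   BLK_MQ_POLL_CLASSIC -> classic busy polling (hybrid poll disabled)
	 *   0                   -> hybrid polling, sleep time auto-estimated
	 *   > 0                 -> hybrid polling, fixed sleep of poll_nsec ns
	 */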
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c11353a3749d..0ed8e5a8729f 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -41,6 +41,8 @@ void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
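
This declaration pairs with the EXPORT_SYMBOL dropped from blk-mq.c above: blk_mq_add_to_requeue_list() moves from the public header into this block-internal one, so only code under block/ can call it. An abbreviated sketch of an existing in-tree caller (body trimmed, not verbatim):

	void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
	{
		__blk_mq_requeue_request(rq);

		/* Internal call: no export needed now that the prototype
		 * lives in block/blk-mq.h. */
		blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
	}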
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 59685918167e..422327089e0f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
int val;
- if (q->poll_nsec == -1)
- val = -1;
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+ val = BLK_MQ_POLL_CLASSIC;
else
val = q->poll_nsec / 1000;
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
if (err < 0)
return err;
- if (val == -1)
- q->poll_nsec = -1;
- else
+ if (val == BLK_MQ_POLL_CLASSIC)
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+ else if (val >= 0)
q->poll_nsec = val * 1000;
+ else
+ return -EINVAL;
return count;
}
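
The net effect on the sysfs interface: negative values other than -1, which the old store handler silently converted into bogus negative nanosecond delays, are now rejected. A sketch of the resulting contract for the io_poll_delay attribute (sysfs path assumed):

	/* /sys/block/<dev>/queue/io_poll_delay after this change:
	 *   write -1   -> q->poll_nsec = BLK_MQ_POLL_CLASSIC (classic polling)
	 *   write  0   -> q->poll_nsec = 0 (hybrid polling, auto-estimated sleep)
	 *   write  n>0 -> q->poll_nsec = n * 1000 (hybrid polling, n microseconds)
	 *   any other negative value -> -EINVAL
	 */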