author | Yu Kuai | 2022-09-16 15:19:39 +0800
---|---|---
committer | Jens Axboe | 2022-11-01 07:09:44 -0600
commit | 71f8ca77cb8764d46f656b725999e8b8b1aec215 |
tree | 0382b29e3b2b8121239f00ca1b89d3a31a85127e /block/bfq-wf2q.c |
parent | 60a6e10c537a7459dd53882186bd16fff257fb03 |
block, bfq: refactor the counting of 'num_groups_with_pending_reqs'
Currently, bfq can't handle sync io concurrently as long as it is
not issued from the root group. This is because
'bfqd->num_groups_with_pending_reqs > 0' is always true in
bfq_asymmetric_scenario().
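For reference, here is a minimal sketch of the check in question, with
'struct bfq_data' reduced to the one field that matters here; the real
bfq_asymmetric_scenario() in block/bfq-iosched.c also weighs varied
queue weights and busy I/O classes, which are omitted in this sketch.

```c
#include <stdbool.h>

/* Stand-in for the scheduler-wide state; the real struct is larger. */
struct bfq_data {
	unsigned int num_groups_with_pending_reqs;
};

static bool bfq_asymmetric_scenario(const struct bfq_data *bfqd)
{
	/*
	 * With the old counting, any pending request in a non-root
	 * group kept this counter above zero, so the scenario was
	 * always considered asymmetric and sync io from different
	 * queues was never dispatched concurrently.
	 */
	return bfqd->num_groups_with_pending_reqs > 0;
}
```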
How a bfqg is counted into 'num_groups_with_pending_reqs':

Before this patch:
1) The root group is never counted.
2) A bfqg is counted if it or any of its child bfqgs have pending requests.
3) A bfqg stops being counted only once it and all of its child bfqgs have completed all their requests.

After this patch (see the sketch after this list):
1) The root group is counted.
2) A bfqg is counted if it itself has pending requests.
3) A bfqg stops being counted once it has completed all of its requests.
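Below is a minimal sketch of the new counting scheme, reusing the
stand-in 'struct bfq_data' from the sketch above. The function names
here are illustrative only; the real increments live in
bfq_add_bfqq_in_groups_with_pending_reqs() and
bfq_del_bfqq_in_groups_with_pending_reqs(), shown in the diff below.

```c
/* Stand-in group type: only the fields relevant to the counting. */
struct bfq_group {
	unsigned int num_queues_with_pending_reqs;
	struct bfq_data *bfqd;
};

/*
 * A group enters 'num_groups_with_pending_reqs' when its first queue
 * gains a pending request...
 */
static void group_add_pending_queue(struct bfq_group *bfqg)
{
	if (!(bfqg->num_queues_with_pending_reqs++))
		bfqg->bfqd->num_groups_with_pending_reqs++;
}

/*
 * ...and leaves it when its last pending queue completes. The root
 * group is a bfq_group like any other, so it is now counted too.
 */
static void group_del_pending_queue(struct bfq_group *bfqg)
{
	if (!(--bfqg->num_queues_with_pending_reqs))
		bfqg->bfqd->num_groups_with_pending_reqs--;
}
```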
With this change, the case in which only one group is activated can
be detected, and the next patch will use this to support concurrent
sync io in that case.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Link: https://lore.kernel.org/r/20220916071942.214222-4-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bfq-wf2q.c')
-rw-r--r-- | block/bfq-wf2q.c | 23
1 file changed, 8 insertions(+), 15 deletions(-)
```diff
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 5549ccf09cd2..5e8224c96921 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -984,19 +984,6 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
 		entity->on_st_or_in_serv = true;
 	}
 
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
-		struct bfq_group *bfqg =
-			container_of(entity, struct bfq_group, entity);
-		struct bfq_data *bfqd = bfqg->bfqd;
-
-		if (!entity->in_groups_with_pending_reqs) {
-			entity->in_groups_with_pending_reqs = true;
-			bfqd->num_groups_with_pending_reqs++;
-		}
-	}
-#endif
-
 	bfq_update_fin_time_enqueue(entity, st, backshifted);
 }
 
@@ -1653,7 +1640,8 @@ void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
 	if (!entity->in_groups_with_pending_reqs) {
 		entity->in_groups_with_pending_reqs = true;
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-		bfqq_group(bfqq)->num_queues_with_pending_reqs++;
+		if (!(bfqq_group(bfqq)->num_queues_with_pending_reqs++))
+			bfqq->bfqd->num_groups_with_pending_reqs++;
 #endif
 	}
 }
@@ -1665,7 +1653,8 @@ void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
 	if (entity->in_groups_with_pending_reqs) {
 		entity->in_groups_with_pending_reqs = false;
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-		bfqq_group(bfqq)->num_queues_with_pending_reqs--;
+		if (!(--bfqq_group(bfqq)->num_queues_with_pending_reqs))
+			bfqq->bfqd->num_groups_with_pending_reqs--;
 #endif
 	}
 }
@@ -1694,6 +1683,10 @@ void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration)
 
 	if (!bfqq->dispatched) {
 		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
+		/*
+		 * Next function is invoked last, because it causes bfqq to be
+		 * freed. DO NOT use bfqq after the next function invocation.
+		 */
 		bfq_weights_tree_remove(bfqd, bfqq);
 	}
 }
```