author	Linus Torvalds	2022-04-01 16:20:00 -0700
committer	Linus Torvalds	2022-04-01 16:20:00 -0700
commit	d589ae0d44607a0af65b83113e4cfba1a8af7eb3 (patch)
tree	bc82d4293b2301a6380db120247d7aeda7559fb9 /block/blk-cgroup.c
parent	3b1509f275ce13865c28ce254c36dc7c915808eb (diff)
parent	8d7829ebc1e48208b3c02c2a10c5f8856246033c (diff)
Merge tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Either fixes or a few additions that got missed in the initial merge
  window pull. In detail:

   - List iterator fix to avoid leaking value post loop (Jakob)

   - One-off fix in minor count (Christophe)

   - Fix for a regression in how io priority setting works for an
     exiting task (Jiri)

   - Fix a regression in this merge window with blkg_free() being
     called in an inappropriate context (Ming)

   - Misc fixes (Ming, Tom)"

* tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block:
  blk-wbt: remove wbt_track stub
  block: use dedicated list iterator variable
  block: Fix the maximum minor value is blk_alloc_ext_minor()
  block: restore the old set_task_ioprio() behaviour wrt PF_EXITING
  block: avoid calling blkg_free() in atomic context
  lib/sbitmap: allocate sb->map via kvzalloc_node
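As an aside on the "use dedicated list iterator variable" fix above: once list_for_each_entry() runs to completion without a break, the iterator no longer points at a real element (it is computed from the list head), so any use of it after the loop leaks a bogus value. A minimal sketch of the safer pattern, using hypothetical struct item / find_item() names rather than the block-layer code the patch actually touches:

#include <linux/list.h>

struct item {
	struct list_head node;
	int key;
};

/* Return the matching item, or NULL; never reuse the loop iterator. */
static struct item *find_item(struct list_head *head, int key)
{
	struct item *it, *found = NULL;

	list_for_each_entry(it, head, node) {
		if (it->key == key) {
			found = it;	/* remember the match explicitly */
			break;
		}
	}

	/* 'it' is only meaningful if we broke out early; return 'found'. */
	return found;
}

Keeping the result in a dedicated pointer makes the post-loop use valid whether or not a match was found.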
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	32
1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0430926426fe..8dfe62786cd5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
 	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+					     free_work);
 	int i;
 
-	if (!blkg)
-		return;
-
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
 	kfree(blkg);
 }
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+	if (!blkg)
+		return;
+
+	/*
+	 * Both ->pd_free_fn() and request queue's release handler may
+	 * sleep, so free us by scheduling one work func
+	 */
+	INIT_WORK(&blkg->free_work, blkg_free_workfn);
+	schedule_work(&blkg->free_work);
+}
+
 static void __blkg_release(struct rcu_head *rcu)
 {
 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
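The new blkg_free() above is an instance of a general pattern: when an object's final put can happen in atomic context (here, via the RCU callback path through __blkg_release()) but its teardown may sleep, embed a work_struct in the object and defer the real cleanup to process context. A minimal sketch of that pattern, with hypothetical struct foo / foo_free() names standing in for the blkcg types:

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
	struct mutex lock;		/* taking this may sleep */
	void *buf;
	struct work_struct free_work;	/* lets teardown run in process context */
};

static void foo_free_workfn(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, free_work);

	/* Process context: sleeping operations are allowed here. */
	mutex_lock(&f->lock);
	/* ... release anything that must be torn down under the lock ... */
	mutex_unlock(&f->lock);

	kfree(f->buf);
	kfree(f);
}

/* Safe to call from atomic context, e.g. under a spinlock or an RCU callback. */
static void foo_free(struct foo *f)
{
	if (!f)
		return;

	INIT_WORK(&f->free_work, foo_free_workfn);
	schedule_work(&f->free_work);
}

schedule_work() only queues the item and never sleeps, so foo_free() stays callable wherever blkg_free() now needs to be, while the sleeping work (the analogue of ->pd_free_fn() and the request queue release mentioned in the patch's comment) runs later on the system workqueue.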