| author | Nikolay Borisov | 2018-01-31 10:52:04 +0200 |
|---|---|---|
| committer | David Sterba | 2018-03-26 15:09:38 +0200 |
| commit | 5d23515be66904fa3b1b5d6bd72d2199cd2447ab (patch) | |
| tree | 5b3788f6fd8f308ed1c4478299e2aa447d3aa50d /fs/btrfs/qgroup.c | |
| parent | 7ce311d552e473a5176f8bb143fb45de64b8bd81 (diff) | |
btrfs: Move qgroup rescan on quota enable to btrfs_quota_enable
Currently btrfs_run_qgroups is doing a bit too much. Not only is it
responsible for synchronizing the in-memory state of qgroups to disk,
it also contains the code that triggers the initial qgroup rescan when
quota is first enabled. That condition is detected by checking that
BTRFS_FS_QUOTA_ENABLED is not set while BTRFS_FS_QUOTA_ENABLING is set.
Nothing requires the code to be structured (and scattered) this way,
so let's streamline things. First, move the rescan code into
btrfs_quota_enable, closer to its point of use. This also makes the
BTRFS_FS_QUOTA_ENABLING flag redundant, so remove it as well.
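
In outline, the tail of btrfs_quota_enable() after this change reads as
follows (condensed from the diff below; the earlier setup, error handling
and the out_free_* labels are omitted):

```c
	/* Publish the new quota root and mark quotas as enabled. */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Kick off the initial rescan right away instead of leaving a
	 * QUOTA_ENABLING marker for btrfs_run_qgroups() to act on later.
	 */
	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}
```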
This has been tested with a full xfstests run with qgroups enabled on
the scratch device of every test, and no regressions were observed.
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/qgroup.c')
-rw-r--r-- | fs/btrfs/qgroup.c | 35 |
1 file changed, 10 insertions(+), 25 deletions(-)
```diff
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index aa259d6986e1..0fa4f07b80b8 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -826,10 +826,8 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 	int slot;
 
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
-	if (fs_info->quota_root) {
-		set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
+	if (fs_info->quota_root)
 		goto out;
-	}
 
 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 	if (!fs_info->qgroup_ulist) {
@@ -923,8 +921,15 @@ out_add_root:
 	}
 	spin_lock(&fs_info->qgroup_lock);
 	fs_info->quota_root = quota_root;
-	set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
+	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 	spin_unlock(&fs_info->qgroup_lock);
+
+	ret = qgroup_rescan_init(fs_info, 0, 1);
+	if (!ret) {
+		qgroup_rescan_zero_tracking(fs_info);
+		btrfs_queue_work(fs_info->qgroup_rescan_workers,
+				 &fs_info->qgroup_rescan_work);
+	}
+
 out_free_path:
 	btrfs_free_path(path);
 out_free_root:
@@ -2080,17 +2085,9 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_root *quota_root = fs_info->quota_root;
 	int ret = 0;
-	int start_rescan_worker = 0;
 
 	if (!quota_root)
-		goto out;
-
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
-	    test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
-		start_rescan_worker = 1;
-
-	if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
-		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+		return ret;
 
 	spin_lock(&fs_info->qgroup_lock);
 	while (!list_empty(&fs_info->dirty_qgroups)) {
@@ -2119,18 +2116,6 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 	if (ret)
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 
-	if (!ret && start_rescan_worker) {
-		ret = qgroup_rescan_init(fs_info, 0, 1);
-		if (!ret) {
-			qgroup_rescan_zero_tracking(fs_info);
-			btrfs_queue_work(fs_info->qgroup_rescan_workers,
-					 &fs_info->qgroup_rescan_work);
-		}
-		ret = 0;
-	}
-
-out:
 	return ret;
 }
```
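
For reference, btrfs_run_qgroups() as it reads after this patch,
reconstructed only from the hunks above (the untouched middle of the
function, which writes out dirty qgroups and the status item, is elided):

```c
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;

	/* Nothing to do until quotas have been enabled. */
	if (!quota_root)
		return ret;

	/* ... sync dirty qgroups and the qgroup status item to disk ... */

	return ret;
}
```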