author	Timofey Titovets <nefelim4ag@gmail.com>	2017-09-28 17:33:36 +0300
committer	David Sterba <dsterba@suse.com>	2017-11-01 20:45:35 +0100
commit	4e439a0b184f014a5833fa468af36cc9f59b8fb1 (patch)
tree	caf20ea247d44cfb0aa65421d0e41c93d8b7964a
parent	ddfae63cc8e03ef48622cb780e6c66367b2d4f1a (diff)
Btrfs: compression: separate heuristic/compression workspaces
The compression heuristic itself is not a compression type. As the current
infrastructure provides workspaces per compression type, it's difficult to
just add a heuristic workspace. Refactor the code to support both
compression and heuristic workspaces with maximum code sharing and minimal
changes.

Signed-off-by: Timofey Titovets <nefelim4ag@gmail.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ coding style fixes ]
Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--	fs/btrfs/compression.c	139
1 file changed, 121 insertions(+), 18 deletions(-)
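The patch below turns the anonymous per-type workspace manager into a named
struct workspaces_list and adds one standalone instance for the heuristic; a
single bool then selects which pool __find_workspace()/__free_workspace()
operate on. As a rough illustration of that dispatch, here is a minimal
userspace C sketch (not btrfs code: the pool fields are trimmed down and a
pthread mutex stands in for the kernel's spinlock and waitqueue):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define BTRFS_COMPRESS_TYPES 3

	struct ws_pool {
		pthread_mutex_t ws_lock;	/* stands in for the spinlock */
		int free_ws;			/* free workspaces in this pool */
		int total_ws;			/* workspaces allocated overall */
	};

	/* One pool per compression type, plus the new standalone one. */
	static struct ws_pool comp_ws[BTRFS_COMPRESS_TYPES];
	static struct ws_pool heuristic_ws_pool;

	/*
	 * Mirrors the if (heuristic) branch added to __find_workspace() and
	 * __free_workspace(): one bool picks the pool, and everything
	 * downstream (locking, counting, waiting) is shared.
	 */
	static struct ws_pool *pick_pool(int type, bool heuristic)
	{
		return heuristic ? &heuristic_ws_pool : &comp_ws[type - 1];
	}

	int main(void)
	{
		/* type is ignored for the heuristic, as in __find_workspace(0, true) */
		struct ws_pool *p = pick_pool(0, true);

		printf("heuristic pool selected: %d\n", p == &heuristic_ws_pool);
		return 0;
	}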
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 083f9c485875..b155542c5e6b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -707,7 +707,34 @@ out:
return ret;
}
-static struct {
+
+struct heuristic_ws {
+ struct list_head list;
+};
+
+static void free_heuristic_ws(struct list_head *ws)
+{
+ struct heuristic_ws *workspace;
+
+ workspace = list_entry(ws, struct heuristic_ws, list);
+
+ kfree(workspace);
+}
+
+static struct list_head *alloc_heuristic_ws(void)
+{
+ struct heuristic_ws *ws;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ws->list);
+
+ return &ws->list;
+}
+
+struct workspaces_list {
struct list_head idle_ws;
spinlock_t ws_lock;
/* Number of free workspaces */
@@ -716,7 +743,11 @@ static struct {
atomic_t total_ws;
/* Waiters for a free workspace */
wait_queue_head_t ws_wait;
-} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
+};
+
+static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
+
+static struct workspaces_list btrfs_heuristic_ws;
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zlib_compress,
@@ -726,11 +757,25 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
void __init btrfs_init_compress(void)
{
+ struct list_head *workspace;
int i;
- for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
- struct list_head *workspace;
+ INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
+ spin_lock_init(&btrfs_heuristic_ws.ws_lock);
+ atomic_set(&btrfs_heuristic_ws.total_ws, 0);
+ init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
+ workspace = alloc_heuristic_ws();
+ if (IS_ERR(workspace)) {
+ pr_warn(
+ "BTRFS: cannot preallocate heuristic workspace, will try later\n");
+ } else {
+ atomic_set(&btrfs_heuristic_ws.total_ws, 1);
+ btrfs_heuristic_ws.free_ws = 1;
+ list_add(workspace, &btrfs_heuristic_ws.idle_ws);
+ }
+
+ for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
spin_lock_init(&btrfs_comp_ws[i].ws_lock);
atomic_set(&btrfs_comp_ws[i].total_ws, 0);
@@ -757,18 +802,32 @@ void __init btrfs_init_compress(void)
* Preallocation makes a forward progress guarantees and we do not return
* errors.
*/
-static struct list_head *find_workspace(int type)
+static struct list_head *__find_workspace(int type, bool heuristic)
{
struct list_head *workspace;
int cpus = num_online_cpus();
int idx = type - 1;
unsigned nofs_flag;
+ struct list_head *idle_ws;
+ spinlock_t *ws_lock;
+ atomic_t *total_ws;
+ wait_queue_head_t *ws_wait;
+ int *free_ws;
+
+ if (heuristic) {
+ idle_ws = &btrfs_heuristic_ws.idle_ws;
+ ws_lock = &btrfs_heuristic_ws.ws_lock;
+ total_ws = &btrfs_heuristic_ws.total_ws;
+ ws_wait = &btrfs_heuristic_ws.ws_wait;
+ free_ws = &btrfs_heuristic_ws.free_ws;
+ } else {
+ idle_ws = &btrfs_comp_ws[idx].idle_ws;
+ ws_lock = &btrfs_comp_ws[idx].ws_lock;
+ total_ws = &btrfs_comp_ws[idx].total_ws;
+ ws_wait = &btrfs_comp_ws[idx].ws_wait;
+ free_ws = &btrfs_comp_ws[idx].free_ws;
+ }
- struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
- spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
- atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
- wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
- int *free_ws = &btrfs_comp_ws[idx].free_ws;
again:
spin_lock(ws_lock);
if (!list_empty(idle_ws)) {
@@ -798,7 +857,10 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = btrfs_compress_op[idx]->alloc_workspace();
+ if (heuristic)
+ workspace = alloc_heuristic_ws();
+ else
+ workspace = btrfs_compress_op[idx]->alloc_workspace();
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -829,18 +891,38 @@ again:
return workspace;
}
+static struct list_head *find_workspace(int type)
+{
+ return __find_workspace(type, false);
+}
+
/*
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-static void free_workspace(int type, struct list_head *workspace)
+static void __free_workspace(int type, struct list_head *workspace,
+ bool heuristic)
{
int idx = type - 1;
- struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
- spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
- atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
- wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
- int *free_ws = &btrfs_comp_ws[idx].free_ws;
+ struct list_head *idle_ws;
+ spinlock_t *ws_lock;
+ atomic_t *total_ws;
+ wait_queue_head_t *ws_wait;
+ int *free_ws;
+
+ if (heuristic) {
+ idle_ws = &btrfs_heuristic_ws.idle_ws;
+ ws_lock = &btrfs_heuristic_ws.ws_lock;
+ total_ws = &btrfs_heuristic_ws.total_ws;
+ ws_wait = &btrfs_heuristic_ws.ws_wait;
+ free_ws = &btrfs_heuristic_ws.free_ws;
+ } else {
+ idle_ws = &btrfs_comp_ws[idx].idle_ws;
+ ws_lock = &btrfs_comp_ws[idx].ws_lock;
+ total_ws = &btrfs_comp_ws[idx].total_ws;
+ ws_wait = &btrfs_comp_ws[idx].ws_wait;
+ free_ws = &btrfs_comp_ws[idx].free_ws;
+ }
spin_lock(ws_lock);
if (*free_ws <= num_online_cpus()) {
@@ -851,7 +933,10 @@ static void free_workspace(int type, struct list_head *workspace)
}
spin_unlock(ws_lock);
- btrfs_compress_op[idx]->free_workspace(workspace);
+ if (heuristic)
+ free_heuristic_ws(workspace);
+ else
+ btrfs_compress_op[idx]->free_workspace(workspace);
atomic_dec(total_ws);
wake:
/*
@@ -862,6 +947,11 @@ wake:
wake_up(ws_wait);
}
+static void free_workspace(int type, struct list_head *ws)
+{
+ return __free_workspace(type, ws, false);
+}
+
/*
* cleanup function for module exit
*/
@@ -870,6 +960,13 @@ static void free_workspaces(void)
struct list_head *workspace;
int i;
+ while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
+ workspace = btrfs_heuristic_ws.idle_ws.next;
+ list_del(workspace);
+ free_heuristic_ws(workspace);
+ atomic_dec(&btrfs_heuristic_ws.total_ws);
+ }
+
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
workspace = btrfs_comp_ws[i].idle_ws.next;
@@ -1090,11 +1187,15 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
*/
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
+ struct list_head *ws_list = __find_workspace(0, true);
+ struct heuristic_ws *ws;
u64 index = start >> PAGE_SHIFT;
u64 end_index = end >> PAGE_SHIFT;
struct page *page;
int ret = 1;
+ ws = list_entry(ws_list, struct heuristic_ws, list);
+
while (index <= end_index) {
page = find_get_page(inode->i_mapping, index);
kmap(page);
@@ -1103,6 +1204,8 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
index++;
}
+ __free_workspace(0, ws_list, true);
+
return ret;
}
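For context, the find/free pair keeps the forward-progress guarantee noted in
the comment above __find_workspace(): one workspace is preallocated at init,
an allocation failure falls back to waiting for a returned workspace, and
freeing wakes any waiters. Below is a hedged userspace analogue of that
lifecycle, reduced to a single heuristic pool; it is a sketch, not kernel
code: a pthread mutex/condvar replaces the spinlock and ws_wait, calloc
replaces kzalloc, a singly linked list replaces list_head, the helper names
are invented here, and workspaces are always returned to the idle list rather
than capped at num_online_cpus().

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct heuristic_ws {
		struct heuristic_ws *next;	/* singly linked idle list */
	};

	static struct heuristic_ws *idle;
	static int free_ws, total_ws;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t ws_wait = PTHREAD_COND_INITIALIZER;

	static struct heuristic_ws *find_heuristic_ws(void)
	{
		struct heuristic_ws *ws;

		pthread_mutex_lock(&lock);
		for (;;) {
			if (idle) {		/* reuse an idle workspace */
				ws = idle;
				idle = ws->next;
				free_ws--;
				pthread_mutex_unlock(&lock);
				return ws;
			}
			total_ws++;
			pthread_mutex_unlock(&lock);

			ws = calloc(1, sizeof(*ws));	/* like alloc_heuristic_ws() */
			if (ws)
				return ws;

			/* Allocation failed: wait until someone returns one. */
			pthread_mutex_lock(&lock);
			total_ws--;
			while (!idle && total_ws > 0)
				pthread_cond_wait(&ws_wait, &lock);
		}
	}

	static void put_heuristic_ws(struct heuristic_ws *ws)
	{
		pthread_mutex_lock(&lock);
		ws->next = idle;		/* keep it idle for the next caller */
		idle = ws;
		free_ws++;
		pthread_cond_signal(&ws_wait);	/* like wake_up(ws_wait) */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		struct heuristic_ws *ws = find_heuristic_ws();

		/* ... run the heuristic with ws ... */
		put_heuristic_ws(ws);
		printf("free=%d total=%d\n", free_ws, total_ws);
		return 0;
	}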