From dd841a749d1ded8e2e5facc4242ee0b6779fc0cb Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Sun, 14 Jun 2020 06:07:10 -0400
Subject: radix tree test suite: Fix compilation

Introducing local_lock broke compilation; fix it all up.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/radix-tree.h                  | 1 +
 lib/radix-tree.c                            | 1 -
 tools/testing/radix-tree/linux/kernel.h     | 1 +
 tools/testing/radix-tree/linux/local_lock.h | 8 ++++++++
 tools/testing/radix-tree/test.h             | 4 ----
 5 files changed, 10 insertions(+), 5 deletions(-)
 create mode 100644 tools/testing/radix-tree/linux/local_lock.h

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c2a9f7c90727..5c85059a92ba 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -11,6 +11,7 @@
 #include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/local_lock.h>
 #include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <linux/spinlock.h>
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e4a3a4397f2..0f10485d46b6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/percpu.h>
-#include <linux/local_lock.h>
 #include <linux/preempt.h>		/* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 4568248222ae..39867fd80c8f 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -22,4 +22,5 @@
 #define __releases(x)
 #define __must_hold(x)
 
+#define EXPORT_PER_CPU_SYMBOL_GPL(x)
 #endif /* _KERNEL_H */
diff --git a/tools/testing/radix-tree/linux/local_lock.h b/tools/testing/radix-tree/linux/local_lock.h
new file mode 100644
index 000000000000..b3cf8b233ca4
--- /dev/null
+++ b/tools/testing/radix-tree/linux/local_lock.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_LOCAL_LOCK
+#define _LINUX_LOCAL_LOCK
+typedef struct { } local_lock_t;
+
+static inline void local_lock(local_lock_t *lock) { }
+static inline void local_unlock(local_lock_t *lock) { }
+#define INIT_LOCAL_LOCK(x) { }
+#endif
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 34dab4d18744..7ef7067e942c 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -56,8 +56,4 @@ int root_tag_get(struct radix_tree_root *root, unsigned int tag);
 unsigned long node_maxindex(struct radix_tree_node *);
 unsigned long shift_maxindex(unsigned int shift);
 int radix_tree_cpu_dead(unsigned int cpu);
-struct radix_tree_preload {
-	unsigned nr;
-	struct radix_tree_node *nodes;
-};
 extern struct radix_tree_preload radix_tree_preloads;
--
cgit v1.2.3
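
[ A note on the stub above: the radix tree test suite compiles the
kernel's radix tree sources in userspace, and the headers under
tools/testing/radix-tree/linux/ shadow the real kernel headers, so the
new local_lock.h only needs no-op stand-ins.  The sketch below shows
the kind of caller the stub has to satisfy; it is illustrative only:
the struct loosely mirrors the kernel's radix_tree_preload, and
preload_example() is a made-up function, not kernel code:

	#include <linux/local_lock.h>

	/* Roughly the shape of the per-CPU preload state; the field
	 * names here are illustrative. */
	struct preload_example_state {
		local_lock_t lock;
		unsigned nr;
		struct radix_tree_node *nodes;
	};

	static void preload_example(struct preload_example_state *rtp)
	{
		local_lock(&rtp->lock);		/* a no-op in the stub */
		/* ... manipulate rtp->nr and rtp->nodes ... */
		local_unlock(&rtp->lock);
	}

In the real kernel, local_lock() disables preemption (or takes a
per-CPU lock on PREEMPT_RT); the userspace harness has no preemption
to disable, so empty inline functions are enough. ]
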
From a219b856a2b993da234108307be772448f22b0ce Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Thu, 2 Apr 2020 14:26:13 -0400
Subject: ida: Free allocated bitmap in error path

If a bitmap needs to be allocated, and by the time the thread is
scheduled to run again all the indices which would satisfy the
allocation have been allocated, then we would leak the allocation.
Almost impossible to hit in practice, but it is a trivial fix.
Found by Coverity.

Fixes: f32f004cddf8 ("ida: Convert to XArray")
Reported-by: coverity-bot <keescook+coverity-bot@chromium.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 lib/idr.c                           |  1 +
 tools/testing/radix-tree/idr-test.c | 29 +++++++++++++++++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/lib/idr.c b/lib/idr.c
index c2cf2c52bbde..4d2eef0259d2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -470,6 +470,7 @@ alloc:
 		goto retry;
 nospc:
 	xas_unlock_irqrestore(&xas, flags);
+	kfree(alloc);
 	return -ENOSPC;
 }
 EXPORT_SYMBOL(ida_alloc_range);
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 8995092d541e..3b796dd5e577 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
 	return NULL;
 }
 
+static void *ida_leak_fn(void *arg)
+{
+	struct ida *ida = arg;
+	time_t s = time(NULL);
+	int i, ret;
+
+	rcu_register_thread();
+
+	do for (i = 0; i < 1000; i++) {
+		ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
+		if (ret >= 0)
+			ida_free(ida, 128);
+	} while (time(NULL) < s + 2);
+
+	rcu_unregister_thread();
+	return NULL;
+}
+
 void ida_thread_tests(void)
 {
+	DEFINE_IDA(ida);
 	pthread_t threads[20];
 	int i;
 
@@ -536,6 +555,16 @@ void ida_thread_tests(void)
 	while (i--)
 		pthread_join(threads[i], NULL);
 
+	for (i = 0; i < ARRAY_SIZE(threads); i++)
+		if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
+			perror("creating ida thread");
+			exit(1);
+		}
+
+	while (i--)
+		pthread_join(threads[i], NULL);
+
 	assert(ida_is_empty(&ida));
 }
 
 void ida_tests(void)
--
cgit v1.2.3

From 062b735912b9f3aa3e14cd02b5ede08cf8bc093f Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Tue, 31 Mar 2020 14:23:59 -0400
Subject: XArray: Test two more things about xa_cmpxchg

1. If we xa_cmpxchg() an entry in, it marks the index as not free.
2. If we xa_cmpxchg() NULL in, it marks the index as free.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 lib/test_xarray.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d4f97925dbd8..9fc3da430aba 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -393,6 +393,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
 	xa_erase_index(xa, 12345678);
 	xa_erase_index(xa, 5);
 	XA_BUG_ON(xa, !xa_empty(xa));
--
cgit v1.2.3
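
[ What the three new assertions pin down, in words: an index is "free"
for the purposes of xa_insert() only while it holds NULL, and
xa_cmpxchg() must update that state in both directions.  A sketch of
the same contract outside the test harness; cmpxchg_marks_free() is a
hypothetical helper, with error handling elided:

	static void cmpxchg_marks_free(struct xarray *xa)
	{
		void *item = xa_mk_value(5);

		/* cmpxchg an entry in: the index is now occupied, so a
		 * subsequent insert must fail with -EBUSY. */
		xa_cmpxchg(xa, 5, NULL, item, GFP_KERNEL);
		WARN_ON(xa_insert(xa, 5, item, GFP_KERNEL) != -EBUSY);

		/* cmpxchg the entry back out: the index is free again,
		 * so the same insert now succeeds. */
		xa_cmpxchg(xa, 5, item, NULL, GFP_KERNEL);
		WARN_ON(xa_insert(xa, 5, item, GFP_KERNEL) != 0);
	}
]
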
From 04e9e9bb8470bea74eafad1cafd552f3f06c32d9 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Sun, 14 Jun 2020 21:52:04 -0400
Subject: XArray: Test marked multiorder iterations

Demonstrate that starting a marked iteration partway through a marked
multi-order entry works.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 lib/test_xarray.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9fc3da430aba..1122c4453c87 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
 	xa_destroy(xa);
 }
 
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+	XA_STATE(xas, xa, 0x41);
+	void *entry;
+	int count = 0;
+
+	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+	xa_set_mark(xa, 0x41, XA_MARK_0);
+
+	rcu_read_lock();
+	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+		count++;
+		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+	}
+	XA_BUG_ON(xa, count != 1);
+	rcu_read_unlock();
+	xa_destroy(xa);
+#endif
+}
+
 static noinline void check_xa_mark(struct xarray *xa)
 {
 	unsigned long index;
@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
 		check_xa_mark_1(xa, index);
 
 	check_xa_mark_2(xa);
+	check_xa_mark_3(xa);
 }
 
 static noinline void check_xa_shrink(struct xarray *xa)
--
cgit v1.2.3

From 8446466c9dd645da4c1848f35ffd0fc1df3524ee Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Thu, 6 Aug 2020 10:07:24 -0400
Subject: XArray: Fix xas_for_each_conflict documentation

At one point, xas_for_each_conflict() was going to work this way, and
I forgot to update the documentation when I changed my mind.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/xarray.h | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index b4d70e7568b2..6b336098fca7 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1714,13 +1714,12 @@ enum {
  * @xas: XArray operation state.
  * @entry: Entry retrieved from the array.
  *
- * The loop body will be executed for each entry in the XArray that lies
- * within the range specified by @xas.  If the loop completes successfully,
- * any entries that lie in this range will be replaced by @entry.  The caller
- * may break out of the loop; if they do so, the contents of the XArray will
- * be unchanged.  The operation may fail due to an out of memory condition.
- * The caller may also call xa_set_err() to exit the loop while setting an
- * error to record the reason.
+ * The loop body will be executed for each entry in the XArray that
+ * lies within the range specified by @xas.  If the loop terminates
+ * normally, @entry will be %NULL.  The user may break out of the loop,
+ * which will leave @entry set to the conflicting entry.  The caller
+ * may also call xa_set_err() to exit the loop while setting an error
+ * to record the reason.
  */
 #define xas_for_each_conflict(xas, entry)	\
 	while ((entry = xas_find_conflict(xas)))
--
cgit v1.2.3
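
[ The corrected description implies a calling pattern worth spelling
out: iterate the conflicts, then use the value of @entry after the
loop to tell the two exits apart.  A sketch under those rules;
store_unless_conflict() is a hypothetical helper, not in the tree, and
the caller is assumed to hold the xa_lock:

	static void *store_unless_conflict(struct xa_state *xas,
			void *old, void *new)
	{
		void *entry;

		xas_for_each_conflict(xas, entry) {
			if (entry != old)
				break;	/* @entry stays set to the conflict */
		}
		if (entry)
			return entry;	/* report the conflicting entry */
		/* The loop terminated normally, so @entry is NULL and
		 * nothing conflicts with the store. */
		return xas_store(xas, new);
	}
]
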
From f82cd2f0b5eb715b1a296e20b34da7d296b6e9a4 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Tue, 18 Aug 2020 09:05:56 -0400
Subject: XArray: Add private interface for workingset node deletion

Move the tricky bits of dealing with the XArray from the workingset
code to the XArray.  Make it clear in the documentation that this is a
private interface, and only export it for the benefit of the test suite.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/xarray.h |  2 ++
 lib/test_xarray.c      |  7 +------
 lib/xarray.c           | 23 +++++++++++++++++++++++
 mm/workingset.c        | 13 ++-----------
 4 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 6b336098fca7..29db4e16eb89 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1286,6 +1286,8 @@ static inline bool xa_is_advanced(const void *entry)
  */
 typedef void (*xa_update_node_t)(struct xa_node *node);
 
+void xa_delete_node(struct xa_node *, xa_update_node_t);
+
 /*
  * The xa_state is opaque to its users.  It contains various different pieces
  * of state involved in the current operation on the XArray.  It should be
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 1122c4453c87..64ae07b1bcf4 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1600,14 +1600,9 @@ static noinline void shadow_remove(struct xarray *xa)
 	xa_lock(xa);
 	while ((node = list_first_entry_or_null(&shadow_nodes,
 					struct xa_node, private_list))) {
-		XA_STATE(xas, node->array, 0);
 		XA_BUG_ON(xa, node->array != xa);
 		list_del_init(&node->private_list);
-		xas.xa_node = xa_parent_locked(node->array, node);
-		xas.xa_offset = node->offset;
-		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-		xas_set_update(&xas, test_update_node);
-		xas_store(&xas, NULL);
+		xa_delete_node(node, test_update_node);
 	}
 	xa_unlock(xa);
 }
diff --git a/lib/xarray.c b/lib/xarray.c
index e9e641d3c0c3..1fa5c5658e63 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1973,6 +1973,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
 }
 EXPORT_SYMBOL(xa_extract);
 
+/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+	struct xa_state xas = {
+		.xa = node->array,
+		.xa_index = (unsigned long)node->offset <<
+				(node->shift + XA_CHUNK_SHIFT),
+		.xa_shift = node->shift + XA_CHUNK_SHIFT,
+		.xa_offset = node->offset,
+		.xa_node = xa_parent_locked(node->array, node),
+		.xa_update = update,
+	};
+
+	xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node);	/* For the benefit of the test suite */
+
 /**
  * xa_destroy() - Free all internal data structures.
  * @xa: XArray.
diff --git a/mm/workingset.c b/mm/workingset.c
index 92e66113a577..e185bfb8bd4e 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -519,12 +519,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 					  void *arg) __must_hold(lru_lock)
 {
 	struct xa_node *node = container_of(item, struct xa_node, private_list);
-	XA_STATE(xas, node->array, 0);
 	struct address_space *mapping;
 	int ret;
 
 	/*
-	 * Page cache insertions and deletions synchroneously maintain
+	 * Page cache insertions and deletions synchronously maintain
 	 * the shadow node LRU under the i_pages lock and the
 	 * lru_lock.  Because the page cache tree is emptied before
 	 * the inode can be destroyed, holding the lru_lock pins any
@@ -559,15 +558,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
 	mapping->nrexceptional -= node->nr_values;
-	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
-	xas.xa_offset = node->offset;
-	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
-	xas_set_update(&xas, workingset_update_node);
-	/*
-	 * We could store a shadow entry here which was the minimum of the
-	 * shadow entries we were tracking ...
-	 */
-	xas_store(&xas, NULL);
+	xa_delete_node(node, workingset_update_node);
 	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
 
 out_invalid:
--
cgit v1.2.3
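
[ One detail of xa_delete_node() worth spelling out: the xa_state's
index is rebuilt from the node's slot in its parent, because each slot
one level up covers 1 << (shift + XA_CHUNK_SHIFT) indices.  A worked
example with the usual XA_CHUNK_SHIFT of 6; node_first_index() is
illustrative, not a kernel helper:

	/* First index covered by a node, given its slot (offset) in
	 * the parent and its own shift; mirrors the .xa_index
	 * initialiser in xa_delete_node(). */
	static unsigned long node_first_index(unsigned char offset,
			unsigned char shift)
	{
		return (unsigned long)offset << (shift + 6);
	}

	/* A bottom-level node (shift 0) in parent slot 3 starts at
	 * 3 << 6 = index 192; a shift-6 node in slot 2 starts at
	 * 2 << 12 = index 8192. */

The cast to unsigned long matters for the same reason as the
xas_create_range() fix later in this series: offset is an unsigned
char, and the shift would otherwise be done in 32-bit arithmetic. ]
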
From ca7b639e8611b3260a30b18aaa0d6db9c80a75ef Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Sun, 2 Aug 2020 14:17:21 -0400
Subject: XArray: Fix xas_reload for multi-index entries

xas_reload() was only checking that the head entry was still at the
head index.  If the entry has been split, that's not enough as there
may be a different entry at the specified index now.  Solve this by
checking the slot for the requested index instead of the head index.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/xarray.h | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 29db4e16eb89..4be9c57132fe 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1524,10 +1524,21 @@ void xas_create_range(struct xa_state *);
 static inline void *xas_reload(struct xa_state *xas)
 {
 	struct xa_node *node = xas->xa_node;
-
-	if (node)
-		return xa_entry(xas->xa, node, xas->xa_offset);
-	return xa_head(xas->xa);
+	void *entry;
+	char offset;
+
+	if (!node)
+		return xa_head(xas->xa);
+	if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
+		offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
+		entry = xa_entry(xas->xa, node, offset);
+		if (!xa_is_sibling(entry))
+			return entry;
+		offset = xa_to_sibling(entry);
+	} else {
+		offset = xas->xa_offset;
+	}
+	return xa_entry(xas->xa, node, offset);
 }
 
 /**
--
cgit v1.2.3
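
[ Background for the fix above: a multi-order entry is stored once, at
its canonical slot, and the remaining slots it covers hold sibling
entries pointing back at that slot; xa_to_sibling() decodes the
canonical offset.  A hedged sketch of the offset arithmetic, where
slot_for() is our illustration rather than a kernel helper, and 63
stands in for XA_CHUNK_MASK with the default XA_CHUNK_SHIFT of 6:

	static unsigned char slot_for(unsigned long index,
			unsigned char shift)
	{
		return (index >> shift) & 63;	/* 63 == XA_CHUNK_MASK */
	}

	/* For an order-2 entry at index 0x40 in a bottom-level node
	 * (shift 0): slot_for(0x40, 0) == 0 is the canonical slot, and
	 * slot_for(0x42, 0) == 2 holds a sibling entry, which the new
	 * xas_reload() follows back to slot 0 via xa_to_sibling(). */
]
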
From f78b8250a076ac63ddd021c7ea9739bcc2f6f737 Mon Sep 17 00:00:00 2001
From: Hui Su <sh_def@163.com>
Date: Mon, 28 Sep 2020 01:15:53 +0800
Subject: radix-tree: fix the comment of radix_tree_next_slot()

Fix the comment of radix_tree_next_slot(): interator --> iterator.

Signed-off-by: Hui Su <sh_def@163.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/radix-tree.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5c85059a92ba..64ad900ac742 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -377,7 +377,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
  * radix_tree_next_slot - find next slot in chunk
  *
  * @slot:	pointer to current slot
- * @iter:	pointer to interator state
+ * @iter:	pointer to iterator state
  * @flags:	RADIX_TREE_ITER_*, should be constant
  * Returns:	pointer to next slot, or NULL if there no more left
  *
--
cgit v1.2.3

From 84c34df158cf215b0cd1475ab3b8e6f212f81f23 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Tue, 13 Oct 2020 08:46:29 -0400
Subject: XArray: Fix xas_create_range for ranges above 4 billion

The 'sibs' variable would be shifted as a 32-bit integer, so if 'shift'
is more than 32, this is undefined behaviour.  In practice, this doesn't
happen because the page cache is the only user and nobody uses 16TB
pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 lib/xarray.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/xarray.c b/lib/xarray.c
index 1fa5c5658e63..2046d676ab41 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -703,7 +703,7 @@ void xas_create_range(struct xa_state *xas)
 	unsigned char shift = xas->xa_shift;
 	unsigned char sibs = xas->xa_sibs;
 
-	xas->xa_index |= ((sibs + 1) << shift) - 1;
+	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
 	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
 		xas->xa_offset |= sibs;
 	xas->xa_shift = 0;
--
cgit v1.2.3
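
[ The one-character fix above is a classic integer-promotion bug:
'sibs' is an unsigned char, so 'sibs + 1' promotes to a 32-bit int,
and shifting a 32-bit value by 32 or more bits is undefined behaviour.
Writing 'sibs + 1UL' makes the whole expression unsigned long, so the
shift is well-defined for any shift up to 63 on 64-bit kernels.  A
standalone userspace illustration, not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned char sibs = 1;
		unsigned char shift = 42;

		/* Broken form: ((sibs + 1) << shift) - 1.  'sibs + 1'
		 * is a 32-bit int, and a 42-bit shift of a 32-bit
		 * value is undefined behaviour: the result could be
		 * anything.
		 *
		 * Fixed form, as in the patch: promote to unsigned
		 * long before shifting.  2UL << 42 == 1UL << 43. */
		unsigned long index = ((sibs + 1UL) << shift) - 1;

		printf("0x%lx\n", index);	/* prints 0x7ffffffffff */
		return 0;
	}
]
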