| author | Matthew Wilcox | 2018-07-04 10:50:12 -0400 |
| --- | --- | --- |
| committer | Matthew Wilcox | 2018-10-21 10:46:32 -0400 |
| commit | 371c752dc66948714ee3b66c3306f3ff1ff71d2e (patch) | |
| tree | a9707d94787474e20b4efcd9bc81d5e6e1328a66 /lib | |
| parent | 3d5bd6e1a04ae779bc5e0de9ba2e92aa55c40fe8 (diff) | |
xarray: Track free entries in an XArray
Add the optional ability to track which entries in an XArray are free
and provide xa_alloc() to replace most of the functionality of the IDR.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
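
For readers coming from the IDR, here is a minimal usage sketch of the allocating-XArray API this patch introduces; the call signature matches the xa_alloc() calls exercised in the test code below, but the `struct foo` object and the `foo_register()`/`foo_unregister()` helpers are hypothetical illustrations, not part of the patch:

```c
#include <linux/xarray.h>

/* Hypothetical object keyed by a small integer ID, IDR-style. */
struct foo {
	u32 id;
	/* ... payload ... */
};

/* DEFINE_XARRAY_ALLOC declares an XArray with free-entry tracking enabled. */
static DEFINE_XARRAY_ALLOC(foo_ids);

static int foo_register(struct foo *foo)
{
	u32 id = 0;	/* start searching for a free index at 0 */
	int err;

	/*
	 * Find a free index no larger than UINT_MAX, store @foo there and
	 * report the chosen index through @id.  Fails with -ENOMEM if memory
	 * allocation fails, or -ENOSPC if every index is already in use.
	 */
	err = xa_alloc(&foo_ids, &id, UINT_MAX, foo, GFP_KERNEL);
	if (err)
		return err;

	foo->id = id;
	return 0;
}

static void foo_unregister(struct foo *foo)
{
	/* Erasing the entry marks the index free for reuse by xa_alloc(). */
	xa_erase(&foo_ids, foo->id);
}
```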
Diffstat (limited to 'lib')
-rw-r--r-- | lib/test_xarray.c | 61
-rw-r--r-- | lib/xarray.c | 88

2 files changed, 145 insertions, 4 deletions
```diff
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 6aafd411a5c3..a752e6a37e6f 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -33,6 +33,15 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
 }
 
+static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+	u32 id = 0;
+
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+				gfp) != 0);
+	XA_BUG_ON(xa, id != index);
+}
+
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
 	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
@@ -404,6 +413,57 @@ static noinline void check_multi_store(struct xarray *xa)
 #endif
 }
 
+static DEFINE_XARRAY_ALLOC(xa0);
+
+static noinline void check_xa_alloc(void)
+{
+	int i;
+	u32 id;
+
+	/* An empty array should assign 0 to the first alloc */
+	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+	/* Erasing it should make the array empty again */
+	xa_erase_index(&xa0, 0);
+	XA_BUG_ON(&xa0, !xa_empty(&xa0));
+
+	/* And it should assign 0 again */
+	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+	/* The next assigned ID should be 1 */
+	xa_alloc_index(&xa0, 1, GFP_KERNEL);
+	xa_erase_index(&xa0, 1);
+
+	/* Storing a value should mark it used */
+	xa_store_index(&xa0, 1, GFP_KERNEL);
+	xa_alloc_index(&xa0, 2, GFP_KERNEL);
+
+	/* If we then erase 0, it should be free */
+	xa_erase_index(&xa0, 0);
+	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+	xa_erase_index(&xa0, 1);
+	xa_erase_index(&xa0, 2);
+
+	for (i = 1; i < 5000; i++) {
+		xa_alloc_index(&xa0, i, GFP_KERNEL);
+	}
+
+	xa_destroy(&xa0);
+
+	id = 0xfffffffeU;
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+				GFP_KERNEL) != 0);
+	XA_BUG_ON(&xa0, id != 0xfffffffeU);
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+				GFP_KERNEL) != 0);
+	XA_BUG_ON(&xa0, id != 0xffffffffU);
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+				GFP_KERNEL) != -ENOSPC);
+	XA_BUG_ON(&xa0, id != 0xffffffffU);
+	xa_destroy(&xa0);
+}
+
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
 			unsigned int order, unsigned int present)
 {
@@ -849,6 +909,7 @@ static int xarray_checks(void)
 	check_cmpxchg(&array);
 	check_reserve(&array);
 	check_multi_store(&array);
+	check_xa_alloc();
 	check_find(&array);
 	check_destroy(&array);
 	check_move(&array);
diff --git a/lib/xarray.c b/lib/xarray.c
index 546461914282..9a0d49d4b5f0 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -52,6 +52,11 @@ static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
 		xas_unlock(xas);
 }
 
+static inline bool xa_track_free(const struct xarray *xa)
+{
+	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
+}
+
 static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
 {
 	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -94,6 +99,11 @@ static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
 	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
 }
 
+static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
+{
+	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
 #define mark_inc(mark) do { \
 	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
 } while (0)
@@ -416,6 +426,8 @@ static void xas_shrink(struct xa_state *xas)
 		xas->xa_node = XAS_BOUNDS;
 
 		RCU_INIT_POINTER(xa->xa_head, entry);
+		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
+			xa_mark_clear(xa, XA_FREE_MARK);
 
 		node->count = 0;
 		node->nr_values = 0;
@@ -549,8 +561,15 @@ static int xas_expand(struct xa_state *xas, void *head)
 
 		/* Propagate the aggregated mark info to the new child */
 		for (;;) {
-			if (xa_marked(xa, mark))
+			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
+				node_mark_all(node, XA_FREE_MARK);
+				if (!xa_marked(xa, XA_FREE_MARK)) {
+					node_clear_mark(node, 0, XA_FREE_MARK);
+					xa_mark_set(xa, XA_FREE_MARK);
+				}
+			} else if (xa_marked(xa, mark)) {
 				node_set_mark(node, 0, mark);
+			}
 			if (mark == XA_MARK_MAX)
 				break;
 			mark_inc(mark);
@@ -624,6 +643,8 @@ static void *xas_create(struct xa_state *xas)
 			node = xas_alloc(xas, shift);
 			if (!node)
 				break;
+			if (xa_track_free(xa))
+				node_mark_all(node, XA_FREE_MARK);
 			rcu_assign_pointer(*slot, xa_mk_node(node));
 		} else if (xa_is_node(entry)) {
 			node = xa_to_node(entry);
@@ -882,7 +903,10 @@ void xas_init_marks(const struct xa_state *xas)
 	xa_mark_t mark = 0;
 
 	for (;;) {
-		xas_clear_mark(xas, mark);
+		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
+			xas_set_mark(xas, mark);
+		else
+			xas_clear_mark(xas, mark);
 		if (mark == XA_MARK_MAX)
 			break;
 		mark_inc(mark);
@@ -1333,6 +1357,8 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 	do {
 		xas_lock(&xas);
 		curr = xas_store(&xas, entry);
+		if (xa_track_free(xa) && entry)
+			xas_clear_mark(&xas, XA_FREE_MARK);
 		xas_unlock(&xas);
 	} while (xas_nomem(&xas, gfp));
 
@@ -1365,6 +1391,8 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 
 	do {
 		curr = xas_store(&xas, entry);
+		if (xa_track_free(xa) && entry)
+			xas_clear_mark(&xas, XA_FREE_MARK);
 	} while (__xas_nomem(&xas, gfp));
 
 	return xas_result(&xas, curr);
@@ -1400,8 +1428,11 @@ void *xa_cmpxchg(struct xarray *xa, unsigned long index,
 		curr = xas_load(&xas);
 		if (curr == XA_ZERO_ENTRY)
 			curr = NULL;
-		if (curr == old)
+		if (curr == old) {
 			xas_store(&xas, entry);
+			if (xa_track_free(xa) && entry)
+				xas_clear_mark(&xas, XA_FREE_MARK);
+		}
 		xas_unlock(&xas);
 	} while (xas_nomem(&xas, gfp));
 
@@ -1438,8 +1469,11 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 		curr = xas_load(&xas);
 		if (curr == XA_ZERO_ENTRY)
 			curr = NULL;
-		if (curr == old)
+		if (curr == old) {
 			xas_store(&xas, entry);
+			if (xa_track_free(xa) && entry)
+				xas_clear_mark(&xas, XA_FREE_MARK);
+		}
 	} while (__xas_nomem(&xas, gfp));
 
 	return xas_result(&xas, curr);
@@ -1484,6 +1518,52 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
 EXPORT_SYMBOL(xa_reserve);
 
 /**
+ * __xa_alloc() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @max: Maximum ID to allocate (inclusive).
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @id and @max.
+ * Updates the @id pointer with the index, then stores the entry at that
+ * index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
+ * there is no more space in the XArray.
+ */
+int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, 0);
+	int err;
+
+	if (WARN_ON_ONCE(xa_is_internal(entry)))
+		return -EINVAL;
+	if (WARN_ON_ONCE(!xa_track_free(xa)))
+		return -EINVAL;
+
+	if (!entry)
+		entry = XA_ZERO_ENTRY;
+
+	do {
+		xas.xa_index = *id;
+		xas_find_marked(&xas, max, XA_FREE_MARK);
+		if (xas.xa_node == XAS_RESTART)
+			xas_set_err(&xas, -ENOSPC);
+		xas_store(&xas, entry);
+		xas_clear_mark(&xas, XA_FREE_MARK);
+	} while (__xas_nomem(&xas, gfp));
+
+	err = xas_error(&xas);
+	if (!err)
+		*id = xas.xa_index;
+	return err;
+}
+EXPORT_SYMBOL(__xa_alloc);
+
+/**
  * __xa_set_mark() - Set this mark on this entry while locked.
  * @xa: XArray.
  * @index: Index of entry.
```
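
As a companion to the __xa_alloc() kernel-doc above, here is a hedged sketch of how a caller that already holds the xa_lock might invoke the locked variant directly; `bar_ids` and `bar_install()` are illustrative names only, not kernel API, and the GFP_KERNEL caveat is taken straight from the Context note in the patch:

```c
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(bar_ids);

/*
 * Illustrative only: take xa_lock around __xa_alloc().  Per the kernel-doc
 * above, with GFP_KERNEL the lock may be released and reacquired while
 * memory is allocated, so state protected solely by this lock cannot be
 * assumed stable across the call.
 */
static int bar_install(void *payload, u32 *out_id)
{
	int err;

	*out_id = 0;	/* start searching for a free index at 0 */

	xa_lock(&bar_ids);
	err = __xa_alloc(&bar_ids, out_id, UINT_MAX, payload, GFP_KERNEL);
	xa_unlock(&bar_ids);

	return err;
}
```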