author     Jason A. Donenfeld    2022-10-09 20:44:02 -0600
committer  Jason A. Donenfeld    2022-11-18 02:18:02 +0100
commit     e8a533cbeb79809206f8724e89961e0079508c3c (patch)
tree       b81da4151f67029174482ab2fdbee7dc8c98c931 /mm
parent     d247aabd391c3b2fa4f26874ed9136a7a142fcfd (diff)
treewide: use get_random_u32_inclusive() when possible
These cases were done with this Coccinelle:

@@
expression H;
expression L;
@@
- (get_random_u32_below(H) + L)
+ get_random_u32_inclusive(L, H + L - 1)

@@
expression H;
expression L;
expression E;
@@
  get_random_u32_inclusive(L,
  H
- + E
- - E
  )

@@
expression H;
expression L;
expression E;
@@
  get_random_u32_inclusive(L,
  H
- - E
- + E
  )

@@
expression H;
expression L;
expression E;
expression F;
@@
  get_random_u32_inclusive(L,
  H
- - E
  + F
- + E
  )

@@
expression H;
expression L;
expression E;
expression F;
@@
  get_random_u32_inclusive(L,
  H
- + E
  + F
- - E
  )

And then subsequently cleaned up by hand, with several automatic cases
rejected if it didn't make sense contextually.

Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> # for infiniband
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
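The conversions below are equivalent because get_random_u32_below(H) yields a value in [0, H), so get_random_u32_below(H) + L covers exactly the range [L, H + L - 1] that get_random_u32_inclusive(L, H + L - 1) names directly. The following is a minimal userspace sketch of that equivalence, not kernel code: sketch_u32_below() and sketch_u32_inclusive() are hypothetical stand-ins for the helpers declared in <linux/random.h>, and rand() is only a placeholder randomness source.

/*
 * Userspace sketch only: stand-in helpers mirroring the semantics of
 * get_random_u32_below()/get_random_u32_inclusive(), to show that the
 * old and new spellings draw from the same range.  rand() is a
 * placeholder; it is biased and not what the kernel uses.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Value in [0, ceil), like get_random_u32_below(); ceil must be nonzero. */
static uint32_t sketch_u32_below(uint32_t ceil)
{
	return (uint32_t)rand() % ceil;
}

/* Value in [floor, ceil], like get_random_u32_inclusive(). */
static uint32_t sketch_u32_inclusive(uint32_t floor, uint32_t ceil)
{
	return floor + sketch_u32_below(ceil - floor + 1);
}

int main(void)
{
	const uint32_t H = 1024, L = 1;	/* the kasan_test.c case below */

	/* Old spelling: caller adds the offset. */
	uint32_t old_style = sketch_u32_below(H) + L;
	/* New spelling: bounds stated directly, as in this patch. */
	uint32_t new_style = sketch_u32_inclusive(L, H + L - 1);

	printf("old: %u  new: %u  (both in [%u, %u])\n",
	       (unsigned)old_style, (unsigned)new_style,
	       (unsigned)L, (unsigned)(H + L - 1));
	return 0;
}

Both draws always land in [1, 1024], matching the match_all_not_assigned() hunks in mm/kasan/kasan_test.c below.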
Diffstat (limited to 'mm')
-rw-r--r--  mm/kasan/kasan_test.c    6
-rw-r--r--  mm/kfence/kfence_test.c  2
-rw-r--r--  mm/swapfile.c            5
3 files changed, 6 insertions, 7 deletions
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 640f9c7f8e44..54181eba3e24 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -1299,7 +1299,7 @@ static void match_all_not_assigned(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
for (i = 0; i < 256; i++) {
- size = get_random_u32_below(1024) + 1;
+ size = get_random_u32_inclusive(1, 1024);
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -1308,7 +1308,7 @@ static void match_all_not_assigned(struct kunit *test)
}
for (i = 0; i < 256; i++) {
- order = get_random_u32_below(4) + 1;
+ order = get_random_u32_inclusive(1, 4);
pages = alloc_pages(GFP_KERNEL, order);
ptr = page_address(pages);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1321,7 +1321,7 @@ static void match_all_not_assigned(struct kunit *test)
return;
for (i = 0; i < 256; i++) {
- size = get_random_u32_below(1024) + 1;
+ size = get_random_u32_inclusive(1, 1024);
ptr = vmalloc(size);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 20028c179796..b5d66a69200d 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -532,7 +532,7 @@ static void test_free_bulk(struct kunit *test)
int iter;
for (iter = 0; iter < 5; iter++) {
- const size_t size = setup_test_cache(test, 8 + get_random_u32_below(300),
+ const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
0, (iter & 1) ? ctor_set_x : NULL);
void *objects[] = {
test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e9318305a24a..4ee31056d3f8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -772,8 +772,7 @@ static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
/* No free swap slots available */
if (si->highest_bit <= si->lowest_bit)
return;
- next = si->lowest_bit +
- get_random_u32_below(si->highest_bit - si->lowest_bit + 1);
+ next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
next = max_t(unsigned int, next, si->lowest_bit);
}
@@ -3089,7 +3088,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
*/
for_each_possible_cpu(cpu) {
per_cpu(*p->cluster_next_cpu, cpu) =
- 1 + get_random_u32_below(p->highest_bit);
+ get_random_u32_inclusive(1, p->highest_bit);
}
nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);