Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                                 |   3
-rw-r--r--  lib/Kconfig.debug                           |   2
-rw-r--r--  lib/Kconfig.ubsan                           |   5
-rw-r--r--  lib/Makefile                                |   3
-rw-r--r--  lib/crypto/Kconfig                          |   4
-rw-r--r--  lib/crypto/Makefile                         |   3
-rw-r--r--  lib/crypto/blake2s-selftest.c               |  41
-rw-r--r--  lib/crypto/blake2s.c                        |  37
-rw-r--r--  lib/crypto/sha1.c (renamed from lib/sha1.c) |   3
-rw-r--r--  lib/idr.c                                   |   3
-rw-r--r--  lib/iov_iter.c                              | 375
-rw-r--r--  lib/kunit/executor.c                        | 125
-rw-r--r--  lib/kunit/executor_test.c                   | 144
-rw-r--r--  lib/kunit/test.c                            |  58
-rw-r--r--  lib/lockref.c                               |  25
-rw-r--r--  lib/memneq.c                                | 176
-rw-r--r--  lib/mpi/mpi-add.c                           |   2
-rw-r--r--  lib/mpi/mpi-mul.c                           |   1
-rw-r--r--  lib/overflow_kunit.c                        |   6
-rw-r--r--  lib/sbitmap.c                               |   5
-rw-r--r--  lib/test_kasan.c                            |  10
21 files changed, 562 insertions(+), 469 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 6a843639814f..eaaad4d85bf2 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -120,6 +120,9 @@ config INDIRECT_IOMEM_FALLBACK
source "lib/crypto/Kconfig"
+config LIB_MEMNEQ
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2e24db4bff19..79a71eb96111 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -498,7 +498,7 @@ config STACK_VALIDATION
runtime stack traces are more reliable.
For more information, see
- tools/objtool/Documentation/stack-validation.txt.
+ tools/objtool/Documentation/objtool.txt.
config NOINSTR_VALIDATION
bool
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index c4fe15d38b60..fd15230a703b 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -84,6 +84,9 @@ config UBSAN_SHIFT
config UBSAN_DIV_ZERO
bool "Perform checking for integer divide-by-zero"
depends on $(cc-option,-fsanitize=integer-divide-by-zero)
+ # https://github.com/ClangBuiltLinux/linux/issues/1657
+ # https://github.com/llvm/llvm-project/issues/56289
+ depends on !CC_IS_CLANG
help
This option enables -fsanitize=integer-divide-by-zero which checks
for integer division by zero. This is effectively redundant with the
@@ -94,7 +97,7 @@ config UBSAN_UNREACHABLE
bool "Perform checking for unreachable code"
# objtool already handles unreachable checking and gets angry about
# seeing UBSan instrumentation located in unreachable places.
- depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP))
+ depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION))
depends on $(cc-option,-fsanitize=unreachable)
help
This option enables -fsanitize=unreachable which checks for control
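
For context, a minimal sketch of the undefined behaviour that
-fsanitize=integer-divide-by-zero instruments; this is a hypothetical
standalone C program, not kernel code:

    /* Build with: cc -fsanitize=integer-divide-by-zero demo.c
     * UBSan inserts a check before the division and reports it at
     * runtime instead of letting the program trap or misbehave. */
    #include <stdio.h>

    int main(void)
    {
            int divisor = 0;  /* value only known at runtime */
            printf("%d\n", 42 / divisor);  /* runtime error: division by zero */
            return 0;
    }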
diff --git a/lib/Makefile b/lib/Makefile
index ea54294d73bf..67482f5ec0e8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
- idr.o extable.o sha1.o irq_regs.o argv_split.o \
+ idr.o extable.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -251,6 +251,7 @@ obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
+lib-$(CONFIG_LIB_MEMNEQ) += memneq.o
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 9856e291f414..9ff549f63540 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519
tristate "Curve25519 scalar multiplication library"
depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+ select LIB_MEMNEQ
help
Enable the Curve25519 library interface. This interface may be
fulfilled by either the generic implementation or an arch-specific
@@ -120,6 +121,9 @@ config CRYPTO_LIB_CHACHA20POLY1305
select CRYPTO_LIB_POLY1305
select CRYPTO_ALGAPI
+config CRYPTO_LIB_SHA1
+ tristate
+
config CRYPTO_LIB_SHA256
tristate
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 26be2bbe09c5..919cbb2c220d 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -34,6 +34,9 @@ libpoly1305-y := poly1305-donna32.o
libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
libpoly1305-y += poly1305.o
+obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o
+libsha1-y := sha1.o
+
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
index 409e4b728770..7d77dea15587 100644
--- a/lib/crypto/blake2s-selftest.c
+++ b/lib/crypto/blake2s-selftest.c
@@ -4,6 +4,8 @@
*/
#include <crypto/internal/blake2s.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
#include <linux/string.h>
/*
@@ -587,5 +589,44 @@ bool __init blake2s_selftest(void)
}
}
+ for (i = 0; i < 32; ++i) {
+ enum { TEST_ALIGNMENT = 16 };
+ u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1]
+ __aligned(TEST_ALIGNMENT);
+ u8 blocks[BLAKE2S_BLOCK_SIZE * 2];
+ struct blake2s_state state1, state2;
+
+ get_random_bytes(blocks, sizeof(blocks));
+ get_random_bytes(&state, sizeof(state));
+
+#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
+ defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+ memcpy(&state1, &state, sizeof(state1));
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+#endif
+
+ memcpy(&state1, &state, sizeof(state1));
+ blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
+ for (l = 1; l < TEST_ALIGNMENT; ++l) {
+ memcpy(unaligned_block + l, blocks,
+ BLAKE2S_BLOCK_SIZE);
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state2, unaligned_block + l, 1,
+ BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress align %d self-test %d: FAIL\n",
+ l, i + 1);
+ success = false;
+ }
+ }
+ }
+
return success;
}
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index c71c09621c09..98e688c6d891 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -16,16 +16,44 @@
#include <linux/init.h>
#include <linux/bug.h>
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+ state->f[0] = -1;
+}
+
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- __blake2s_update(state, in, inlen, false);
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- __blake2s_final(state, out, false);
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ blake2s_compress(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
@@ -38,12 +66,7 @@ static int __init blake2s_mod_init(void)
return 0;
}
-static void __exit blake2s_mod_exit(void)
-{
-}
-
module_init(blake2s_mod_init);
-module_exit(blake2s_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
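
For reference, a minimal sketch of the incremental API that the open-coded
blake2s_update()/blake2s_final() above implement, assuming the declarations
in <crypto/blake2s.h>; the helper name is illustrative:

    #include <crypto/blake2s.h>

    static void demo_hash(const u8 *msg, size_t len, u8 out[BLAKE2S_HASH_SIZE])
    {
            struct blake2s_state state;

            blake2s_init(&state, BLAKE2S_HASH_SIZE);
            blake2s_update(&state, msg, len); /* buffers partial blocks */
            blake2s_final(&state, out);       /* pads, compresses, wipes state */
    }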
diff --git a/lib/sha1.c b/lib/crypto/sha1.c
index 0494766fc574..1aebe7be9401 100644
--- a/lib/sha1.c
+++ b/lib/crypto/sha1.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <crypto/sha1.h>
@@ -135,3 +136,5 @@ void sha1_init(__u32 *buf)
buf[4] = 0xc3d2e1f0;
}
EXPORT_SYMBOL(sha1_init);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/idr.c b/lib/idr.c
index f4ab4f4aa3c7..7ecdfdb5309e 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -491,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id)
struct ida_bitmap *bitmap;
unsigned long flags;
- BUG_ON((int)id < 0);
+ if ((int)id < 0)
+ return;
xas_lock_irqsave(&xas, flags);
bitmap = xas_load(&xas);
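
A sketch of the IDA pattern this change hardens, assuming <linux/idr.h>
(names are illustrative): ida_alloc() returns a negative errno on failure,
and with this change passing such a value to ida_free() returns early
instead of triggering BUG_ON():

    static DEFINE_IDA(demo_ida);

    static void demo(void)
    {
            int id = ida_alloc(&demo_ida, GFP_KERNEL);

            if (id >= 0) {
                    /* ... use id ... */
            }
            ida_free(&demo_ida, id); /* now a no-op if id is negative */
    }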
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 2bf20b48a04a..0e0be334dbee 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -168,174 +168,6 @@ static int copyin(void *to, const void __user *from, size_t n)
return n;
}
-static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
-{
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *from;
-
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
-
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
- kaddr = kmap_atomic(page);
- from = kaddr + offset;
-
- /* first chunk, usually the only one */
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
-
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = from - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
-
- kaddr = kmap(page);
- from = kaddr + offset;
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- kunmap(page);
-
-done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
-}
-
-static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
-{
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *to;
-
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
-
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
- kaddr = kmap_atomic(page);
- to = kaddr + offset;
-
- /* first chunk, usually the only one */
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
-
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = to - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
-
- kaddr = kmap(page);
- to = kaddr + offset;
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- kunmap(page);
-
-done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
-}
-
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
@@ -855,24 +687,14 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
- if (likely(iter_is_iovec(i)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
+ } else {
void *kaddr = kmap_local_page(page);
size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
kunmap_local(kaddr);
return wanted;
}
- if (iov_iter_is_pipe(i))
- return copy_page_to_iter_pipe(page, offset, bytes, i);
- if (unlikely(iov_iter_is_discard(i))) {
- if (unlikely(i->count < bytes))
- bytes = i->count;
- i->count -= bytes;
- return bytes;
- }
- WARN_ON(1);
- return 0;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
@@ -903,17 +725,12 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
- if (unlikely(!page_copy_sane(page, offset, bytes)))
- return 0;
- if (likely(iter_is_iovec(i)))
- return copy_page_from_iter_iovec(page, offset, bytes, i);
- if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ if (page_copy_sane(page, offset, bytes)) {
void *kaddr = kmap_local_page(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_local(kaddr);
return wanted;
}
- WARN_ON(1);
return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
@@ -1036,17 +853,22 @@ static void pipe_advance(struct iov_iter *i, size_t size)
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
- struct bvec_iter bi;
+ const struct bio_vec *bvec, *end;
- bi.bi_size = i->count;
- bi.bi_bvec_done = i->iov_offset;
- bi.bi_idx = 0;
- bvec_iter_advance(i->bvec, &bi, size);
+ if (!i->count)
+ return;
+ i->count -= size;
+
+ size += i->iov_offset;
- i->bvec += bi.bi_idx;
- i->nr_segs -= bi.bi_idx;
- i->count = bi.bi_size;
- i->iov_offset = bi.bi_bvec_done;
+ for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
+ if (likely(size < bvec->bv_len))
+ break;
+ size -= bvec->bv_len;
+ }
+ i->iov_offset = size;
+ i->nr_segs -= bvec - i->bvec;
+ i->bvec = bvec;
}
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
@@ -1275,6 +1097,98 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
}
EXPORT_SYMBOL(iov_iter_discard);
+static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->iov[k].iov_len - skip;
+
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
+}
+
+static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
+}
+
+/**
+ * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
+ * are aligned to the parameters.
+ *
+ * @i: &struct iov_iter to check
+ * @addr_mask: bit mask to check against the iov element's addresses
+ * @len_mask: bit mask to check against the iov element's lengths
+ *
+ * Return: false if any addresses or lengths intersect with the provided masks
+ */
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_aligned_iovec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_aligned_bvec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_pipe(i)) {
+ unsigned int p_mask = i->pipe->ring_size - 1;
+ size_t size = i->count;
+
+ if (size & len_mask)
+ return false;
+ if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
+ if (i->iov_offset & addr_mask)
+ return false;
+ }
+
+ return true;
+ }
+
+ if (iov_iter_is_xarray(i)) {
+ if (i->count & len_mask)
+ return false;
+ if ((i->xarray_start + i->iov_offset) & addr_mask)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
+
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
unsigned long res = 0;
@@ -1472,47 +1386,36 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
}
/* must be done on non-empty ITER_IOVEC one */
-static unsigned long first_iovec_segment(const struct iov_iter *i,
- size_t *size, size_t *start,
- size_t maxsize, unsigned maxpages)
+static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
size_t skip;
long k;
for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
- unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
size_t len = i->iov[k].iov_len - skip;
if (unlikely(!len))
continue;
- if (len > maxsize)
- len = maxsize;
- len += (*start = addr % PAGE_SIZE);
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- *size = len;
- return addr & PAGE_MASK;
+ if (*size > len)
+ *size = len;
+ return (unsigned long)i->iov[k].iov_base + skip;
}
BUG(); // if it had been empty, we wouldn't get called
}
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
- size_t *size, size_t *start,
- size_t maxsize, unsigned maxpages)
+ size_t *size, size_t *start)
{
struct page *page;
size_t skip = i->iov_offset, len;
len = i->bvec->bv_len - skip;
- if (len > maxsize)
- len = maxsize;
+ if (*size > len)
+ *size = len;
skip += i->bvec->bv_offset;
page = i->bvec->bv_page + skip / PAGE_SIZE;
- len += (*start = skip % PAGE_SIZE);
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- *size = len;
+ *start = skip % PAGE_SIZE;
return page;
}
@@ -1520,13 +1423,14 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
- size_t len;
int n, res;
if (maxsize > i->count)
maxsize = i->count;
if (!maxsize)
return 0;
+ if (maxsize > MAX_RW_COUNT)
+ maxsize = MAX_RW_COUNT;
if (likely(iter_is_iovec(i))) {
unsigned int gup_flags = 0;
@@ -1537,21 +1441,27 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
- addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
+ addr = first_iovec_segment(i, &maxsize);
+ *start = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
+ if (n > maxpages)
+ n = maxpages;
res = get_user_pages_fast(addr, n, gup_flags, pages);
if (unlikely(res <= 0))
return res;
- return (res == n ? len : res * PAGE_SIZE) - *start;
+ return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
}
if (iov_iter_is_bvec(i)) {
struct page *page;
- page = first_bvec_segment(i, &len, start, maxsize, maxpages);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
- while (n--)
+ page = first_bvec_segment(i, &maxsize, start);
+ n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
+ if (n > maxpages)
+ n = maxpages;
+ for (int k = 0; k < n; k++)
get_page(*pages++ = page++);
- return len - *start;
+ return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
}
if (iov_iter_is_pipe(i))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
@@ -1640,13 +1550,14 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
- size_t len;
int n, res;
if (maxsize > i->count)
maxsize = i->count;
if (!maxsize)
return 0;
+ if (maxsize > MAX_RW_COUNT)
+ maxsize = MAX_RW_COUNT;
if (likely(iter_is_iovec(i))) {
unsigned int gup_flags = 0;
@@ -1657,8 +1568,10 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
- addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
+ addr = first_iovec_segment(i, &maxsize);
+ *start = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
return -ENOMEM;
@@ -1669,19 +1582,19 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
return res;
}
*pages = p;
- return (res == n ? len : res * PAGE_SIZE) - *start;
+ return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
}
if (iov_iter_is_bvec(i)) {
struct page *page;
- page = first_bvec_segment(i, &len, start, maxsize, ~0U);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
+ page = first_bvec_segment(i, &maxsize, start);
+ n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
*pages = p = get_pages_array(n);
if (!p)
return -ENOMEM;
- while (n--)
+ for (int k = 0; k < n; k++)
get_page(*p++ = page++);
- return len - *start;
+ return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
}
if (iov_iter_is_pipe(i))
return pipe_get_pages_alloc(i, pages, maxsize, start);
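
A hedged sketch of how a caller might use the new iov_iter_is_aligned()
helper, e.g. a direct-I/O path checking every segment against a device's
logical block size (the function name here is illustrative):

    static bool demo_dio_aligned(const struct iov_iter *iter, unsigned int lbs)
    {
            /* Each segment's address and length must be a multiple of
             * lbs, i.e. the low bits covered by the masks must be clear. */
            return iov_iter_is_aligned(iter, lbs - 1, lbs - 1);
    }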
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 96f96e42ce06..5e223327196a 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -9,8 +9,8 @@
* These symbols point to the .kunit_test_suites section and are defined in
* include/asm-generic/vmlinux.lds.h, and consequently must be extern.
*/
-extern struct kunit_suite * const * const __kunit_suites_start[];
-extern struct kunit_suite * const * const __kunit_suites_end[];
+extern struct kunit_suite * const __kunit_suites_start[];
+extern struct kunit_suite * const __kunit_suites_end[];
#if IS_BUILTIN(CONFIG_KUNIT)
@@ -55,7 +55,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
/* Create a copy of suite with only tests that match test_glob. */
static struct kunit_suite *
-kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
+kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob)
{
int n = 0;
struct kunit_case *filtered, *test_case;
@@ -69,15 +69,15 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
if (n == 0)
return NULL;
- /* Use memcpy to workaround copy->name being const. */
- copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
if (!copy)
return ERR_PTR(-ENOMEM);
- memcpy(copy, suite, sizeof(*copy));
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
- if (!filtered)
+ if (!filtered) {
+ kfree(copy);
return ERR_PTR(-ENOMEM);
+ }
n = 0;
kunit_suite_for_each_test_case(suite, test_case) {
@@ -92,62 +92,18 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
static char *kunit_shutdown;
core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
-static struct kunit_suite * const *
-kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
- struct kunit_test_filter *filter)
-{
- int i, n = 0;
- struct kunit_suite **filtered, *filtered_suite;
-
- n = 0;
- for (i = 0; subsuite[i]; ++i) {
- if (glob_match(filter->suite_glob, subsuite[i]->name))
- ++n;
- }
-
- if (n == 0)
- return NULL;
-
- filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL);
- if (!filtered)
- return ERR_PTR(-ENOMEM);
-
- n = 0;
- for (i = 0; subsuite[i] != NULL; ++i) {
- if (!glob_match(filter->suite_glob, subsuite[i]->name))
- continue;
- filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob);
- if (IS_ERR(filtered_suite))
- return ERR_CAST(filtered_suite);
- else if (filtered_suite)
- filtered[n++] = filtered_suite;
- }
- filtered[n] = NULL;
-
- return filtered;
-}
-
+/* Stores an array of suites; end points one past the last suite */
struct suite_set {
- struct kunit_suite * const * const *start;
- struct kunit_suite * const * const *end;
+ struct kunit_suite * const *start;
+ struct kunit_suite * const *end;
};
-static void kunit_free_subsuite(struct kunit_suite * const *subsuite)
-{
- unsigned int i;
-
- for (i = 0; subsuite[i]; i++)
- kfree(subsuite[i]);
-
- kfree(subsuite);
-}
-
static void kunit_free_suite_set(struct suite_set suite_set)
{
- struct kunit_suite * const * const *suites;
+ struct kunit_suite * const *suites;
for (suites = suite_set.start; suites < suite_set.end; suites++)
- kunit_free_subsuite(*suites);
+ kfree(*suites);
kfree(suite_set.start);
}
@@ -156,7 +112,7 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
int *err)
{
int i;
- struct kunit_suite * const **copy, * const *filtered_subsuite;
+ struct kunit_suite **copy, *filtered_suite;
struct suite_set filtered;
struct kunit_test_filter filter;
@@ -171,14 +127,19 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
kunit_parse_filter_glob(&filter, filter_glob);
- for (i = 0; i < max; ++i) {
- filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter);
- if (IS_ERR(filtered_subsuite)) {
- *err = PTR_ERR(filtered_subsuite);
+ for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
+ if (!glob_match(filter.suite_glob, suite_set->start[i]->name))
+ continue;
+
+ filtered_suite = kunit_filter_tests(suite_set->start[i], filter.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
return filtered;
}
- if (filtered_subsuite)
- *copy++ = filtered_subsuite;
+ if (!filtered_suite)
+ continue;
+
+ *copy++ = filtered_suite;
}
filtered.end = copy;
@@ -201,52 +162,33 @@ static void kunit_handle_shutdown(void)
}
-static void kunit_print_tap_header(struct suite_set *suite_set)
-{
- struct kunit_suite * const * const *suites, * const *subsuite;
- int num_of_suites = 0;
-
- for (suites = suite_set->start; suites < suite_set->end; suites++)
- for (subsuite = *suites; *subsuite != NULL; subsuite++)
- num_of_suites++;
-
- pr_info("TAP version 14\n");
- pr_info("1..%d\n", num_of_suites);
-}
-
static void kunit_exec_run_tests(struct suite_set *suite_set)
{
- struct kunit_suite * const * const *suites;
+ size_t num_suites = suite_set->end - suite_set->start;
- kunit_print_tap_header(suite_set);
+ pr_info("TAP version 14\n");
+ pr_info("1..%zu\n", num_suites);
- for (suites = suite_set->start; suites < suite_set->end; suites++)
- __kunit_test_suites_init(*suites);
+ __kunit_test_suites_init(suite_set->start, num_suites);
}
static void kunit_exec_list_tests(struct suite_set *suite_set)
{
- unsigned int i;
- struct kunit_suite * const * const *suites;
+ struct kunit_suite * const *suites;
struct kunit_case *test_case;
/* Hack: print a tap header so kunit.py can find the start of KUnit output. */
pr_info("TAP version 14\n");
for (suites = suite_set->start; suites < suite_set->end; suites++)
- for (i = 0; (*suites)[i] != NULL; i++) {
- kunit_suite_for_each_test_case((*suites)[i], test_case) {
- pr_info("%s.%s\n", (*suites)[i]->name, test_case->name);
- }
+ kunit_suite_for_each_test_case((*suites), test_case) {
+ pr_info("%s.%s\n", (*suites)->name, test_case->name);
}
}
int kunit_run_all_tests(void)
{
- struct suite_set suite_set = {
- .start = __kunit_suites_start,
- .end = __kunit_suites_end,
- };
+ struct suite_set suite_set = {__kunit_suites_start, __kunit_suites_end};
int err = 0;
if (filter_glob_param) {
@@ -264,11 +206,10 @@ int kunit_run_all_tests(void)
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
- if (filter_glob_param) { /* a copy was made of each array */
+ if (filter_glob_param) { /* a copy was made of each suite */
kunit_free_suite_set(suite_set);
}
-
out:
kunit_handle_shutdown();
return err;
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index eac6ff480273..0cea31c27b23 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -9,8 +9,6 @@
#include <kunit/test.h>
static void kfree_at_end(struct kunit *test, const void *to_free);
-static void free_subsuite_at_end(struct kunit *test,
- struct kunit_suite *const *to_free);
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
const char *suite_name,
struct kunit_case *test_cases);
@@ -41,126 +39,80 @@ static void parse_filter_test(struct kunit *test)
kfree(filter.test_glob);
}
-static void filter_subsuite_test(struct kunit *test)
+static void filter_suites_test(struct kunit *test)
{
- struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
- struct kunit_suite * const *filtered;
- struct kunit_test_filter filter = {
- .suite_glob = "suite2",
- .test_glob = NULL,
- };
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
+ struct suite_set got;
+ int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2, NULL */
- filtered = kunit_filter_subsuite(subsuite, &filter);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
- free_subsuite_at_end(test, filtered);
+ got = kunit_filter_suites(&suite_set, "suite2", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start);
/* Validate we just have suite2 */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
- KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
- KUNIT_EXPECT_FALSE(test, filtered[1]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
+
+	/* Contains exactly one element (end points one past it) */
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
}
-static void filter_subsuite_test_glob_test(struct kunit *test)
+static void filter_suites_test_glob_test(struct kunit *test)
{
- struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
- struct kunit_suite * const *filtered;
- struct kunit_test_filter filter = {
- .suite_glob = "suite2",
- .test_glob = "test2",
- };
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
+ struct suite_set got;
+ int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */
- filtered = kunit_filter_subsuite(subsuite, &filter);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
- free_subsuite_at_end(test, filtered);
+ got = kunit_filter_suites(&suite_set, "suite2.test2", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start);
/* Validate we just have suite2 */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
- KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
- KUNIT_EXPECT_FALSE(test, filtered[1]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
/* Now validate we just have test2 */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]->test_cases);
- KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->test_cases[0].name, "test2");
- KUNIT_EXPECT_FALSE(test, filtered[0]->test_cases[1].name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->test_cases[0].name, "test2");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
}
-static void filter_subsuite_to_empty_test(struct kunit *test)
+static void filter_suites_to_empty_test(struct kunit *test)
{
- struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
- struct kunit_suite * const *filtered;
- struct kunit_test_filter filter = {
- .suite_glob = "not_found",
- .test_glob = NULL,
- };
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
+ struct suite_set got;
+ int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
- filtered = kunit_filter_subsuite(subsuite, &filter);
- free_subsuite_at_end(test, filtered); /* just in case */
+ got = kunit_filter_suites(&suite_set, "not_found", &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start); /* just in case */
- KUNIT_EXPECT_FALSE_MSG(test, filtered,
- "should be NULL to indicate no match");
-}
-
-static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_set)
-{
- struct kunit_suite * const * const *suites;
-
- kfree_at_end(test, suite_set->start);
- for (suites = suite_set->start; suites < suite_set->end; suites++)
- free_subsuite_at_end(test, *suites);
-}
-
-static void filter_suites_test(struct kunit *test)
-{
- /* Suites per-file are stored as a NULL terminated array */
- struct kunit_suite *subsuites[2][2] = {
- {NULL, NULL},
- {NULL, NULL},
- };
- /* Match the memory layout of suite_set */
- struct kunit_suite * const * const suites[2] = {
- subsuites[0], subsuites[1],
- };
-
- const struct suite_set suite_set = {
- .start = suites,
- .end = suites + 2,
- };
- struct suite_set filtered = {.start = NULL, .end = NULL};
- int err = 0;
-
- /* Emulate two files, each having one suite */
- subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases);
- subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
-
- /* Filter out suite1 */
- filtered = kunit_filter_suites(&suite_set, "suite0", &err);
- kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */
- KUNIT_EXPECT_EQ(test, err, 0);
- KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1);
-
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]);
- KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
}
static struct kunit_case executor_test_cases[] = {
KUNIT_CASE(parse_filter_test),
- KUNIT_CASE(filter_subsuite_test),
- KUNIT_CASE(filter_subsuite_test_glob_test),
- KUNIT_CASE(filter_subsuite_to_empty_test),
KUNIT_CASE(filter_suites_test),
+ KUNIT_CASE(filter_suites_test_glob_test),
+ KUNIT_CASE(filter_suites_to_empty_test),
{}
};
@@ -190,20 +142,6 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
(void *)to_free);
}
-static void free_subsuite_res_free(struct kunit_resource *res)
-{
- kunit_free_subsuite(res->data);
-}
-
-static void free_subsuite_at_end(struct kunit *test,
- struct kunit_suite *const *to_free)
-{
- if (IS_ERR_OR_NULL(to_free))
- return;
- kunit_alloc_resource(test, NULL, free_subsuite_res_free,
- GFP_KERNEL, (void *)to_free);
-}
-
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
const char *suite_name,
struct kunit_case *test_cases)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index a5053a07409f..b73d5bb5c473 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,7 +10,9 @@
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/panic.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
@@ -501,6 +503,9 @@ int kunit_run_tests(struct kunit_suite *suite)
struct kunit_result_stats suite_stats = { 0 };
struct kunit_result_stats total_stats = { 0 };
+ /* Taint the kernel so we know we've run tests. */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
if (suite->suite_init) {
suite->suite_init_err = suite->suite_init(suite);
if (suite->suite_init_err) {
@@ -581,11 +586,11 @@ static void kunit_init_suite(struct kunit_suite *suite)
suite->suite_init_err = 0;
}
-int __kunit_test_suites_init(struct kunit_suite * const * const suites)
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
{
unsigned int i;
- for (i = 0; suites[i] != NULL; i++) {
+ for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
kunit_run_tests(suites[i]);
}
@@ -598,17 +603,54 @@ static void kunit_exit_suite(struct kunit_suite *suite)
kunit_debugfs_destroy_suite(suite);
}
-void __kunit_test_suites_exit(struct kunit_suite **suites)
+void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
{
unsigned int i;
- for (i = 0; suites[i] != NULL; i++)
+ for (i = 0; i < num_suites; i++)
kunit_exit_suite(suites[i]);
kunit_suite_counter = 1;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+#ifdef CONFIG_MODULES
+static void kunit_module_init(struct module *mod)
+{
+ __kunit_test_suites_init(mod->kunit_suites, mod->num_kunit_suites);
+}
+
+static void kunit_module_exit(struct module *mod)
+{
+ __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites);
+}
+
+static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+
+ switch (val) {
+ case MODULE_STATE_LIVE:
+ kunit_module_init(mod);
+ break;
+ case MODULE_STATE_GOING:
+ kunit_module_exit(mod);
+ break;
+ case MODULE_STATE_COMING:
+ case MODULE_STATE_UNFORMED:
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block kunit_mod_nb = {
+ .notifier_call = kunit_module_notify,
+ .priority = 0,
+};
+#endif
+
struct kunit_kmalloc_array_params {
size_t n;
size_t size;
@@ -703,13 +745,19 @@ EXPORT_SYMBOL_GPL(kunit_cleanup);
static int __init kunit_init(void)
{
kunit_debugfs_init();
-
+#ifdef CONFIG_MODULES
+ return register_module_notifier(&kunit_mod_nb);
+#else
return 0;
+#endif
}
late_initcall(kunit_init);
static void __exit kunit_exit(void)
{
+#ifdef CONFIG_MODULES
+ unregister_module_notifier(&kunit_mod_nb);
+#endif
kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
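
The module-side counterpart that the new notifier enables, as a minimal
sketch assuming <kunit/test.h> (suite and case names are illustrative):
kunit_test_suites() places the suite in the module's .kunit_test_suites
section, and kunit_module_notify() runs it when the module goes live:

    #include <kunit/test.h>

    static void demo_case(struct kunit *test)
    {
            KUNIT_EXPECT_EQ(test, 1 + 1, 2);
    }

    static struct kunit_case demo_cases[] = {
            KUNIT_CASE(demo_case),
            {}
    };

    static struct kunit_suite demo_suite = {
            .name = "demo",
            .test_cases = demo_cases,
    };
    kunit_test_suites(&demo_suite);

    MODULE_LICENSE("GPL");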
diff --git a/lib/lockref.c b/lib/lockref.c
index c6f0b183b937..45e93ece8ba0 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -111,31 +111,6 @@ int lockref_put_not_zero(struct lockref *lockref)
EXPORT_SYMBOL(lockref_put_not_zero);
/**
- * lockref_get_or_lock - Increments count unless the count is 0 or dead
- * @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-int lockref_get_or_lock(struct lockref *lockref)
-{
- CMPXCHG_LOOP(
- new.count++;
- if (old.count <= 0)
- break;
- ,
- return 1;
- );
-
- spin_lock(&lockref->lock);
- if (lockref->count <= 0)
- return 0;
- lockref->count++;
- spin_unlock(&lockref->lock);
- return 1;
-}
-EXPORT_SYMBOL(lockref_get_or_lock);
-
-/**
* lockref_put_return - Decrement reference count if possible
* @lockref: pointer to lockref structure
*
diff --git a/lib/memneq.c b/lib/memneq.c
new file mode 100644
index 000000000000..fb11608b1ec1
--- /dev/null
+++ b/lib/memneq.c
@@ -0,0 +1,176 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ * James Yonan <james@openvpn.net>
+ * Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of OpenVPN Technologies nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/algapi.h>
+#include <asm/unaligned.h>
+
+#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+ unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ while (size >= sizeof(unsigned long)) {
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ a += sizeof(unsigned long);
+ b += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+ }
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ while (size > 0) {
+ neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+ OPTIMIZER_HIDE_VAR(neq);
+ a += 1;
+ b += 1;
+ size -= 1;
+ }
+ return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+ unsigned long neq = 0;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (sizeof(unsigned long) == 8) {
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned long *)(a + 8)) ^
+ get_unaligned((unsigned long *)(b + 8));
+ OPTIMIZER_HIDE_VAR(neq);
+ } else if (sizeof(unsigned int) == 4) {
+ neq |= get_unaligned((unsigned int *)a) ^
+ get_unaligned((unsigned int *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 4)) ^
+ get_unaligned((unsigned int *)(b + 4));
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 8)) ^
+ get_unaligned((unsigned int *)(b + 8));
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 12)) ^
+ get_unaligned((unsigned int *)(b + 12));
+ OPTIMIZER_HIDE_VAR(neq);
+ } else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ {
+ neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
+ OPTIMIZER_HIDE_VAR(neq);
+ }
+
+ return neq;
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes. Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+ size_t size)
+{
+ switch (size) {
+ case 16:
+ return __crypto_memneq_16(a, b);
+ default:
+ return __crypto_memneq_generic(a, b, size);
+ }
+}
+EXPORT_SYMBOL(__crypto_memneq);
+
+#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
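
Per the comment above, callers should use the crypto_memneq() wrapper from
<crypto/algapi.h> rather than __crypto_memneq() itself. A short sketch of
the intended use (the helper name is illustrative); comparing MACs this way
avoids the early-exit timing leak of memcmp():

    static bool demo_mac_equal(const u8 *expect, const u8 *got, size_t len)
    {
            return crypto_memneq(expect, got, len) == 0; /* 0 means equal */
    }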
diff --git a/lib/mpi/mpi-add.c b/lib/mpi/mpi-add.c
index 2cdae54c1bd0..9056fc5167fc 100644
--- a/lib/mpi/mpi-add.c
+++ b/lib/mpi/mpi-add.c
@@ -138,7 +138,7 @@ void mpi_sub(MPI w, MPI u, MPI v)
mpi_add(w, u, vv);
mpi_free(vv);
}
-
+EXPORT_SYMBOL_GPL(mpi_sub);
void mpi_addm(MPI w, MPI u, MPI v, MPI m)
{
diff --git a/lib/mpi/mpi-mul.c b/lib/mpi/mpi-mul.c
index 8f5fa200f297..7f4eda8560dc 100644
--- a/lib/mpi/mpi-mul.c
+++ b/lib/mpi/mpi-mul.c
@@ -82,6 +82,7 @@ void mpi_mul(MPI w, MPI u, MPI v)
if (tmp_limb)
mpi_free_limb_space(tmp_limb);
}
+EXPORT_SYMBOL_GPL(mpi_mul);
void mpi_mulm(MPI w, MPI u, MPI v, MPI m)
{
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 475f0c064bf6..7e3e43679b73 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -91,6 +91,7 @@ DEFINE_TEST_ARRAY(u32) = {
{-4U, 5U, 1U, -9U, -20U, true, false, true},
};
+#if BITS_PER_LONG == 64
DEFINE_TEST_ARRAY(u64) = {
{0, 0, 0, 0, 0, false, false, false},
{1, 1, 2, 0, 1, false, false, false},
@@ -114,6 +115,7 @@ DEFINE_TEST_ARRAY(u64) = {
false, true, false},
{-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
};
+#endif
DEFINE_TEST_ARRAY(s8) = {
{0, 0, 0, 0, 0, false, false, false},
@@ -188,6 +190,8 @@ DEFINE_TEST_ARRAY(s32) = {
{S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
{S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
};
+
+#if BITS_PER_LONG == 64
DEFINE_TEST_ARRAY(s64) = {
{0, 0, 0, 0, 0, false, false, false},
@@ -216,6 +220,7 @@ DEFINE_TEST_ARRAY(s64) = {
{-128, -1, -129, -127, 128, false, false, false},
{0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
};
+#endif
#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
t _r; \
@@ -650,6 +655,7 @@ static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(s16_overflow_test),
KUNIT_CASE(u32_overflow_test),
KUNIT_CASE(s32_overflow_test),
+/* Clang 13 and earlier generate unwanted libcalls on 32-bit. */
#if BITS_PER_LONG == 64
KUNIT_CASE(u64_overflow_test),
KUNIT_CASE(s64_overflow_test),
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index ae4fd4de9ebe..29eb0484215a 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
sbitmap_deferred_clear(map);
if (map->word == (1UL << (map_depth - 1)) - 1)
- continue;
+ goto next;
nr = find_first_zero_bit(&map->word, map_depth);
if (nr + nr_tags <= map_depth) {
@@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
get_mask = ((1UL << map_tags) - 1) << nr;
do {
val = READ_ONCE(map->word);
+ if ((val & ~get_mask) != val)
+ goto next;
ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
} while (ret != val);
get_mask = (get_mask & ~ret) >> nr;
@@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
return get_mask;
}
}
+next:
/* Jump to next index. */
if (++index >= sb->map_nr)
index = 0;
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index c233b1a4e984..58c1b01ccfe2 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -131,6 +131,7 @@ static void kmalloc_oob_right(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
/*
* An unaligned access past the requested kmalloc size.
* Only generic KASAN can precisely detect these.
@@ -159,6 +160,7 @@ static void kmalloc_oob_left(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
kfree(ptr);
}
@@ -171,6 +173,7 @@ static void kmalloc_node_oob_right(struct kunit *test)
ptr = kmalloc_node(size, GFP_KERNEL, 0);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
kfree(ptr);
}
@@ -191,6 +194,7 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
kfree(ptr);
@@ -271,6 +275,7 @@ static void kmalloc_large_oob_right(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
kfree(ptr);
}
@@ -410,6 +415,8 @@ static void kmalloc_oob_16(struct kunit *test)
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ OPTIMIZER_HIDE_VAR(ptr1);
+ OPTIMIZER_HIDE_VAR(ptr2);
KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
kfree(ptr1);
kfree(ptr2);
@@ -756,6 +763,8 @@ static void ksize_unpoisons_memory(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
real_size = ksize(ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
+
/* This access shouldn't trigger a KASAN report. */
ptr[size] = 'x';
@@ -778,6 +787,7 @@ static void ksize_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
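
Why OPTIMIZER_HIDE_VAR(ptr) is added throughout: the compiler can otherwise
prove the out-of-bounds index from the known kmalloc() size and fold or
elide the access, so KASAN never sees it. A rough sketch of the idea behind
the macro (the real definition lives in <linux/compiler.h>):

    /* An empty asm that claims to read and write 'var': the optimizer
     * must assume the value changed, so later accesses through it are
     * emitted for real instead of being constant-folded away. */
    #define DEMO_HIDE_VAR(var) __asm__("" : "+r"(var))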