 arch/powerpc/configs/skiroot_defconfig |  1 -
 include/linux/slab.h                   |  2 --
 mm/slab.c                              | 13 -------------
 mm/slab_common.c                       |  8 --------
 mm/slub.c                              | 14 --------------
 security/Kconfig                       | 14 --------------
 6 files changed, 52 deletions(-)
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index b806a5d3a695..c3ba614c973d 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -275,7 +275,6 @@ CONFIG_NLS_UTF8=y
 CONFIG_ENCRYPTED_KEYS=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
-# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 837cb16232ef..181045148b06 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -142,8 +142,6 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
-extern bool usercopy_fallback;
-
 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
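
For context: the whitelists the removed fallback was hunting for are declared
with kmem_cache_create_usercopy(), from this same header, which records the
offset and size of the one region of an object that may cross the user/kernel
boundary. A minimal sketch, assuming the usual slab headers; struct foo,
foo_cachep and the cache name are hypothetical:

	/* Only the "data" field is ever copied to/from user space,
	 * so only that region is whitelisted. */
	struct foo {
		spinlock_t lock;
		char data[128];
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_create_usercopy("foo_cache",
				sizeof(struct foo), 0, SLAB_PANIC,
				offsetof(struct foo, data),     /* useroffset */
				sizeof_field(struct foo, data), /* usersize */
				NULL);
		return 0;
	}
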
diff --git a/mm/slab.c b/mm/slab.c
index 64ec17a3bc2b..da132a9ae6f8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4204,19 +4204,6 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;
 
-	/*
-	 * If the copy is still within the allocated object, produce
-	 * a warning instead of rejecting the copy. This is intended
-	 * to be a temporary method to find any missing usercopy
-	 * whitelists.
-	 */
-	if (usercopy_fallback &&
-	    offset <= cachep->object_size &&
-	    n <= cachep->object_size - offset) {
-		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
-		return;
-	}
-
	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
#endif /* CONFIG_HARDENED_USERCOPY */
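
A note on the check that survives above: it asks, in an overflow-safe way,
whether [offset, offset + n) falls entirely inside [useroffset, useroffset +
usersize). Writing the last test as n <= useroffset - offset + usersize avoids
ever forming offset + n, so no wraparound of the untrusted quantities can slip
a bad copy past the test; once the first two conditions hold, the right-hand
side is simply the whitelist space remaining at offset. Sketch of the same
arithmetic, with names as in the hunk above:

	/* Equivalent but overflow-prone form (illustration only):
	 *	offset >= useroffset && offset + n <= useroffset + usersize
	 * The kernel keeps additions away from the untrusted side:
	 */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;	/* entirely inside the whitelist: allow */
	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
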
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ec2bb0beed75..e5d080a93009 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,14 +37,6 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
-#ifdef CONFIG_HARDENED_USERCOPY
-bool usercopy_fallback __ro_after_init =
-		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
-module_param(usercopy_fallback, bool, 0400);
-MODULE_PARM_DESC(usercopy_fallback,
-		"WARN instead of reject usercopy whitelist violations");
-#endif
-
 static LIST_HEAD(slab_caches_to_rcu_destroy);
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
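
For what this hunk retires at runtime: module_param() in a built-in file
exposes the knob under the file's name, so the fallback was toggled at boot as
slab_common.usercopy_fallback (matching the Kconfig help text removed below),
and the 0400 mode made it root-readable but never writable after boot (the
variable is also __ro_after_init). Old usage, gone with this change:

	# kernel command line, pre-removal kernels only:
	slab_common.usercopy_fallback=1

	# read-only (0400) runtime view:
	cat /sys/module/slab_common/parameters/usercopy_fallback
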
diff --git a/mm/slub.c b/mm/slub.c
index e9a51dcf8bf9..432145d7b4ec 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4489,7 +4489,6 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 {
	struct kmem_cache *s;
	unsigned int offset;
-	size_t object_size;
	bool is_kfence = is_kfence_address(ptr);
 
	ptr = kasan_reset_tag(ptr);
@@ -4522,19 +4521,6 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
	    n <= s->useroffset - offset + s->usersize)
		return;
 
-	/*
-	 * If the copy is still within the allocated object, produce
-	 * a warning instead of rejecting the copy. This is intended
-	 * to be a temporary method to find any missing usercopy
-	 * whitelists.
-	 */
-	object_size = slab_ksize(s);
-	if (usercopy_fallback &&
-	    offset <= object_size && n <= object_size - offset) {
-		usercopy_warn("SLUB object", s->name, to_user, offset, n);
-		return;
-	}
-
	usercopy_abort("SLUB object", s->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
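
Note that the SLUB side also loses the function's only use of slab_ksize(),
which is why the object_size local disappears in the first hunk: with the
fallback gone, the allocated-object size is never consulted and the whitelist
alone decides. Seen from a caller, a copy such as the hypothetical sketch
below (reusing struct foo/foo_cachep from the earlier sketch) now either
lands inside the cache's useroffset/usersize window or is rejected outright:

	static ssize_t foo_read(struct foo *f, char __user *buf, size_t count)
	{
		size_t n = min(count, sizeof(f->data));

		/* With CONFIG_HARDENED_USERCOPY=y this copy is vetted by
		 * __check_heap_object(); post-removal it must fall inside
		 * the whitelisted f->data region, or the copy is aborted. */
		if (copy_to_user(buf, f->data, n))
			return -EFAULT;
		return n;
	}
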
diff --git a/security/Kconfig b/security/Kconfig
index 0ced7fd33e4d..d9698900c9b7 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -163,20 +163,6 @@ config HARDENED_USERCOPY
	  or are part of the kernel text. This kills entire classes
	  of heap overflow exploits and similar kernel memory exposures.
 
-config HARDENED_USERCOPY_FALLBACK
-	bool "Allow usercopy whitelist violations to fallback to object size"
-	depends on HARDENED_USERCOPY
-	default y
-	help
-	  This is a temporary option that allows missing usercopy whitelists
-	  to be discovered via a WARN() to the kernel log, instead of
-	  rejecting the copy, falling back to non-whitelisted hardened
-	  usercopy that checks the slab allocation size instead of the
-	  whitelist size. This option will be removed once it seems like
-	  all missing usercopy whitelists have been identified and fixed.
-	  Booting with "slab_common.usercopy_fallback=Y/N" can change
-	  this setting.
-
 config HARDENED_USERCOPY_PAGESPAN
	bool "Refuse to copy allocations that span multiple pages"
	depends on HARDENED_USERCOPY
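
A last practical note on the removed option: it was "default y", so strict
whitelist enforcement previously required opting out explicitly, which is
exactly the "# CONFIG_HARDENED_USERCOPY_FALLBACK is not set" line dropped
from skiroot_defconfig above. After this change strict enforcement is
unconditional, and config fragments that still carry the symbol can simply
drop it:

	# no longer needed (the symbol no longer exists):
	# CONFIG_HARDENED_USERCOPY_FALLBACK is not set

	# the hardening switch that remains:
	CONFIG_HARDENED_USERCOPY=y
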