From 5e6039d8a307d8411422c154f3d446b44fa32b6d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 18:00:15 -0500 Subject: uaccess: move VERIFY_{READ,WRITE} definitions to linux/uaccess.h Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 5347cfe15af2..a058c04b8dd4 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -71,9 +71,6 @@ extern u64 __ua_limit; #define USER_DS ((mm_segment_t) { __UA_LIMIT }) #endif -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -- cgit v1.2.3 From af1d5b37d6211c814fac0d5d0b71ec695618054a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 18:14:09 -0500 Subject: uaccess: drop duplicate includes from asm/uaccess.h Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 4 ---- arch/arc/include/asm/uaccess.h | 2 -- arch/arm/include/asm/uaccess.h | 2 -- arch/arm64/include/asm/uaccess.h | 2 -- arch/avr32/include/asm/uaccess.h | 3 --- arch/blackfin/include/asm/uaccess.h | 1 - arch/cris/include/asm/uaccess.h | 2 -- arch/frv/include/asm/uaccess.h | 1 - arch/hexagon/include/asm/uaccess.h | 1 - arch/ia64/include/asm/uaccess.h | 2 -- arch/m32r/include/asm/uaccess.h | 2 -- arch/m68k/include/asm/uaccess_mm.h | 2 -- arch/m68k/include/asm/uaccess_no.h | 1 - arch/metag/include/asm/uaccess.h | 1 - arch/microblaze/include/asm/uaccess.h | 2 -- arch/mips/include/asm/uaccess.h | 2 -- arch/mn10300/include/asm/uaccess.h | 2 -- arch/nios2/include/asm/uaccess.h | 2 -- arch/openrisc/include/asm/uaccess.h | 2 -- arch/parisc/include/asm/uaccess.h | 2 -- arch/powerpc/include/asm/uaccess.h | 2 -- arch/s390/include/asm/uaccess.h | 2 -- arch/score/include/asm/uaccess.h | 2 -- arch/sh/include/asm/uaccess.h | 2 -- arch/sparc/include/asm/uaccess_32.h | 2 -- arch/sparc/include/asm/uaccess_64.h | 2 -- arch/tile/include/asm/uaccess.h | 1 - arch/um/include/asm/uaccess.h | 1 - arch/unicore32/include/asm/uaccess.h | 3 --- arch/x86/include/asm/uaccess.h | 2 -- arch/x86/include/asm/uaccess_32.h | 2 -- arch/x86/include/asm/uaccess_64.h | 1 - arch/xtensa/include/asm/uaccess.h | 3 --- include/asm-generic/uaccess.h | 1 - include/linux/uaccess.h | 1 + 35 files changed, 1 insertion(+), 64 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index a37267a5d399..77c55ce89936 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -1,10 +1,6 @@ #ifndef __ALPHA_UACCESS_H #define __ALPHA_UACCESS_H -#include -#include - - /* * The fs value determines whether argument validity checking should be * performed or not. 
If get_fs() == USER_DS, checking is performed, with diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 41faf17cd28d..0431f5668354 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -24,8 +24,6 @@ #ifndef _ASM_ARC_UACCESS_H #define _ASM_ARC_UACCESS_H -#include -#include #include /* for generic string functions */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index a13f39b3e9f8..9677a7cf7987 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -12,8 +12,6 @@ * User space memory access functions */ #include -#include -#include #include #include #include diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index f5e1e090b4d2..7c514e10a08e 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -28,11 +28,9 @@ #include #include #include -#include #include #include -#include #include #include diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 1c7f234385fc..7ca5cb33369b 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -8,9 +8,6 @@ #ifndef __ASM_AVR32_UACCESS_H #define __ASM_AVR32_UACCESS_H -#include -#include - typedef struct { unsigned int is_user_space; } mm_segment_t; diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index d9a91108964f..c9fedc3be30c 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -12,7 +12,6 @@ /* * User space memory access functions */ -#include #include #include diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 82bfcb5e2c9b..bb3004a2b2f7 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -16,8 +16,6 @@ #define _CRIS_UACCESS_H #ifndef __ASSEMBLY__ -#include -#include #include #include diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 9e01bd798a03..55b3a69c6c53 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -15,7 +15,6 @@ /* * User space memory access functions */ -#include #include #include #include diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index 21f63593e2b6..3a7f818e5ef7 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -23,7 +23,6 @@ /* * User space memory access functions */ -#include #include #include #include diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index c60ff6cc8dbd..d471d1a1afd0 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -33,8 +33,6 @@ */ #include -#include -#include #include #include diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 7d993a837e39..96b0efdb5f22 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -11,8 +11,6 @@ /* * User space memory access functions */ -#include -#include #include #include diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index fa84e9c6e8f4..fb72b710759e 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -5,9 +5,7 @@ * User space memory access functions */ #include -#include #include -#include #include /* We let the MMU do all checking */ diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index fab489a25b95..e77ce66c14d5 
100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -4,7 +4,6 @@ /* * User space memory access functions */ -#include #include #include diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 46c1f6c54103..7fc5277ae71f 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -4,7 +4,6 @@ /* * User space memory access functions */ -#include /* * The fs value determines whether argument validity checking should be diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 70cf5f3dfae3..a3c0a06d7848 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -15,8 +15,6 @@ #ifndef __ASSEMBLY__ #include -#include -#include /* RLIMIT_FSIZE */ #include #include diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index a058c04b8dd4..dd25b312c973 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -12,8 +12,6 @@ #define _ASM_UACCESS_H #include -#include -#include #include #include #include diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 3e16850c4ccd..2da7b0fed4aa 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -14,10 +14,8 @@ /* * User space memory access functions */ -#include #include #include -#include /* * The fs value determines whether argument validity checking should be diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index 07fc68c3e23c..198bbf15f644 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -13,8 +13,6 @@ #ifndef _ASM_NIOS2_UACCESS_H #define _ASM_NIOS2_UACCESS_H -#include -#include #include #include diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 6f88cf8bd112..0b0f60444b76 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -22,8 +22,6 @@ /* * User space memory access functions */ -#include -#include #include #include #include diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 598b52e5aa03..a0b461336b6a 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -6,12 +6,10 @@ */ #include #include -#include #include #include #include -#include #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 81307633c33f..2ec70aa1cc5d 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -4,8 +4,6 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ -#include -#include #include #include #include diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 0e1f515d239b..9e9a5e8d6cf6 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -12,8 +12,6 @@ /* * User space memory access functions */ -#include -#include #include #include diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index 51914244e867..7a6c6982420a 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -2,8 +2,6 @@ #define __SCORE_UACCESS_H #include -#include -#include #include #define get_ds() (KERNEL_DS) diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 6b66d67c21d2..89a28dfbabfa 100644 --- 
a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -1,8 +1,6 @@ #ifndef __ASM_SH_UACCESS_H #define __ASM_SH_UACCESS_H -#include -#include #include #define __addr_ok(addr) \ diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index d8857f5fafad..b10f7d626f0e 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -9,9 +9,7 @@ #ifdef __KERNEL__ #include -#include #include -#include #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 619223dc9022..d76362cad80f 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -6,10 +6,8 @@ */ #ifdef __KERNEL__ -#include #include #include -#include #include #include #include diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index 730073326b46..14ea3d1ca2c7 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -18,7 +18,6 @@ /* * User space memory access functions */ -#include #include #include #include diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 3705620ca298..076bdcb0c2ad 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -7,7 +7,6 @@ #ifndef __UM_UACCESS_H #define __UM_UACCESS_H -#include #include #define __under_task_size(addr, size) \ diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index 897e11ad8124..1622f37a0514 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -12,9 +12,6 @@ #ifndef __UNICORE_UACCESS_H__ #define __UNICORE_UACCESS_H__ -#include -#include - #include #define __copy_from_user __copy_from_user diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8dffb8b1d328..0522d88a7f90 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -3,10 +3,8 @@ /* * User space memory access functions */ -#include #include #include -#include #include #include #include diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 7d3bdd1ed697..5268ecceea96 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -4,8 +4,6 @@ /* * User space memory access functions */ -#include -#include #include #include #include diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 673059a109fe..8ddadd93639e 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -5,7 +5,6 @@ * User space memory access functions */ #include -#include #include #include #include diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index dd6b13649aad..bd8861c811ef 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -16,12 +16,9 @@ #ifndef _XTENSA_UACCESS_H #define _XTENSA_UACCESS_H -#include #include #include -#include - /* * The fs value determines whether argument validity checking should * be performed or not. If get_fs() == USER_DS, checking is diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index d7c17bfd4601..d20955e495b3 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -6,7 +6,6 @@ * on any machine that has kernel and user data in the same * address space, e.g. all NOMMU machines. 
*/ -#include #include #include diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index b660f37beaf5..b786ca2419b4 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -2,6 +2,7 @@ #define __LINUX_UACCESS_H__ #include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 -- cgit v1.2.3 From fc69910f329d61821897871e0e957eda39beb3d8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Mar 2017 08:29:31 +0100 Subject: MIPS: Add missing include files After the split of linux/sched.h, several platforms in arch/mips stopped building. Add the respective additional #include statements to fix the problem I first tried adding these into asm/processor.h, but ran into circular header dependencies with that which I could not figure out. The commit I listed as causing the problem is the branch merge, as there is likely a combination of multiple patches in that branch. Signed-off-by: Arnd Bergmann Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mips@linux-mips.org Cc: ralf@linux-mips.org Fixes: 1827adb11ad2 ("Merge branch 'WIP.sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip") Link: http://lkml.kernel.org/r/20170308072931.3836696-1-arnd@arndb.de Signed-off-by: Ingo Molnar --- arch/mips/cavium-octeon/cpu.c | 2 ++ arch/mips/cavium-octeon/crypto/octeon-crypto.c | 1 + arch/mips/cavium-octeon/smp.c | 1 + arch/mips/include/asm/fpu.h | 1 + arch/mips/kernel/smp-bmips.c | 1 + arch/mips/kernel/smp-mt.c | 1 + arch/mips/loongson64/loongson-3/cop2-ex.c | 1 + arch/mips/netlogic/common/smp.c | 1 + arch/mips/netlogic/xlp/cop2-ex.c | 3 +++ arch/mips/sgi-ip22/ip28-berr.c | 1 + arch/mips/sgi-ip27/ip27-berr.c | 2 ++ arch/mips/sgi-ip27/ip27-smp.c | 3 +++ arch/mips/sgi-ip32/ip32-berr.c | 1 + arch/mips/sgi-ip32/ip32-reset.c | 1 + 14 files changed, 20 insertions(+) (limited to 'arch/mips/include') diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c index a5b427909b5c..036d56cc4591 100644 --- a/arch/mips/cavium-octeon/cpu.c +++ b/arch/mips/cavium-octeon/cpu.c @@ -10,7 +10,9 @@ #include #include #include +#include #include +#include #include #include diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c index 4d22365844af..cfb4a146cf17 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c +++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "octeon-crypto.h" diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 4b94b7fbafa3..3de786545ded 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 321752bcbab6..f94455f964ec 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -12,6 +12,7 @@ #include #include +#include #include #include diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 3daa2cae50b0..1b070a76fcdd 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index e077ea3e11fb..e398cbc3d776 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git 
a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c index ea13764d0a03..621d6af5f6eb 100644 --- a/arch/mips/loongson64/loongson-3/cop2-ex.c +++ b/arch/mips/loongson64/loongson-3/cop2-ex.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 10d86d54880a..bddf1ef553a4 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c index 52bc5de42005..21e439b3db70 100644 --- a/arch/mips/netlogic/xlp/cop2-ex.c +++ b/arch/mips/netlogic/xlp/cop2-ex.c @@ -9,11 +9,14 @@ * Copyright (C) 2009 Wind River Systems, * written by Ralf Baechle */ +#include #include #include #include #include +#include #include +#include #include #include diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 1f2a5bc4779e..75460e1e106b 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c index d12879eb2b1f..83efe03d5c60 100644 --- a/arch/mips/sgi-ip27/ip27-berr.c +++ b/arch/mips/sgi-ip27/ip27-berr.c @@ -12,7 +12,9 @@ #include /* for SIGBUS */ #include /* schow_regs(), force_sig() */ #include +#include +#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index f5ed45e8f442..4cd47d23d81a 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -8,10 +8,13 @@ */ #include #include +#include #include #include + #include #include +#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c index 57d8c7486fe6..c1f12a9cf305 100644 --- a/arch/mips/sgi-ip32/ip32-berr.c +++ b/arch/mips/sgi-ip32/ip32-berr.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 8bd415c8729f..b3b442def423 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 9849a5697d3defb2087cb6b9be5573a142697889 Mon Sep 17 00:00:00 2001 From: Kirill A. Shutemov Date: Thu, 9 Mar 2017 17:24:05 +0300 Subject: arch, mm: convert all architectures to use 5level-fixup.h If an architecture uses 4level-fixup.h we don't need to do anything as it includes 5level-fixup.h. If an architecture uses pgtable-nop*d.h, define __ARCH_USE_5LEVEL_HACK before inclusion of the header. It makes asm-generic code to use 5level-fixup.h. If an architecture has 4-level paging or folds levels on its own, include 5level-fixup.h directly. Signed-off-by: Kirill A. 
Shutemov Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- arch/arc/include/asm/hugepage.h | 1 + arch/arc/include/asm/pgtable.h | 1 + arch/arm/include/asm/pgtable.h | 1 + arch/arm64/include/asm/pgtable-types.h | 4 ++++ arch/avr32/include/asm/pgtable-2level.h | 1 + arch/cris/include/asm/pgtable.h | 1 + arch/frv/include/asm/pgtable.h | 1 + arch/h8300/include/asm/pgtable.h | 1 + arch/hexagon/include/asm/pgtable.h | 1 + arch/ia64/include/asm/pgtable.h | 2 ++ arch/metag/include/asm/pgtable.h | 1 + arch/microblaze/include/asm/page.h | 3 ++- arch/mips/include/asm/pgtable-32.h | 1 + arch/mips/include/asm/pgtable-64.h | 1 + arch/mn10300/include/asm/page.h | 1 + arch/nios2/include/asm/pgtable.h | 1 + arch/openrisc/include/asm/pgtable.h | 1 + arch/powerpc/include/asm/book3s/32/pgtable.h | 1 + arch/powerpc/include/asm/book3s/64/pgtable.h | 3 +++ arch/powerpc/include/asm/nohash/32/pgtable.h | 1 + arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 3 +++ arch/powerpc/include/asm/nohash/64/pgtable-64k.h | 1 + arch/s390/include/asm/pgtable.h | 1 + arch/score/include/asm/pgtable.h | 1 + arch/sh/include/asm/pgtable-2level.h | 1 + arch/sh/include/asm/pgtable-3level.h | 1 + arch/sparc/include/asm/pgtable_64.h | 1 + arch/tile/include/asm/pgtable_32.h | 1 + arch/tile/include/asm/pgtable_64.h | 1 + arch/um/include/asm/pgtable-2level.h | 1 + arch/um/include/asm/pgtable-3level.h | 1 + arch/unicore32/include/asm/pgtable.h | 1 + arch/x86/include/asm/pgtable_types.h | 4 ++++ arch/xtensa/include/asm/pgtable.h | 1 + 34 files changed, 46 insertions(+), 1 deletion(-) (limited to 'arch/mips/include') diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 317ff773e1ca..b18fcb606908 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -11,6 +11,7 @@ #define _ASM_ARC_HUGEPAGE_H #include +#define __ARCH_USE_5LEVEL_HACK #include static inline pte_t pmd_pte(pmd_t pmd) diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index e94ca72b974e..ee22d40afef4 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -37,6 +37,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index a8d656d9aec7..1c462381c225 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -20,6 +20,7 @@ #else +#define __ARCH_USE_5LEVEL_HACK #include #include #include diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h index 69b2fd41503c..345a072b5856 100644 --- a/arch/arm64/include/asm/pgtable-types.h +++ b/arch/arm64/include/asm/pgtable-types.h @@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t; #define __pgprot(x) ((pgprot_t) { (x) } ) #if CONFIG_PGTABLE_LEVELS == 2 +#define __ARCH_USE_5LEVEL_HACK #include #elif CONFIG_PGTABLE_LEVELS == 3 +#define __ARCH_USE_5LEVEL_HACK #include +#elif CONFIG_PGTABLE_LEVELS == 4 +#include #endif #endif /* __ASM_PGTABLE_TYPES_H */ diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h index 425dd567b5b9..d5b1c63993ec 100644 --- a/arch/avr32/include/asm/pgtable-2level.h +++ b/arch/avr32/include/asm/pgtable-2level.h @@ -8,6 +8,7 @@ #ifndef __ASM_AVR32_PGTABLE_2LEVEL_H #define __ASM_AVR32_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index 2a3210ba4c72..fa3a73004cc5 100644 --- 
a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -6,6 +6,7 @@ #define _CRIS_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index a0513d463a1f..ab6e7e961b54 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -16,6 +16,7 @@ #ifndef _ASM_PGTABLE_H #define _ASM_PGTABLE_H +#include #include #include #include diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h index 8341db67821d..7d265d28ba5e 100644 --- a/arch/h8300/include/asm/pgtable.h +++ b/arch/h8300/include/asm/pgtable.h @@ -1,5 +1,6 @@ #ifndef _H8300_PGTABLE_H #define _H8300_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include #define pgtable_cache_init() do { } while (0) diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index 49eab8136ec3..24a9177fb897 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -26,6 +26,7 @@ */ #include #include +#define __ARCH_USE_5LEVEL_HACK #include /* A handy thing to have if one has the RAM. Declared in head.S */ diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 384794e665fc..6cc22c8d8923 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr; #if CONFIG_PGTABLE_LEVELS == 3 +#define __ARCH_USE_5LEVEL_HACK #include #endif +#include #include #endif /* _ASM_IA64_PGTABLE_H */ diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h index ffa3a3a2ecad..0c151e5af079 100644 --- a/arch/metag/include/asm/pgtable.h +++ b/arch/metag/include/asm/pgtable.h @@ -6,6 +6,7 @@ #define _METAG_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index fd850879854d..d506bb0893f9 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t; # else /* CONFIG_MMU */ typedef struct { unsigned long ste[64]; } pmd_t; typedef struct { pmd_t pue[1]; } pud_t; -typedef struct { pud_t pge[1]; } pgd_t; +typedef struct { pud_t p4e[1]; } p4d_t; +typedef struct { p4d_t pge[1]; } pgd_t; # endif /* CONFIG_MMU */ # define pte_val(x) ((x).pte) diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index d21f3da7bdb6..6f94bed571c4 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -16,6 +16,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include extern int temp_tlb_entry; diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 514cbc0a6a67..130a2a6c1531 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -17,6 +17,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48) #include #else diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h index 3810a6f740fd..dfe730a5ede0 100644 --- a/arch/mn10300/include/asm/page.h +++ b/arch/mn10300/include/asm/page.h @@ -57,6 +57,7 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) +#define __ARCH_USE_5LEVEL_HACK #include #endif /* 
!__ASSEMBLY__ */ diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 298393c3cb42..db4f7d179220 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -22,6 +22,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include #define FIRST_USER_ADDRESS 0UL diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index 3567aa7be555..ff97374ca069 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -25,6 +25,7 @@ #ifndef __ASM_OPENRISC_PGTABLE_H #define __ASM_OPENRISC_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 012223638815..26ed228d4dc6 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 1eeeb72c7015..13c39b6d5d64 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1,9 +1,12 @@ #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +#include + #ifndef __ASSEMBLY__ #include #endif + /* * Common bits between hash and Radix page table */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index ba9921bf202e..5134ade2e850 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H #define _ASM_POWERPC_NOHASH_32_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index d0db98793dd8..9f4de0a1035e 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -1,5 +1,8 @@ #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H + +#include + /* * Entries per page directory level. The PTE level must use a 64b record * for each page table entry. The PMD and PGD level use a 32b record for diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 55b28ef3409a..1facb584dd29 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H +#define __ARCH_USE_5LEVEL_HACK #include diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 7ed1972b1920..93e37b12e882 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -24,6 +24,7 @@ * the S390 page table tree. 
*/ #ifndef __ASSEMBLY__ +#include #include #include #include diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 0553e5cd5985..46ff8fd678a7 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h @@ -2,6 +2,7 @@ #define _ASM_SCORE_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h index 19bd89db17e7..f75cf4387257 100644 --- a/arch/sh/include/asm/pgtable-2level.h +++ b/arch/sh/include/asm/pgtable-2level.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_PGTABLE_2LEVEL_H #define __ASM_SH_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h index 249a985d9648..9b1e776eca31 100644 --- a/arch/sh/include/asm/pgtable-3level.h +++ b/arch/sh/include/asm/pgtable-3level.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_PGTABLE_3LEVEL_H #define __ASM_SH_PGTABLE_3LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 56e49c8f770d..8a598528ec1f 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -12,6 +12,7 @@ * the SpitFire page tables. */ +#include #include #include #include diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index d26a42279036..5f8c615cb5e9 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h @@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) /* We have no pmd or pud since we are strictly a two-level page table */ +#define __ARCH_USE_5LEVEL_HACK #include static inline int pud_huge_page(pud_t pud) { return 0; } diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index e96cec52f6d8..96fe58b45118 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h @@ -59,6 +59,7 @@ #ifndef __ASSEMBLY__ /* We have no pud since we are a three-level page table. 
*/ +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h index cfbe59752469..179c0ea87a0c 100644 --- a/arch/um/include/asm/pgtable-2level.h +++ b/arch/um/include/asm/pgtable-2level.h @@ -8,6 +8,7 @@ #ifndef __UM_PGTABLE_2LEVEL_H #define __UM_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* PGDIR_SHIFT determines what a third-level page table entry can map */ diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index bae8523a162f..c4d876dfb9ac 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -7,6 +7,7 @@ #ifndef __UM_PGTABLE_3LEVEL_H #define __UM_PGTABLE_3LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* PGDIR_SHIFT determines what a third-level page table entry can map */ diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h index 818d0f5598e3..a4f2bef37e70 100644 --- a/arch/unicore32/include/asm/pgtable.h +++ b/arch/unicore32/include/asm/pgtable.h @@ -12,6 +12,7 @@ #ifndef __UNICORE_PGTABLE_H__ #define __UNICORE_PGTABLE_H__ +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 8b4de22d6429..62484333673d 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd) } #if CONFIG_PGTABLE_LEVELS > 3 +#include + typedef struct { pudval_t pud; } pud_t; static inline pud_t native_make_pud(pmdval_t val) @@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud) return pud.pud; } #else +#define __ARCH_USE_5LEVEL_HACK #include static inline pudval_t native_pud_val(pud_t pud) @@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) return pmd.pmd; } #else +#define __ARCH_USE_5LEVEL_HACK #include static inline pmdval_t native_pmd_val(pmd_t pmd) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 8aa0e0d9cbb2..30dd5b2e4ad5 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -11,6 +11,7 @@ #ifndef _XTENSA_PGTABLE_H #define _XTENSA_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include #include -- cgit v1.2.3 From db68ce10c4f0a27c1ff9fa0e789e5c41f8c4ea63 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 21:08:07 -0400 Subject: new helper: uaccess_kernel() Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 2 +- arch/arc/include/asm/uaccess.h | 2 +- arch/arm/include/asm/uaccess.h | 2 +- arch/arm/lib/uaccess_with_memcpy.c | 4 ++-- arch/blackfin/kernel/process.c | 2 +- arch/c6x/kernel/sys_c6x.c | 2 +- arch/cris/include/asm/uaccess.h | 2 +- arch/m68k/include/asm/uaccess_mm.h | 2 +- arch/metag/include/asm/uaccess.h | 2 +- arch/mips/include/asm/checksum.h | 4 ++-- arch/mips/include/asm/r4kcache.h | 4 ++-- arch/mips/include/asm/uaccess.h | 2 +- arch/mips/kernel/unaligned.c | 10 +++++----- arch/openrisc/include/asm/uaccess.h | 2 +- arch/parisc/include/asm/futex.h | 2 +- arch/parisc/lib/memcpy.c | 2 +- arch/s390/include/asm/uaccess.h | 2 +- arch/sparc/include/asm/uaccess.h | 2 +- arch/sparc/include/asm/uaccess_32.h | 2 +- arch/um/include/asm/uaccess.h | 2 +- arch/um/kernel/skas/uaccess.c | 10 +++++----- arch/unicore32/include/asm/uaccess.h | 2 +- arch/unicore32/kernel/process.c | 2 +- arch/xtensa/include/asm/uaccess.h | 2 +- block/bsg.c | 2 +- drivers/scsi/sg.c | 2 +- include/linux/uaccess.h | 2 ++ 
include/rdma/ib.h | 2 +- kernel/trace/bpf_trace.c | 2 +- lib/iov_iter.c | 2 +- mm/memory.c | 2 +- security/tomoyo/network.c | 2 +- 32 files changed, 44 insertions(+), 42 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 77c55ce89936..b2f62b98e290 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -425,7 +425,7 @@ clear_user(void __user *to, long len) #undef __module_call #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? ~0UL : TASK_SIZE) extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d837a53c6e59..ffd14e630aca 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -27,7 +27,7 @@ #include /* for generic string functions */ -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) /* * Algorithmically, for __user_ok() we want do: diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 9677a7cf7987..b63527359d52 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -266,7 +266,7 @@ static inline void set_fs(mm_segment_t fs) #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) #define user_addr_max() \ - (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) + (uaccess_kernel() ? ~0UL : get_fs()) /* * The "__xxx" versions of the user access functions do not verify the diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 6bd1089b07e0..9b4ed1728616 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) unsigned long ua_flags; int atomic; - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + if (uaccess_kernel()) { memcpy((void *)to, from, n); return 0; } @@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n) { unsigned long ua_flags; - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + if (uaccess_kernel()) { memset((void *)addr, 0, n); return 0; } diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 89d5162d4ca6..89814850b08b 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -370,7 +370,7 @@ int _access_ok(unsigned long addr, unsigned long size) /* Check that things do not wrap around */ if (addr > ULONG_MAX - size) return 0; - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return 1; #ifdef CONFIG_MTD_UCLINUX if (1) diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c index 3e9bdfbee8ad..a742ae259239 100644 --- a/arch/c6x/kernel/sys_c6x.c +++ b/arch/c6x/kernel/sys_c6x.c @@ -23,7 +23,7 @@ int _access_ok(unsigned long addr, unsigned long size) if (!addr || addr > (0xffffffffUL - (size - 1))) goto _bad_access; - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return 1; if (memory_start <= addr && (addr + size - 1) < memory_end) diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index c2462ef04eaf..5f5b8f53d2d7 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -43,7 +43,7 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 
+#define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index fb72b710759e..14054a4e4216 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -375,7 +375,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) #define copy_to_user(to, from, n) __copy_to_user(to, from, n) #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? ~0UL : TASK_SIZE) extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 7fc5277ae71f..83de7554d2b3 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -24,7 +24,7 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) /* * Explicitly allow NULL pointers here. Parts of the kernel such * as readv/writev use access_ok to validate pointers, but want diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index c8b574f7e0cc..77cad232a1c6 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { might_fault(); - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) return __csum_partial_copy_kernel((__force void *)src, dst, len, sum, err_ptr); else @@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, { might_fault(); if (access_ok(VERIFY_WRITE, dst, len)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) return __csum_partial_copy_kernel(src, (__force void *)dst, len, sum, err_ptr); diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 55fd94e6cd0b..7f12d7e27c94 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -20,7 +20,7 @@ #include #include #include -#include /* for segment_eq() */ +#include /* for uaccess_kernel() */ extern void (*r4k_blast_dcache)(void); extern void (*r4k_blast_icache)(void); @@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ \ __##pfx##flush_prologue \ \ - if (segment_eq(get_fs(), USER_DS)) { \ + if (!uaccess_kernel()) { \ while (1) { \ protected_cachee_op(hitop, addr); \ if (addr == aend) \ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index dd25b312c973..70ca8eee166a 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -88,7 +88,7 @@ static inline bool eva_kernel_access(void) if (!IS_ENABLED(CONFIG_EVA)) return false; - return segment_eq(get_fs(), get_ds()); + return uaccess_kernel(); } /* diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 7ed98354fe9d..f806ee56e639 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) LoadHW(addr, value, res); else LoadHWE(addr, value, res); @@ 
-1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) LoadW(addr, value, res); else LoadWE(addr, value, res); @@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) LoadHWU(addr, value, res); else LoadHWUE(addr, value, res); @@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, value = regs->regs[insn.i_format.rt]; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) StoreHW(addr, value, res); else StoreHWE(addr, value, res); @@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, value = regs->regs[insn.i_format.rt]; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) StoreW(addr, value, res); else StoreWE(addr, value, res); diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 0b0f60444b76..227af1acb8bd 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -292,7 +292,7 @@ clear_user(void *addr, unsigned long size) } #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? ~0UL : TASK_SIZE) extern long strncpy_from_user(char *dest, const char __user *src, long count); diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index ac8bd586ace8..0ba14300cd8e 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -109,7 +109,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... */ - if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) + if (uaccess_kernel() && !uaddr) return -EFAULT; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index f82ff10ed974..66f5160136c2 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -76,7 +76,7 @@ DECLARE_PER_CPU(struct exception_data, exception_data); goto label; \ } while (0) -#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) +#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3)) #define get_kernel_space() (0) #define MERGE(w0, sh_1, w1, sh_2) ({ \ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 9e9a5e8d6cf6..7228ed8da67d 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -37,7 +37,7 @@ static inline void set_fs(mm_segment_t fs) { current->thread.mm_segment = fs; - if (segment_eq(fs, KERNEL_DS)) { + if (uaccess_kernel()) { set_cpu_flag(CIF_ASCE_SECONDARY); __ctl_load(S390_lowcore.kernel_asce, 7, 7); } else { diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index bd56c28fff9f..9e068bf9060a 100644 --- a/arch/sparc/include/asm/uaccess.h +++ b/arch/sparc/include/asm/uaccess.h @@ -7,7 +7,7 @@ #endif #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? 
~0UL : TASK_SIZE) long strncpy_from_user(char *dest, const char __user *src, long count); diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 952d512a64f2..a59a1e81986d 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -36,7 +36,7 @@ * large size and address near to PAGE_OFFSET - a fault will break his intentions. */ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) #define access_ok(type, addr, size) \ ({ (void)(type); __access_ok((unsigned long)(addr), size); }) diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 076bdcb0c2ad..e992bb57da5a 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -45,7 +45,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size) return __addr_range_nowrap(addr, size) && (__under_task_size(addr, size) || __access_ok_vsyscall(addr, size) || - segment_eq(get_fs(), KERNEL_DS)); + uaccess_kernel()); } #endif diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c index 85ac8adb069b..22c9f79db8e6 100644 --- a/arch/um/kernel/skas/uaccess.c +++ b/arch/um/kernel/skas/uaccess.c @@ -141,7 +141,7 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg) long __copy_from_user(void *to, const void __user *from, unsigned long n) { - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { memcpy(to, (__force void*)from, n); return 0; } @@ -161,7 +161,7 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg) long __copy_to_user(void __user *to, const void *from, unsigned long n) { - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { memcpy((__force void *) to, from, n); return 0; } @@ -189,7 +189,7 @@ long __strncpy_from_user(char *dst, const char __user *src, long count) long n; char *ptr = dst; - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { strncpy(dst, (__force void *) src, count); return strnlen(dst, count); } @@ -210,7 +210,7 @@ static int clear_chunk(unsigned long addr, int len, void *unused) unsigned long __clear_user(void __user *mem, unsigned long len) { - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { memset((__force void*)mem, 0, len); return 0; } @@ -235,7 +235,7 @@ long __strnlen_user(const void __user *str, long len) { int count = 0, n; - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return strnlen((__force char*)str, len) + 1; n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index f60fab718b59..1196c88c2f9b 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -20,7 +20,7 @@ #define __strnlen_user __strnlen_user #define __clear_user __clear_user -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) (((size) <= TASK_SIZE) \ && ((addr) <= TASK_SIZE - (size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index d22c1dc7e39e..ddaf78ae6854 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -178,7 +178,7 @@ void __show_regs(struct pt_regs 
*regs) buf, interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff", processor_modes[processor_mode(regs)], - segment_eq(get_fs(), get_ds()) ? "kernel" : "user"); + uaccess_kernel() ? "kernel" : "user"); { unsigned int ctrl; diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index bd8861c811ef..26512692e28f 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -37,7 +37,7 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) diff --git a/block/bsg.c b/block/bsg.c index 74835dbf0c47..69ccb7801a75 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -650,7 +650,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) dprintk("%s: write %zd bytes\n", bd->name, count); - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + if (unlikely(uaccess_kernel())) return -EINVAL; bsg_set_block(bd, file); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 29b86505f796..5d9136a345ec 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -581,7 +581,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + if (unlikely(uaccess_kernel())) return -EINVAL; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index b786ca2419b4..9c3ae8706e9d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -7,6 +7,8 @@ #define VERIFY_READ 0 #define VERIFY_WRITE 1 +#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) + #include static __always_inline void pagefault_disabled_inc(void) diff --git a/include/rdma/ib.h b/include/rdma/ib.h index 9b4c22a36931..66dbed0c146d 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -100,7 +100,7 @@ struct sockaddr_ib { */ static inline bool ib_safe_file_access(struct file *filp) { - return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); + return filp->f_cred == current_cred() && !uaccess_kernel(); } #endif /* _RDMA_IB_H */ diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index cee9802cf3e0..f806dbd66de9 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -96,7 +96,7 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, if (unlikely(in_interrupt() || current->flags & (PF_KTHREAD | PF_EXITING))) return -EPERM; - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + if (unlikely(uaccess_kernel())) return -EPERM; if (!access_ok(VERIFY_WRITE, unsafe_ptr, size)) return -EPERM; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e68604ae3ced..97db876c6862 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction, size_t count) { /* It will get better. Eventually... 
*/ - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { direction |= ITER_KVEC; i->type = direction; i->kvec = (struct kvec *)iov; diff --git a/mm/memory.c b/mm/memory.c index a97a4cec2e1f..e8f4e10e770a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4136,7 +4136,7 @@ void __might_fault(const char *file, int line) * get paged out, therefore we'll never actually fault, and the * below annotations will generate false positives. */ - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return; if (pagefault_disabled()) return; diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c index 97527710a72a..6c02ac478247 100644 --- a/security/tomoyo/network.c +++ b/security/tomoyo/network.c @@ -608,7 +608,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr, static bool tomoyo_kernel_service(void) { /* Nothing to do if I am a kernel service. */ - return segment_eq(get_fs(), KERNEL_DS); + return uaccess_kernel(); } /** -- cgit v1.2.3 From f0a955f4eeec0f16bdbdd0fb15d8ec0937d1de23 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 10:10:53 -0500 Subject: mips: sanitize __access_ok() for one thing, the last argument is always __access_mask and had been such since 2.4.0-test3pre8; for another, it can bloody well be a static inline - -O2 or -Os, __builtin_constant_p() propagates through static inline calls. Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 21 ++++++--------------- arch/mips/kernel/mips-r2-to-r6-emul.c | 24 ++++++++++++------------ arch/mips/kernel/syscall.c | 2 +- arch/mips/oprofile/backtrace.c | 2 +- 4 files changed, 20 insertions(+), 29 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 70ca8eee166a..0c59911349ce 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -128,23 +128,14 @@ static inline bool eva_kernel_access(void) * this function, memory access functions may still return -EFAULT. */ -#define __access_mask get_fs().seg - -#define __access_ok(addr, size, mask) \ -({ \ - unsigned long __addr = (unsigned long) (addr); \ - unsigned long __size = size; \ - unsigned long __mask = mask; \ - unsigned long __ok; \ - \ - __chk_user_ptr(addr); \ - __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ - __ua_size(__size))); \ - __ok == 0; \ -}) +static inline int __access_ok(const void __user *p, unsigned long size) +{ + unsigned long addr = (unsigned long)p; + return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; +} #define access_ok(type, addr, size) \ - likely(__access_ok((addr), (size), __access_mask)) + likely(__access_ok((addr), (size))) /* * put_user: - Write a simple value into user space. 
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index d8f1cf1ec370..550e7d03090a 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -1200,7 +1200,7 @@ fpu_emul: case lwl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 4)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1273,7 +1273,7 @@ fpu_emul: case lwr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 4)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1347,7 +1347,7 @@ fpu_emul: case swl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1417,7 +1417,7 @@ fpu_emul: case swr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1492,7 +1492,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 8)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1611,7 +1611,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 8)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1730,7 +1730,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1848,7 +1848,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1965,7 +1965,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_READ, vaddr, 4)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2021,7 +2021,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2084,7 +2084,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_READ, vaddr, 8)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2145,7 +2145,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 
f1d17ece4181..1dfa7f5796c7 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) if (unlikely(addr & 3)) return -EINVAL; - if (unlikely(!access_ok(VERIFY_WRITE, addr, 4))) + if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4))) return -EINVAL; if (cpu_has_llsc && R10000_LLSC_WAR) { diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 5e645c9a3162..16ace558cd9d 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -18,7 +18,7 @@ struct stackframe { static inline int get_mem(unsigned long addr, unsigned long *result) { unsigned long *address = (unsigned long *) addr; - if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long))) + if (!access_ok(VERIFY_READ, address, sizeof(unsigned long))) return -1; if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) return -3; -- cgit v1.2.3 From c12a1d7ad916105fc5f7809f9273ea4fe7ce4b28 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 11:40:15 -0400 Subject: mips: consolidate __invoke_... wrappers Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 144 ++++++++++++---------------------------- 1 file changed, 44 insertions(+), 100 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 0c59911349ce..0cd0accdd0fa 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -797,8 +797,30 @@ extern void __put_user_unaligned_unknown(void); extern size_t __copy_user(void *__to, const void *__from, size_t __n); -#ifndef CONFIG_EVA -#define __invoke_copy_to_user(to, from, n) \ +#define __invoke_copy_from(func, to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(func) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) + +#define __invoke_copy_to(func, to, from, n) \ ({ \ register void __user *__cu_to_r __asm__("$4"); \ register const void *__cu_from_r __asm__("$5"); \ @@ -808,7 +830,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_from_r = (from); \ __cu_len_r = (n); \ __asm__ __volatile__( \ - __MODULE_JAL(__copy_user) \ + __MODULE_JAL(func) \ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ @@ -816,8 +838,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_len_r; \ }) +#ifndef CONFIG_EVA +#define __invoke_copy_to_user(to, from, n) \ + __invoke_copy_to(__copy_user, to, from, n) + #define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to_user(to, from, n) + __invoke_copy_to(__copy_user, to, from, n) #endif @@ -948,64 +974,24 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); #ifndef CONFIG_EVA #define __invoke_copy_from_user(to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ 
- \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(__copy_user) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) + __invoke_copy_from(__copy_user, to, from, n) #define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from_user(to, from, n) + __invoke_copy_from(__copy_user, to, from, n) /* For userland <-> userland operations */ #define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from_user(to, from, n) + __invoke_copy_from(__copy_user, to, from, n) /* For kernel <-> kernel operations */ #define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from_user(to, from, n) + __invoke_copy_from(__copy_user, to, from, n) #define __invoke_copy_from_user_inatomic(to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(__copy_user_inatomic) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) + __invoke_copy_from(__copy_user_inatomic, to, from, n) #define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from_user_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic, to, from, n) #else @@ -1019,79 +1005,37 @@ extern size_t __copy_to_user_eva(void *__to, const void *__from, size_t __n); extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); -#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(func_ptr) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - __MODULE_JAL(func_ptr) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - /* * Source or destination address is in userland. 
We need to go through * the TLB */ #define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva) + __invoke_copy_from(__copy_from_user_eva, to, from, n) #define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, \ - __copy_user_inatomic_eva) + __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) #define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva) + __invoke_copy_to(__copy_to_user_eva, to, from, n) #define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva) + __invoke_copy_from(__copy_in_user_eva, to, from, n) /* * Source or destination address in the kernel. We are not going through * the TLB */ #define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) + __invoke_copy_from(__copy_user, to, from, n) #define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic) + __invoke_copy_from(__copy_user_inatomic, to, from, n) #define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to_user_eva_generic(to, from, n, __copy_user) + __invoke_copy_to(__copy_user, to, from, n) #define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) + __invoke_copy_from(__copy_user, to, from, n) #endif /* CONFIG_EVA */ -- cgit v1.2.3 From b0bb945c14e81b3b33075d59469d5bf5b736a4fb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 11:51:34 -0400 Subject: mips: clean and reorder the forest of macros... Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 120 ++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 71 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 0cd0accdd0fa..9c05262a85f3 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -838,14 +838,60 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_len_r; \ }) +#define __invoke_copy_from_kernel(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) + +#define __invoke_copy_from_kernel_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic, to, from, n) + +#define __invoke_copy_to_kernel(to, from, n) \ + __invoke_copy_to(__copy_user, to, from, n) + +#define ___invoke_copy_in_kernel(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) + #ifndef CONFIG_EVA +#define __invoke_copy_from_user(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) + +#define __invoke_copy_from_user_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic, to, from, n) + #define __invoke_copy_to_user(to, from, n) \ __invoke_copy_to(__copy_user, to, from, n) -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) +#define ___invoke_copy_in_user(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) -#endif +#else + +/* EVA specific functions */ + +extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, + size_t __n); +extern size_t __copy_from_user_eva(void *__to, const void *__from, + size_t __n); +extern size_t __copy_to_user_eva(void *__to, const void *__from, + size_t __n); +extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); + +/* + * Source or destination address is in 
userland. We need to go through + * the TLB + */ +#define __invoke_copy_from_user(to, from, n) \ + __invoke_copy_from(__copy_from_user_eva, to, from, n) + +#define __invoke_copy_from_user_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) + +#define __invoke_copy_to_user(to, from, n) \ + __invoke_copy_to(__copy_to_user_eva, to, from, n) + +#define ___invoke_copy_in_user(to, from, n) \ + __invoke_copy_from(__copy_in_user_eva, to, from, n) + +#endif /* CONFIG_EVA */ /* * __copy_to_user: - Copy a block of data into user space, with less checking. @@ -971,74 +1017,6 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); __cu_len; \ }) -#ifndef CONFIG_EVA - -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -/* For userland <-> userland operations */ -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -/* For kernel <-> kernel operations */ -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - -#define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - -#else - -/* EVA specific functions */ - -extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_from_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_to_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); - -/* - * Source or destination address is in userland. We need to go through - * the TLB - */ -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_from_user_eva, to, from, n) - -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_to_user_eva, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_in_user_eva, to, from, n) - -/* - * Source or destination address in the kernel. We are not going through - * the TLB - */ -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#endif /* CONFIG_EVA */ - /* * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. 
-- cgit v1.2.3 From ab0aca27c4dfcfa72eb1339ae94edbfbdd751ae8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 11:54:21 -0400 Subject: mips: make copy_from_user() zero tail explicitly Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 9c05262a85f3..712dc40625b6 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -1080,29 +1080,29 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); ({ \ void *__cu_to; \ const void __user *__cu_from; \ - long __cu_len; \ + long __cu_len, __cu_res; \ \ __cu_to = (to); \ __cu_from = (from); \ - __cu_len = (n); \ + __cu_res = __cu_len = (n); \ \ check_object_size(__cu_to, __cu_len, false); \ \ if (eva_kernel_access()) { \ - __cu_len = __invoke_copy_from_kernel(__cu_to, \ + __cu_res = __invoke_copy_from_kernel(__cu_to, \ __cu_from, \ __cu_len); \ } else { \ if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, \ + __cu_res = __invoke_copy_from_user(__cu_to, \ __cu_from, \ __cu_len); \ - } else { \ - memset(__cu_to, 0, __cu_len); \ } \ } \ - __cu_len; \ + if (unlikely(__cu_res)) \ + memset(__cu_to + __cu_len - __cu_res, 0, __cu_res); \ + __cu_res; \ }) #define __copy_in_user(to, from, n) \ -- cgit v1.2.3 From 1a4fded6d32596053d4f0bc8c49028faf4114a17 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 12:03:16 -0400 Subject: mips: get rid of tail-zeroing in primitives Signed-off-by: Al Viro --- arch/mips/cavium-octeon/octeon-memcpy.S | 31 +-------------------- arch/mips/include/asm/uaccess.h | 19 ++----------- arch/mips/lib/memcpy.S | 49 --------------------------------- 3 files changed, 3 insertions(+), 96 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index cfd97f6448bb..0a7c9834b81c 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -139,15 +139,6 @@ .set noreorder .set noat -/* - * t7 is used as a flag to note inatomic mode. - */ -LEAF(__copy_user_inatomic) -EXPORT_SYMBOL(__copy_user_inatomic) - b __copy_user_common - li t7, 1 - END(__copy_user_inatomic) - /* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of @@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy) __memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) - li t7, 0 /* not inatomic */ -__copy_user_common: /* * Note: dst & src may be unaligned, len may be 0 * Temps @@ -414,25 +403,7 @@ l_exc: LOAD t0, TI_TASK($28) LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address SUB len, AT, t0 # len number of uncopied bytes - bnez t7, 2f /* Skip the zeroing out part if inatomic */ - /* - * Here's where we rely on src and dst being incremented in tandem, - * See (3) above. - * dst += (fault addr - src) to put dst at first byte to clear - */ - ADD dst, t0 # compute start address in a1 - SUB dst, src - /* - * Clear len bytes starting at dst. Can't call __bzero because it - * might modify len. An inefficient loop for these rare times... 
- */ - beqz len, done - SUB src, len, 1 -1: sb zero, 0(dst) - ADD dst, dst, 1 - bnez src, 1b - SUB src, src, 1 -2: jr ra + jr ra nop diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 712dc40625b6..988cd3b64a59 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -841,9 +841,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_kernel(to, from, n) \ __invoke_copy_from(__copy_user, to, from, n) -#define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - #define __invoke_copy_to_kernel(to, from, n) \ __invoke_copy_to(__copy_user, to, from, n) @@ -854,9 +851,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user(to, from, n) \ __invoke_copy_from(__copy_user, to, from, n) -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - #define __invoke_copy_to_user(to, from, n) \ __invoke_copy_to(__copy_user, to, from, n) @@ -867,8 +861,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); /* EVA specific functions */ -extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, - size_t __n); extern size_t __copy_from_user_eva(void *__to, const void *__from, size_t __n); extern size_t __copy_to_user_eva(void *__to, const void *__from, @@ -882,9 +874,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user(to, from, n) \ __invoke_copy_from(__copy_from_user_eva, to, from, n) -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) - #define __invoke_copy_to_user(to, from, n) \ __invoke_copy_to(__copy_to_user_eva, to, from, n) @@ -930,8 +919,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len; \ }) -extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); - #define __copy_to_user_inatomic(to, from, n) \ ({ \ void __user *__cu_to; \ @@ -966,12 +953,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); check_object_size(__cu_to, __cu_len, false); \ \ if (eva_kernel_access()) \ - __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \ - __cu_from,\ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\ __cu_len);\ else \ - __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \ - __cu_from, \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ __cu_len); \ __cu_len; \ }) diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c3031f18c572..3114a2ed1f4e 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -562,39 +562,9 @@ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes - bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */ - /* - * Here's where we rely on src and dst being incremented in tandem, - * See (3) above. - * dst += (fault addr - src) to put dst at first byte to clear - */ - ADD dst, t0 # compute start address in a1 - SUB dst, src - /* - * Clear len bytes starting at dst. Can't call __bzero because it - * might modify len. An inefficient loop for these rare times... 
- */ - .set reorder /* DADDI_WAR */ - SUB src, len, 1 - beqz len, .Ldone\@ - .set noreorder -1: sb zero, 0(dst) - ADD dst, dst, 1 -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS - bnez src, 1b - SUB src, src, 1 -#else - .set push - .set noat - li v1, 1 - bnez src, 1b - SUB src, src, v1 - .set pop -#endif jr ra nop - #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ @@ -672,15 +642,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ move a2, zero END(__rmemcpy) -/* - * t6 is used as a flag to note inatomic mode. - */ -LEAF(__copy_user_inatomic) -EXPORT_SYMBOL(__copy_user_inatomic) - b __copy_user_common - li t6, 1 - END(__copy_user_inatomic) - /* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of @@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy) .L__memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) - li t6, 0 /* not inatomic */ -__copy_user_common: /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP @@ -708,20 +667,12 @@ __copy_user_common: * space */ -LEAF(__copy_user_inatomic_eva) -EXPORT_SYMBOL(__copy_user_inatomic_eva) - b __copy_from_user_common - li t6, 1 - END(__copy_user_inatomic_eva) - /* * __copy_from_user (EVA) */ LEAF(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva) - li t6, 0 /* not inatomic */ -__copy_from_user_common: __BUILD_COPY_USER EVA_MODE USEROP KERNELOP END(__copy_from_user_eva) -- cgit v1.2.3 From 2260ea86c0e7623d4e99886638b2b83ef21efa60 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 15:59:26 -0400 Subject: mips: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/mips/Kconfig | 1 + arch/mips/include/asm/uaccess.h | 274 ++++------------------------------------ 2 files changed, 27 insertions(+), 248 deletions(-) (limited to 'arch/mips/include') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a008a9f03072..aff5633bfe4d 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -69,6 +69,7 @@ config MIPS select HAVE_EXIT_THREAD select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_ARCH_HARDENED_USERCOPY + select ARCH_HAS_RAW_COPY_USER menu "Machine selection" diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 988cd3b64a59..99e629a590a5 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -882,257 +882,35 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); #endif /* CONFIG_EVA */ -/* - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. 
- */ -#define __copy_to_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_from, __cu_len, true); \ - might_fault(); \ - \ - if (eva_kernel_access()) \ - __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ - __cu_len); \ - else \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ - __cu_len); \ - __cu_len; \ -}) - -#define __copy_to_user_inatomic(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_from, __cu_len, true); \ - \ - if (eva_kernel_access()) \ - __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ - __cu_len); \ - else \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ - __cu_len); \ - __cu_len; \ -}) - -#define __copy_from_user_inatomic(to, from, n) \ -({ \ - void *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_to, __cu_len, false); \ - \ - if (eva_kernel_access()) \ - __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\ - __cu_len);\ - else \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ - __cu_len; \ -}) - -/* - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -#define copy_to_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_from, __cu_len, true); \ - \ - if (eva_kernel_access()) { \ - __cu_len = __invoke_copy_to_kernel(__cu_to, \ - __cu_from, \ - __cu_len); \ - } else { \ - if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ - might_fault(); \ - __cu_len = __invoke_copy_to_user(__cu_to, \ - __cu_from, \ - __cu_len); \ - } \ - } \ - __cu_len; \ -}) - -/* - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. 
- */ -#define __copy_from_user(to, from, n) \ -({ \ - void *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_to, __cu_len, false); \ - \ - if (eva_kernel_access()) { \ - __cu_len = __invoke_copy_from_kernel(__cu_to, \ - __cu_from, \ - __cu_len); \ - } else { \ - might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ - } \ - __cu_len; \ -}) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (eva_kernel_access()) + return __invoke_copy_to_kernel(to, from, n); + else + return __invoke_copy_to_user(to, from, n); +} -/* - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -#define copy_from_user(to, from, n) \ -({ \ - void *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len, __cu_res; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_res = __cu_len = (n); \ - \ - check_object_size(__cu_to, __cu_len, false); \ - \ - if (eva_kernel_access()) { \ - __cu_res = __invoke_copy_from_kernel(__cu_to, \ - __cu_from, \ - __cu_len); \ - } else { \ - if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ - might_fault(); \ - __cu_res = __invoke_copy_from_user(__cu_to, \ - __cu_from, \ - __cu_len); \ - } \ - } \ - if (unlikely(__cu_res)) \ - memset(__cu_to + __cu_len - __cu_res, 0, __cu_res); \ - __cu_res; \ -}) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (eva_kernel_access()) + return __invoke_copy_from_kernel(to, from, n); + else + return __invoke_copy_from_user(to, from, n); +} -#define __copy_in_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - if (eva_kernel_access()) { \ - __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \ - __cu_len); \ - } else { \ - might_fault(); \ - __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \ - __cu_len); \ - } \ - __cu_len; \ -}) +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER -#define copy_in_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - if (eva_kernel_access()) { \ - __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \ - __cu_len); \ - } else { \ - if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\ - access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\ - might_fault(); \ - __cu_len = ___invoke_copy_in_user(__cu_to, \ - __cu_from, \ - __cu_len); \ - } \ - } \ - __cu_len; \ -}) +static inline unsigned long +raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) +{ + if (eva_kernel_access()) + return ___invoke_copy_in_kernel(to, from, n); + else + return ___invoke_copy_in_user(to, from, n); +} extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero(void 
__user *addr, __kernel_size_t size); -- cgit v1.2.3
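
A note on the segment_eq(get_fs(), KERNEL_DS) conversions at the top of this section: they rely on the uaccess_kernel() helper introduced into linux/uaccess.h earlier in the same series. For reference, and glossing over the #ifndef hook that lets an architecture override it, the helper is essentially just

#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

so the converted callers above are unchanged in behaviour; the point is to stop open-coding the set_fs()-era test in generic code and keep it behind a single name.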
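
For context on the final patch: once an architecture selects ARCH_HAS_RAW_COPY_USER, it only has to supply raw_copy_{to,from}_user() (and optionally raw_copy_in_user()); the access_ok() check, the might_fault() annotation and the zeroing of the uncopied tail all live in the generic uaccess layer. That is why the tail-zeroing loops could already be dropped from octeon-memcpy.S and memcpy.S earlier in the series, once copy_from_user() itself zeroed the tail. The sketch below is a rough approximation of the generic wrapper of that era (when access_ok() still took a VERIFY_* argument); the name sketch_copy_from_user is made up for illustration, and the real helper in include/linux/uaccess.h carries additional instrumentation hooks.

static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;			/* assume nothing copied yet */

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* arch primitive */
	if (unlikely(res))				/* short copy: */
		memset(to + n - res, 0, res);		/* zero the tail */
	return res;
}

Pushing the checking and the tail zeroing into common code is the point of the conversion: each architecture keeps only its optimized copy loop, and the inatomic and __copy_* variants no longer need separate per-arch plumbing. The INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER defines in the patch simply ask for these generic wrappers to be inlined rather than taken out of line from lib/usercopy.c.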