| author | Linus Torvalds | 2020-10-12 16:24:13 -0700 |
|---|---|---|
| committer | Linus Torvalds | 2020-10-12 16:24:13 -0700 |
| commit | c90578360c92c71189308ebc71087197080e94c3 (patch) | |
| tree | 15cccf727f6fe35ffd81922461996c1c2ca1ebfd /arch/sh | |
| parent | 50d228345a03c882dfe11928ab41b42458b3f922 (diff) | |
| parent | 70d65cd555c5e43c613700f604a47f7ebcf7b6f1 (diff) | |
Merge branch 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull copy_and_csum cleanups from Al Viro:
"Saner calling conventions for csum_and_copy_..._user() and friends"
[ Removing 800+ lines of code and cleaning stuff up is good - Linus ]
* 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
ppc: propagate the calling conventions change down to csum_partial_copy_generic()
amd64: switch csum_partial_copy_generic() to new calling conventions
sparc64: propagate the calling convention changes down to __csum_partial_copy_...()
xtensa: propagate the calling conventions change down into csum_partial_copy_generic()
mips: propagate the calling convention change down into __csum_partial_copy_..._user()
mips: __csum_partial_copy_kernel() has no users left
mips: csum_and_copy_{to,from}_user() are never called under KERNEL_DS
sparc32: propagate the calling conventions change down to __csum_partial_copy_sparc_generic()
i386: propagate the calling conventions change down to csum_partial_copy_generic()
sh: propagate the calling conventions change down to csum_partial_copy_generic()
m68k: get rid of zeroing destination on error in csum_and_copy_from_user()
arm: propagate the calling convention changes down to csum_partial_copy_from_user()
alpha: propagate the calling convention changes down to csum_partial_copy.c helpers
saner calling conventions for csum_and_copy_..._user()
csum_and_copy_..._user(): pass 0xffffffff instead of 0 as initial sum
csum_partial_copy_nocheck(): drop the last argument
unify generic instances of csum_partial_copy_nocheck()
icmp_push_reply(): reorder adding the checksum up
skb_copy_and_csum_bits(): don't bother with the last argument
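
Common to all of the commits above: the csum-and-copy helpers lose their `__wsum sum` and `int *err_ptr` parameters, and a fault is reported by returning 0 instead. A before/after sketch of the shape of the change follows; it is a hedged approximation, not the exact tree code — `arch_csum_copy()` is a hypothetical stand-in for the per-arch copy-and-add loop and `access_ok_stub()` models access_ok(), declared only because the signatures are what matters here:

```c
#include <errno.h>
#include <stdint.h>

typedef uint32_t __wsum;

extern int access_ok_stub(const void *addr, int len);
extern __wsum arch_csum_copy(const void *src, void *dst, int len, __wsum seed);

/* Old convention: caller supplies the seed and an error out-pointer. */
static __wsum old_csum_and_copy_from_user(const void *src, void *dst,
                                          int len, __wsum sum, int *err_ptr)
{
	if (!access_ok_stub(src, len)) {
		if (len)
			*err_ptr = -EFAULT;
		return sum;	/* return value is useless; caller checks *err_ptr */
	}
	return arch_csum_copy(src, dst, len, sum);
}

/* New convention: the seed is implicitly ~0U and a return of 0 means
 * "faulted", so the error out-pointer and its plumbing disappear. */
static __wsum new_csum_and_copy_from_user(const void *src, void *dst, int len)
{
	if (!access_ok_stub(src, len))
		return 0;
	return arch_csum_copy(src, dst, len, ~0U);	/* can never yield 0 */
}
```

The payoff is visible in the diffstat below: the sh port deletes its error-pointer plumbing and destination-zeroing fallback outright.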
Diffstat (limited to 'arch/sh')
| -rw-r--r-- | arch/sh/include/asm/checksum_32.h | 36 |
| -rw-r--r-- | arch/sh/lib/checksum.S | 119 |
2 files changed, 47 insertions, 108 deletions
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index 91571a42e44e..1a391e3a7659 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -30,10 +30,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
-					    int len, __wsum sum,
-					    int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
 
+#define _HAVE_ARCH_CSUM_AND_COPY
 /*
  * Note: when you get a NULL pointer exception here this means someone
  * passed in an incorrect kernel address to one of these functions.
@@ -42,23 +41,18 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
  * access_ok().
  */
 static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-				 int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
-	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len);
 }
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
-			       int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
 {
-	if (access_ok(src, len))
-		return csum_partial_copy_generic((__force const void *)src, dst,
-					len, sum, err_ptr, NULL);
-	if (len)
-		*err_ptr = -EFAULT;
-	return sum;
+	if (!access_ok(src, len))
+		return 0;
+	return csum_partial_copy_generic((__force const void *)src, dst, len);
 }
 
 /*
@@ -199,16 +193,10 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 #define HAVE_CSUM_COPY_USER
 static inline __wsum csum_and_copy_to_user(const void *src,
 					   void __user *dst,
-					   int len, __wsum sum,
-					   int *err_ptr)
+					   int len)
 {
-	if (access_ok(dst, len))
-		return csum_partial_copy_generic((__force const void *)src,
-						dst, len, sum, NULL, err_ptr);
-
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return (__force __wsum)-1; /* invalid checksum */
+	if (!access_ok(dst, len))
+		return 0;
+	return csum_partial_copy_generic((__force const void *)src, dst, len);
 }
 #endif /* __ASM_SH_CHECKSUM_H */
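
With err_ptr gone, the two inline wrappers above signal a bad user pointer by returning 0. That is only unambiguous because the series seeds the checksum with 0xffffffff instead of 0 (the `mov #-1,r7` added in the assembly below): in ones'-complement (end-around carry) arithmetic, ~0 is just an alternative encoding of zero, and once the accumulator is nonzero it can never wrap back to 0x00000000, so 0 becomes unreachable as a legitimate sum. A small standalone C demo of that property (my illustration, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/* 32-bit ones'-complement (end-around carry) add, the operation behind
 * csum_partial(). Once the accumulator is nonzero, a carry-out folds
 * back in as +1, so the result can never return to zero. */
static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;
	return (uint32_t)(s + (s >> 32));
}

int main(void)
{
	uint32_t seed0 = 0, seed1 = 0xffffffff;	/* old vs new initial sum */
	uint32_t zeros[3] = { 0, 0, 0 };	/* worst case: all-zero data */

	for (int i = 0; i < 3; i++) {
		seed0 = csum_add32(seed0, zeros[i]);
		seed1 = csum_add32(seed1, zeros[i]);
	}
	/* Both results are "checksum zero" (+0 vs -0), but only the old
	 * seed collides with the new 0-means-fault return value. */
	printf("seed 0:  %08x\n", seed0);	/* 00000000 */
	printf("seed ~0: %08x\n", seed1);	/* ffffffff */
	return 0;
}
```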
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 97b5c2d9fec4..3e07074e0098 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -173,47 +173,27 @@ ENTRY(csum_partial)
 	 mov	r6, r0
 
 /*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
-					int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
  */
 
 /*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- *	  DST definitions? It's damn hard to trigger all cases.  I hope I got
- *	  them all but there's no guarantee.
+ * Copy from ds while checksumming, otherwise like csum_partial with initial
+ * sum being ~0U
  */
 
-#define SRC(...)			\
+#define EXC(...)			\
 	9999: __VA_ARGS__ ;		\
 	.section __ex_table, "a";	\
 	.long 9999b, 6001f	;	\
 	.previous
 
-#define DST(...)			\
-	9999: __VA_ARGS__ ;		\
-	.section __ex_table, "a";	\
-	.long 9999b, 6002f	;	\
-	.previous
-
 !
 ! r4:	const char *SRC
 ! r5:	char *DST
 ! r6:	int LEN
-! r7:	int SUM
-!
-! on stack:
-! int *SRC_ERR_PTR
-! int *DST_ERR_PTR
 !
 ENTRY(csum_partial_copy_generic)
-	mov.l	r5,@-r15
-	mov.l	r6,@-r15
-
+	mov	#-1,r7
 	mov	#3,r0		! Check src and dest are equally aligned
 	mov	r4,r1
 	and	r0,r1
@@ -243,11 +223,11 @@ ENTRY(csum_partial_copy_generic)
 	clrt
 	.align	2
 5:
-SRC(	mov.b	@r4+,r1 	)
-SRC(	mov.b	@r4+,r0		)
+EXC(	mov.b	@r4+,r1 	)
+EXC(	mov.b	@r4+,r0		)
 	extu.b	r1,r1
-DST(	mov.b	r1,@r5		)
-DST(	mov.b	r0,@(1,r5)	)
+EXC(	mov.b	r1,@r5		)
+EXC(	mov.b	r0,@(1,r5)	)
 	extu.b	r0,r0
 	add	#2,r5
@@ -276,8 +256,8 @@ DST(	mov.b	r0,@(1,r5)	)
 	! Handle first two bytes as a special case
 	.align	2
 1:
-SRC(	mov.w	@r4+,r0	)
-DST(	mov.w	r0,@r5	)
+EXC(	mov.w	@r4+,r0	)
+EXC(	mov.w	r0,@r5	)
 	add	#2,r5
 	extu.w	r0,r0
 	addc	r0,r7
@@ -292,32 +272,32 @@ DST(	mov.w	r0,@r5	)
 	clrt
 	.align	2
 1:
-SRC(	mov.l	@r4+,r0	)
-SRC(	mov.l	@r4+,r1	)
+EXC(	mov.l	@r4+,r0	)
+EXC(	mov.l	@r4+,r1	)
 	addc	r0,r7
-DST(	mov.l	r0,@r5	)
-DST(	mov.l	r1,@(4,r5)	)
+EXC(	mov.l	r0,@r5	)
+EXC(	mov.l	r1,@(4,r5)	)
 	addc	r1,r7
-SRC(	mov.l	@r4+,r0	)
-SRC(	mov.l	@r4+,r1	)
+EXC(	mov.l	@r4+,r0	)
+EXC(	mov.l	@r4+,r1	)
 	addc	r0,r7
-DST(	mov.l	r0,@(8,r5)	)
-DST(	mov.l	r1,@(12,r5)	)
+EXC(	mov.l	r0,@(8,r5)	)
+EXC(	mov.l	r1,@(12,r5)	)
 	addc	r1,r7
-SRC(	mov.l	@r4+,r0	)
-SRC(	mov.l	@r4+,r1	)
+EXC(	mov.l	@r4+,r0	)
+EXC(	mov.l	@r4+,r1	)
 	addc	r0,r7
-DST(	mov.l	r0,@(16,r5)	)
-DST(	mov.l	r1,@(20,r5)	)
+EXC(	mov.l	r0,@(16,r5)	)
+EXC(	mov.l	r1,@(20,r5)	)
 	addc	r1,r7
-SRC(	mov.l	@r4+,r0	)
-SRC(	mov.l	@r4+,r1	)
+EXC(	mov.l	@r4+,r0	)
+EXC(	mov.l	@r4+,r1	)
 	addc	r0,r7
-DST(	mov.l	r0,@(24,r5)	)
-DST(	mov.l	r1,@(28,r5)	)
+EXC(	mov.l	r0,@(24,r5)	)
+EXC(	mov.l	r1,@(28,r5)	)
 	addc	r1,r7
 	add	#32,r5
 	movt	r0
@@ -335,9 +315,9 @@ DST(	mov.l	r1,@(28,r5)	)
 	clrt
 	shlr2	r6
 3:
-SRC(	mov.l	@r4+,r0	)
+EXC(	mov.l	@r4+,r0	)
 	addc	r0,r7
-DST(	mov.l	r0,@r5	)
+EXC(	mov.l	r0,@r5	)
 	add	#4,r5
 	movt	r0
 	dt	r6
@@ -353,8 +333,8 @@ DST(	mov.l	r0,@r5	)
 	mov	#2,r1
 	cmp/hs	r1,r6
 	bf	5f
-SRC(	mov.w	@r4+,r0	)
-DST(	mov.w	r0,@r5	)
+EXC(	mov.w	@r4+,r0	)
+EXC(	mov.w	r0,@r5	)
 	extu.w	r0,r0
 	add	#2,r5
 	cmp/eq	r1,r6
@@ -363,8 +343,8 @@ DST(	mov.w	r0,@r5	)
 	shll16	r0
 	addc	r0,r7
 5:
-SRC(	mov.b	@r4+,r0	)
-DST(	mov.b	r0,@r5	)
+EXC(	mov.b	@r4+,r0	)
+EXC(	mov.b	r0,@r5	)
 	extu.b	r0,r0
 #ifndef __LITTLE_ENDIAN__
 	shll8	r0
@@ -373,42 +353,13 @@ DST(	mov.b	r0,@r5	)
 	mov	#0,r0
 	addc	r0,r7
 7:
-5000:
 
 # Exception handler:
 .section .fixup, "ax"
 
 6001:
-	mov.l	@(8,r15),r0		! src_err_ptr
-	mov	#-EFAULT,r1
-	mov.l	r1,@r0
-
-	! zero the complete destination - computing the rest
-	! is too much work
-	mov.l	@(4,r15),r5	! dst
-	mov.l	@r15,r6		! len
-	mov	#0,r7
-1:	mov.b	r7,@r5
-	dt	r6
-	bf/s	1b
-	 add	#1,r5
-	mov.l	8000f,r0
-	jmp	@r0
-	 nop
-	.align	2
-8000:	.long	5000b
-
-6002:
-	mov.l	@(12,r15),r0	! dst_err_ptr
-	mov	#-EFAULT,r1
-	mov.l	r1,@r0
-	mov.l	8001f,r0
-	jmp	@r0
-	 nop
-	.align	2
-8001:	.long	5000b
-
+	rts
+	 mov	#0,r0
 .previous
-	add	#8,r15
 	rts
 	 mov	r7,r0
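
On the assembly side, the same change lets the twin SRC()/DST() fixup handlers (6001/6002), which loaded src_err_ptr or dst_err_ptr from the stack, stored -EFAULT through them, and zeroed the remaining destination bytes, collapse into a single EXC() handler that simply returns 0 (`rts` / `mov #0,r0`). A hedged sketch of how a generic-level caller consumes that return value (illustrative wrapper, not actual net/ code; `csum_and_copy_from_user()` itself is the real post-series kernel API):

```c
static int copy_and_checksum_from_user(const void __user *src, void *dst,
                                       int len, __wsum *csum_out)
{
	__wsum csum = csum_and_copy_from_user(src, dst, len);

	if (!csum)		/* 0 <=> fault; there is no err_ptr to check */
		return -EFAULT;
	*csum_out = csum;	/* ones'-complement sum, seeded with ~0U */
	return 0;
}
```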