| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-01 16:03:37 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-01 16:03:37 -0700 |
| commit | 4b01285e1672ed9342ace952e92eb1e1db7134ae (patch) | |
| tree | 0c7389b64a3af8babefaafd3d261138ed43b719f /arch/ia64 | |
| parent | b23c4771ff62de8ca9b5e4a2d64491b2fb6f8f69 (diff) | |
| parent | 001c1a655f0a4e4ebe5d9beb47466dc5c6ab4871 (diff) | |
Merge branch 'uaccess.csum' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess/csum updates from Al Viro:
"Regularize the sitation with uaccess checksum primitives:
- fold csum_partial_... into csum_and_copy_..._user()
- on x86 collapse several access_ok()/stac()/clac() into
user_access_begin()/user_access_end()"
* 'uaccess.csum' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
default csum_and_copy_to_user(): don't bother with access_ok()
take the dummy csum_and_copy_from_user() into net/checksum.h
arm: switch to csum_and_copy_from_user()
sh32: convert to csum_and_copy_from_user()
m68k: convert to csum_and_copy_from_user()
xtensa: switch to providing csum_and_copy_from_user()
sparc: switch to providing csum_and_copy_from_user()
parisc: turn csum_partial_copy_from_user() into csum_and_copy_from_user()
alpha: turn csum_partial_copy_from_user() into csum_and_copy_from_user()
ia64: turn csum_partial_copy_from_user() into csum_and_copy_from_user()
ia64: csum_partial_copy_nocheck(): don't abuse csum_partial_copy_from_user()
x86: switch 32bit csum_and_copy_to_user() to user_access_{begin,end}()
x86: switch both 32bit and 64bit to providing csum_and_copy_from_user()
x86_64: csum_..._copy_..._user(): switch to unsafe_..._user()
get rid of csum_partial_copy_to_user()
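
Several of the commits above ("take the dummy csum_and_copy_from_user() into net/checksum.h", "default csum_and_copy_to_user(): don't bother with access_ok()") converge on a generic fallback for architectures that do not provide an optimized combined routine. The sketch below approximates what that fallback looks like under the err_ptr-based calling convention still in use at this point in the series; it is an illustration of the idea, not a verbatim copy of the in-tree header.

```c
#include <linux/uaccess.h>
#include <asm/checksum.h>

/* Generic fallback (sketch): copy from user space first, then run
 * csum_partial() over the kernel-side buffer.  Architectures with an
 * optimized copy-and-checksum routine override this. */
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline __wsum
csum_and_copy_from_user(const void __user *src, void *dst,
			int len, __wsum sum, int *err_ptr)
{
	if (copy_from_user(dst, src, len)) {
		*err_ptr = -EFAULT;	/* report the fault to the caller */
		return sum;
	}
	return csum_partial(dst, len, sum);
}
#endif
```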
Diffstat (limited to 'arch/ia64')
| -rw-r--r-- | arch/ia64/include/asm/checksum.h | 10 |
|---|---|---|
| -rw-r--r-- | arch/ia64/lib/csum_partial_copy.c | 32 |

2 files changed, 2 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
index 0ed18bc3f6cf..2a1c64629cdc 100644
--- a/arch/ia64/include/asm/checksum.h
+++ b/arch/ia64/include/asm/checksum.h
@@ -37,16 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  */
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-/*
- * Same as csum_partial, but copies from src while it checksums.
- *
- * Here it is even more important to align src and dst on a 32-bit (or
- * even better 64-bit) boundary.
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-					   int len, __wsum sum,
-					   int *errp);
-
 extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 					int len, __wsum sum);
 
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index bf9396b1ed32..5d147a33d648 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -103,39 +103,11 @@ out:
  * This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
  * But it's very tricky to get right even in C.
  */
 
-extern unsigned long do_csum(const unsigned char *, long);
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-			    int len, __wsum psum, int *errp)
-{
-	unsigned long result;
-
-	/* XXX Fixme
-	 * for now we separate the copy from checksum for obvious
-	 * alignment difficulties. Look at the Alpha code and you'll be
-	 * scared.
-	 */
-
-	if (__copy_from_user(dst, src, len) != 0 && errp)
-		*errp = -EFAULT;
-
-	result = do_csum(dst, len);
-
-	/* add in old sum, and carry.. */
-	result += (__force u32)psum;
-	/* 32+c bits -> 32 bits */
-	result = (result & 0xffffffff) + (result >> 32);
-	return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	return csum_partial_copy_from_user((__force const void __user *)src,
-					   dst, len, sum, NULL);
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
 }
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
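
One detail worth calling out in the removed ia64 helper is the "32+c bits -> 32 bits" step: the running checksum is accumulated in a 64-bit register and the carries are folded back into the low 32 bits before the value is returned as a partial sum. The replacement body simply does memcpy() followed by csum_partial() over the destination, which computes the same partial sum without the open-coded fold. The stand-alone user-space program below is an illustrative toy of that folding arithmetic, not kernel code; fold64() and toy_csum_partial() are made-up names.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a 64-bit accumulator into a 32-bit ones'-complement partial sum,
 * mirroring the "32+c bits -> 32 bits" step in the removed ia64 helper. */
static uint32_t fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 32+c bits -> 32 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* absorb the final carry */
	return (uint32_t)sum;
}

/* Toy csum_partial(): sum the buffer as 16-bit words, then fold. */
static uint32_t toy_csum_partial(const void *buf, size_t len, uint32_t psum)
{
	const uint8_t *p = buf;
	uint64_t sum = psum;

	while (len > 1) {
		uint16_t w;

		memcpy(&w, p, sizeof(w));
		sum += w;
		p += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += *p;
	return fold64(sum);
}

int main(void)
{
	const char data[] = "checksum me";
	char copy[sizeof(data)];

	/* Same shape as the new csum_partial_copy_nocheck(): copy first,
	 * then checksum the destination buffer. */
	memcpy(copy, data, sizeof(data));
	printf("partial sum: 0x%08x\n", toy_csum_partial(copy, sizeof(copy), 0));
	return 0;
}
```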