author     Michael Ellerman <mpe@ellerman.id.au>   2020-05-20 23:37:33 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>   2020-05-20 23:37:33 +1000
commit     217ba7dccef8e811eee43003bfef24f1902f37c9 (patch)
tree       4ae0ab57daa2424f510850aa56352b0838265474 /arch
parent     30df74d67d48949da87e3a5b57c381763e8fd526 (diff)
parent     e2a8b49e79553bd8ec48f73cead84e6146c09408 (diff)
Merge branch 'topic/uaccess-ppc' into next
Merge our uaccess-ppc topic branch. It is based on the uaccess topic
branch that we're sharing with Viro.
This includes the addition of user_[read|write]_access_begin(), as
well as some powerpc-specific changes to our uaccess routines that
would conflict badly if merged separately.
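For context, a minimal caller-side sketch of the split access API this merge introduces. It is illustrative only and not part of the series; example_put_flags() is a made-up function, but user_write_access_begin(), unsafe_put_user() and user_write_access_end() are the interfaces being wired up below.

	/* Illustrative only: open a write-only user access window. */
	static int example_put_flags(u32 __user *uptr, u32 flags)
	{
		if (!user_write_access_begin(uptr, sizeof(*uptr)))
			return -EFAULT;

		/* A faulting store jumps straight to the label; no error code to test. */
		unsafe_put_user(flags, uptr, Efault);

		user_write_access_end();
		return 0;

	Efault:
		user_write_access_end();
		return -EFAULT;
	}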
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h |   4
-rw-r--r--  arch/powerpc/include/asm/kup.h           |  14
-rw-r--r--  arch/powerpc/include/asm/uaccess.h       | 149
3 files changed, 142 insertions, 25 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 3c0ba22dc360..1617e73bee30 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -108,7 +108,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 	u32 addr, end;
 
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
-	BUILD_BUG_ON(dir == KUAP_CURRENT);
+	BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
 
 	if (!(dir & KUAP_WRITE))
 		return;
@@ -131,7 +131,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __us
 
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
 
-	if (dir == KUAP_CURRENT) {
+	if (dir & KUAP_CURRENT_WRITE) {
 		u32 kuap = current->thread.kuap;
 
 		if (unlikely(!kuap))
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 92bcd1a26d73..c745ee41ad66 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -10,7 +10,9 @@
  * Use the current saved situation instead of the to/from/size params.
  * Used on book3s/32
  */
-#define KUAP_CURRENT		4
+#define KUAP_CURRENT_READ	4
+#define KUAP_CURRENT_WRITE	8
+#define KUAP_CURRENT		(KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/kup-radix.h>
@@ -101,6 +103,16 @@ static inline void prevent_current_access_user(void)
 	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
 }
 
+static inline void prevent_current_read_from_user(void)
+{
+	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+}
+
+static inline void prevent_current_write_to_user(void)
+{
+	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_KUAP_H_ */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index c0523efa1458..64c04ab09112 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -93,12 +93,12 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
 #define __get_user(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user_goto(x, ptr, label) \
+	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
 
 #define __get_user_allowed(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-#define __put_user_allowed(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
 
 #define __get_user_inatomic(x, ptr) \
 	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -207,17 +207,18 @@ do {								\
 		prevent_write_to_user(ptr, size);		\
 } while (0)
 
-#define __put_user_nocheck(x, ptr, size, do_allow)		\
+#define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
-	__chk_user_ptr(ptr);					\
-	if (do_allow)						\
-		__put_user_size((x), __pu_addr, (size), __pu_err);		\
-	else							\
-		__put_user_size_allowed((x), __pu_addr, (size), __pu_err);	\
+	__chk_user_ptr(__pu_addr);				\
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })
 
@@ -225,9 +226,13 @@ do {								\
 ({								\
 	long __pu_err = -EFAULT;				\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
 	might_fault();						\
-	if (access_ok(__pu_addr, size))				\
-		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	if (access_ok(__pu_addr, __pu_size))			\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })
 
@@ -235,12 +240,62 @@ do {								\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
-	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
+	__chk_user_ptr(__pu_addr);				\
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })
 
+#define __put_user_asm_goto(x, addr, label, op)			\
+	asm volatile goto(					\
+		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
+		EX_TABLE(1b, %l2)				\
+		:						\
+		: "r" (x), "m" (*addr)				\
+		:						\
+		: label)
+
+#ifdef __powerpc64__
+#define __put_user_asm2_goto(x, ptr, label)			\
+	__put_user_asm_goto(x, ptr, label, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2_goto(x, addr, label)			\
+	asm volatile goto(					\
+		"1:	stw%X1 %0, %1\n"			\
+		"2:	stw%X1 %L0, %L1\n"			\
+		EX_TABLE(1b, %l2)				\
+		EX_TABLE(2b, %l2)				\
+		:						\
+		: "r" (x), "m" (*addr)				\
+		:						\
+		: label)
+#endif /* __powerpc64__ */
+
+#define __put_user_size_goto(x, ptr, size, label)		\
+do {								\
+	switch (size) {						\
+	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
+	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
+	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
+	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
+	default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck_goto(x, ptr, size, label)		\
+do {								\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	if (!is_kernel_addr((unsigned long)__pu_addr))		\
+		might_fault();					\
+	__chk_user_ptr(ptr);					\
+	__put_user_size_goto((x), __pu_addr, (size), label);	\
+} while (0)
+
+
 extern long __get_user_bad(void);
 
 /*
@@ -328,15 +383,18 @@ do {								\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
 	barrier_nospec();					\
 	if (do_allow)						\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	else							\
-		__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
+								\
 	__gu_err;						\
 })
 
@@ -345,12 +403,15 @@ do {								\
 	long __gu_err = -EFAULT;				\
 	__long_type(*(ptr)) __gu_val = 0;			\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+	__typeof__(size) __gu_size = (size);			\
+								\
 	might_fault();						\
-	if (access_ok(__gu_addr, (size))) {			\
+	if (access_ok(__gu_addr, __gu_size)) {			\
 		barrier_nospec();				\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
 	__gu_err;						\
 })
 
@@ -359,10 +420,13 @@ do {								\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
 	barrier_nospec();					\
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
 	__gu_err;						\
 })
 
@@ -513,10 +577,51 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
 #define user_access_save	prevent_user_access_return
 #define user_access_restore	restore_user_access
 
+static __must_check inline bool
+user_read_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+	allow_read_from_user(ptr, len);
+	return true;
+}
+#define user_read_access_begin	user_read_access_begin
+#define user_read_access_end	prevent_current_read_from_user
+
+static __must_check inline bool
+user_write_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+	allow_write_to_user((void __user *)ptr, len);
+	return true;
+}
+#define user_write_access_begin	user_write_access_begin
+#define user_write_access_end	prevent_current_write_to_user
+
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+
 #define unsafe_copy_to_user(d, s, l, e) \
-	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+do {									\
+	u8 __user *_dst = (u8 __user *)(d);				\
+	const u8 *_src = (const u8 *)(s);				\
+	size_t _len = (l);						\
+	int _i;								\
+									\
+	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))	\
+		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
+	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
+		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
+		_i += 4;						\
+	}								\
+	if (_len & 2) {							\
+		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
+		_i += 2;						\
+	}								\
+	if (_len & 1)							\
+		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
+} while (0)
 
 #endif	/* _ARCH_POWERPC_UACCESS_H */
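For illustration only (not part of the commit): the new unsafe_copy_to_user() above copies the bulk of the buffer as longs and then mops up a 4-, 2- and 1-byte tail. The same chunking order, written as ordinary C with a hypothetical helper name (and assuming the kernel's u8/u16/u32 types), looks roughly like this:

	/* Hypothetical plain-C rendering of the chunking used by unsafe_copy_to_user(). */
	static inline void copy_chunks(u8 *dst, const u8 *src, size_t len)
	{
		size_t i;

		/* Whole machine words first. */
		for (i = 0; i < (len & ~(sizeof(long) - 1)); i += sizeof(long))
			*(long *)(dst + i) = *(const long *)(src + i);

		/* A 4-byte remainder is only possible when long is 8 bytes (64-bit). */
		if (sizeof(long) == 8 && (len & 4)) {
			*(u32 *)(dst + i) = *(const u32 *)(src + i);
			i += 4;
		}
		if (len & 2) {
			*(u16 *)(dst + i) = *(const u16 *)(src + i);
			i += 2;
		}
		if (len & 1)
			*(u8 *)(dst + i) = *(const u8 *)(src + i);
	}

In the real macro each store is issued through __put_user_goto(), so a faulting access branches straight to the caller's error label instead of returning an error code.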