From 3d4247fcc938d0ab5cf6fdb752dae07fdeab9736 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Tue, 14 Jan 2020 17:54:00 +0000
Subject: powerpc/32: Add support of KASAN_VMALLOC

Add support of KASAN_VMALLOC on PPC32.

To allow this, the early shadow covering the VMALLOC space needs to be
removed once the high_memory variable is set and before freeing memblock.
The VMALLOC area also needs to be aligned such that its boundaries are
covered by a full shadow page.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/031dec5487bde9b2181c8b3c9800e1879cf98c1a.1579024426.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/pgtable.h | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/powerpc/include/asm/book3s/32')

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 0796533d37dd..5b39c11e884a 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -193,7 +193,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #else
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 #endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END ioremap_bot
+#endif
 
 #ifndef __ASSEMBLY__
 #include
-- cgit v1.2.3


From 6ec20aa2e510b6297906c45f009aa08b2d97269a Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 24 Jan 2020 11:54:40 +0000
Subject: powerpc/32s: Fix bad_kuap_fault()

At the moment, bad_kuap_fault() reports a fault only if a bad access to
userspace occurred while access to userspace was not granted.

But if a fault occurs for a write outside the allowed userspace segment(s)
that have been unlocked, bad_kuap_fault() fails to detect it and the kernel
loops forever in do_page_fault().

Fix it by checking that the accessed address is within the allowed range.
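For readers following the kuap encoding used by the new range check, here is a small
host-side illustration (editorial sketch, not part of the patch; kuap_pack() and
kuap_unpack() are invented names mirroring the expressions used in the hunks below):
the top nibble of the packed word records the base of the first unlocked 256MB
segment, and shifting the word left by 28 bits yields the end of the last one.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the packing used for current->thread.kuap / regs->kuap:
 * top nibble = base of the first unlocked 256MB segment,
 * low nibble = index one past the last unlocked segment. */
static uint32_t kuap_pack(uint32_t addr, uint32_t end)
{
	return (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
}

static void kuap_unpack(uint32_t kuap, uint32_t *begin, uint32_t *end)
{
	*begin = kuap & 0xf0000000;	/* same decode as the fixed bad_kuap_fault() */
	*end   = kuap << 28;
}

int main(void)
{
	/* A 0x100-byte write window at 0x20001000 only spans segment 2. */
	uint32_t kuap = kuap_pack(0x20001000, 0x20001000 + 0x100);
	uint32_t begin, end;

	kuap_unpack(kuap, &begin, &end);
	printf("kuap=%08" PRIx32 " begin=%08" PRIx32 " end=%08" PRIx32 "\n",
	       kuap, begin, end);

	/* Only faulting addresses inside [begin, end) are legitimate. */
	assert(!(0x20001080 < begin || 0x20001080 >= end));	/* not reported */
	assert(0x30000000 < begin || 0x30000000 >= end);	/* reported */
	return 0;
}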
Fixes: a68c31fc01ef ("powerpc/32s: Implement Kernel Userspace Access Protection")
Cc: stable@vger.kernel.org # v5.2+
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/f48244e9485ada0a304ed33ccbb8da271180c80d.1579866752.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/kup.h | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/include/asm/book3s/32')

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index f9dc597b0b86..d88008c8eb85 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -131,12 +131,17 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
+	unsigned long begin = regs->kuap & 0xf0000000;
+	unsigned long end = regs->kuap << 28;
+
 	if (!is_write)
 		return false;
 
-	return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+	return WARN(address < begin || address >= end,
+		    "Bug: write fault blocked by segment registers !");
 }
 
 #endif /* CONFIG_PPC_KUAP */
-- cgit v1.2.3


From 1d8f739b07bd538f272f60bf53f10e7e6248d295 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 24 Jan 2020 11:54:41 +0000
Subject: powerpc/kuap: Fix set direction in allow/prevent_user_access()

__builtin_constant_p() always returns 0 for pointers, so on RADIX we
always end up opening both directions (by writing 0 in SPR29):

0000000000000170 <._copy_to_user>:
...
 1b0:	4c 00 01 2c 	isync
 1b4:	39 20 00 00 	li      r9,0
 1b8:	7d 3d 03 a6 	mtspr   29,r9
 1bc:	4c 00 01 2c 	isync
 1c0:	48 00 00 01 	bl      1c0 <._copy_to_user+0x50>
			1c0: R_PPC64_REL24	.__copy_tofrom_user
...

0000000000000220 <._copy_from_user>:
...
 2ac:	4c 00 01 2c 	isync
 2b0:	39 20 00 00 	li      r9,0
 2b4:	7d 3d 03 a6 	mtspr   29,r9
 2b8:	4c 00 01 2c 	isync
 2bc:	7f c5 f3 78 	mr      r5,r30
 2c0:	7f 83 e3 78 	mr      r3,r28
 2c4:	48 00 00 01 	bl      2c4 <._copy_from_user+0xa4>
			2c4: R_PPC64_REL24	.__copy_tofrom_user
...

Use an explicit parameter for direction selection, so that GCC is able
to see it is a constant:

00000000000001b0 <._copy_to_user>:
...
 1f0:	4c 00 01 2c 	isync
 1f4:	3d 20 40 00 	lis     r9,16384
 1f8:	79 29 07 c6 	rldicr  r9,r9,32,31
 1fc:	7d 3d 03 a6 	mtspr   29,r9
 200:	4c 00 01 2c 	isync
 204:	48 00 00 01 	bl      204 <._copy_to_user+0x54>
			204: R_PPC64_REL24	.__copy_tofrom_user
...

0000000000000260 <._copy_from_user>:
...
 2ec:	4c 00 01 2c 	isync
 2f0:	39 20 ff ff 	li      r9,-1
 2f4:	79 29 00 04 	rldicr  r9,r9,0,0
 2f8:	7d 3d 03 a6 	mtspr   29,r9
 2fc:	4c 00 01 2c 	isync
 300:	7f c5 f3 78 	mr      r5,r30
 304:	7f 83 e3 78 	mr      r3,r28
 308:	48 00 00 01 	bl      308 <._copy_from_user+0xa8>
			308: R_PPC64_REL24	.__copy_tofrom_user
...

Signed-off-by: Christophe Leroy
[mpe: Spell out the directions, s/KUAP_R/KUAP_READ/ etc.]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/f4e88ec4941d5facb35ce75026b0112f980086c3.1579866752.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/kup.h | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/include/asm/book3s/32')

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index d88008c8eb85..91c8f1d9bcee 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -102,11 +102,13 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 	isync();	/* Context sync required after mtsrin() */
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+					      u32 size, unsigned long dir)
 {
 	u32 addr, end;
 
-	if (__builtin_constant_p(to) && to == NULL)
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (!(dir & KUAP_WRITE))
 		return;
 
 	addr = (__force u32)to;
@@ -119,11 +121,16 @@ static inline void allow_user_access(void __user *to, const void __user *from, u
 	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
 }
 
-static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void prevent_user_access(void __user *to, const void __user *from,
+						u32 size, unsigned long dir)
 {
 	u32 addr = (__force u32)to;
 	u32 end = min(addr + size, TASK_SIZE);
 
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (!(dir & KUAP_WRITE))
+		return;
+
 	if (!addr || addr >= TASK_SIZE || !size)
 		return;
 
-- cgit v1.2.3


From 88f8c080d47f307871840a0a825ef556673eb592 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 24 Jan 2020 11:54:42 +0000
Subject: powerpc/32s: Drop NULL addr verification

NULL addr is a user address. Don't waste time checking it. If someone
tries to access it, it will SEGFAULT the same way as for address 1, so
no need to make it special.

The special case is when not doing a write; in that case we want to drop
the entire function. This is now handled by the 'dir' param and not by
the nullity of 'to' anymore.
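As a rough stand-alone illustration of that last point (a toy user-space program, not
kernel code; allow_access(), assert_constant() and not_a_constant() are invented, and
it relies on GCC with -O2 just like the kernel build), a constant integer 'dir' lets
the compiler both verify the constant at build time and drop the unused direction
entirely:

#include <stdio.h>

#define KUAP_READ	1
#define KUAP_WRITE	2
#define KUAP_READ_WRITE	(KUAP_READ | KUAP_WRITE)

/* Stand-in for BUILD_BUG_ON(): the call below only survives, and therefore
 * only breaks the build, if the optimizer cannot prove it unreachable. */
extern void not_a_constant(void) __attribute__((error("dir is not a compile-time constant")));
#define assert_constant(x) do { if (!__builtin_constant_p(x)) not_a_constant(); } while (0)

static inline __attribute__((always_inline)) void allow_access(unsigned long dir)
{
	assert_constant(dir);
	if (!(dir & KUAP_WRITE))	/* folded away once 'dir' is a constant */
		return;
	puts("write window opened");
}

int main(void)
{
	allow_access(KUAP_READ);	/* whole body optimized out */
	allow_access(KUAP_READ_WRITE);	/* prints the message */
	return 0;
}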
Also make the beginning of prevent_user_access() similar to the beginning of
allow_user_access(), and tell the compiler that writing in kernel space or
with a 0 length is unlikely.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/85e971223dfe6ace734637db1841678939a76155.1579866752.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/kup.h | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/include/asm/book3s/32')

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 91c8f1d9bcee..de29fb752cca 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -113,7 +113,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 
 	addr = (__force u32)to;
 
-	if (!addr || addr >= TASK_SIZE || !size)
+	if (unlikely(addr >= TASK_SIZE || !size))
 		return;
 
 	end = min(addr + size, TASK_SIZE);
@@ -124,16 +124,18 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 static __always_inline void prevent_user_access(void __user *to, const void __user *from,
 						u32 size, unsigned long dir)
 {
-	u32 addr = (__force u32)to;
-	u32 end = min(addr + size, TASK_SIZE);
+	u32 addr, end;
 
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
 	if (!(dir & KUAP_WRITE))
 		return;
 
-	if (!addr || addr >= TASK_SIZE || !size)
+	addr = (__force u32)to;
+
+	if (unlikely(addr >= TASK_SIZE || !size))
 		return;
 
+	end = min(addr + size, TASK_SIZE);
 	current->thread.kuap = 0;
 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
 }
-- cgit v1.2.3


From bedb4dbe443c11ff551b4ae4e48c8676fdc96467 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 24 Jan 2020 11:54:43 +0000
Subject: powerpc/32s: Prepare prevent_user_access() for user_access_end()

In preparation for implementing user_access_begin and friends on powerpc,
the book3s/32 version of prevent_user_access() needs to be prepared for
user_access_end().

user_access_end() doesn't provide the address and size which were passed
to user_access_begin(), required by prevent_user_access() to know which
segment to modify.

The list of segments which were unprotected by allow_user_access() is
available in current->kuap. But we don't want prevent_user_access() to
read this all the time, especially every time it is 0 (for instance
because the access was not a write access).

Implement a special direction named KUAP_CURRENT. In this case only, the
addr and end are retrieved from current->kuap.
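To see where KUAP_CURRENT is headed, here is a hypothetical sketch (editorial
illustration only, not a patch from this series; the eventual powerpc implementation
may differ) of how user_access_begin()/user_access_end() could sit on top of these
primitives. user_access_end() has no pointer or length of its own, so it passes
KUAP_CURRENT and lets prevent_user_access() re-lock whatever range was recorded in
current->thread.kuap:

static __always_inline bool user_access_begin(void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_user_access(ptr, ptr, len, KUAP_READ_WRITE);	/* records the range */
	return true;
}

static __always_inline void user_access_end(void)
{
	/* to/size are ignored for KUAP_CURRENT: the range comes from
	 * current->thread.kuap, saved by the matching allow_user_access(). */
	prevent_user_access(NULL, NULL, 0, KUAP_CURRENT);
}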
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/55bcc1f25d8200892a31f67a0b024ff3b816c3cc.1579866752.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/kup.h | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

(limited to 'arch/powerpc/include/asm/book3s/32')

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index de29fb752cca..17e069291c72 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -108,6 +108,8 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 	u32 addr, end;
 
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	BUILD_BUG_ON(dir == KUAP_CURRENT);
+
 	if (!(dir & KUAP_WRITE))
 		return;
 
@@ -117,6 +119,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 		return;
 
 	end = min(addr + size, TASK_SIZE);
+	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
 	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
 }
 
@@ -127,15 +130,25 @@ static __always_inline void prevent_user_access(void __user *to, const void __us
 	u32 addr, end;
 
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
-	if (!(dir & KUAP_WRITE))
-		return;
 
-	addr = (__force u32)to;
+	if (dir == KUAP_CURRENT) {
+		u32 kuap = current->thread.kuap;
 
-	if (unlikely(addr >= TASK_SIZE || !size))
+		if (unlikely(!kuap))
+			return;
+
+		addr = kuap & 0xf0000000;
+		end = kuap << 28;
+	} else if (dir & KUAP_WRITE) {
+		addr = (__force u32)to;
+		end = min(addr + size, TASK_SIZE);
+
+		if (unlikely(addr >= TASK_SIZE || !size))
+			return;
+	} else {
 		return;
+	}
 
-	end = min(addr + size, TASK_SIZE);
 	current->thread.kuap = 0;
 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
 }
-- cgit v1.2.3


From 3d7dfd632f9b60cfce069b4da517e6b1a1c3f613 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 24 Jan 2020 11:54:45 +0000
Subject: powerpc: Implement user_access_save() and user_access_restore()

Implement user_access_save() and user_access_restore().

On 8xx and radix:
- On save, get the value of the associated special register then
  prevent user access.
- On restore, write the saved value back to the associated special
  register.

On book3s/32:
- On save, get the value stored in current->thread.kuap and prevent
  user access.
- On restore, regenerate the address range from the stored value and
  reopen read/write access for that range.
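A typical call pattern for the new pair looks like the sketch below (illustrative
only; report_problem() is an invented caller): code that may run while a user access
window is open closes it, does its unrelated work, then puts back exactly what was
open before:

static void report_problem(const char *msg)
{
	unsigned long ua_flags = user_access_save();	/* lock user access, remember what was open */

	pr_err("problem: %s\n", msg);			/* work that must not run with the window open */

	user_access_restore(ua_flags);			/* reopen the saved range, if any */
}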
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/54f2f74938006b33c55a416674807b42ef222068.1579866752.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/kup.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

(limited to 'arch/powerpc/include/asm/book3s/32')

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 17e069291c72..3c0ba22dc360 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -153,6 +153,29 @@ static __always_inline void prevent_user_access(void __user *to, const void __us
 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
 }
 
+static inline unsigned long prevent_user_access_return(void)
+{
+	unsigned long flags = current->thread.kuap;
+	unsigned long addr = flags & 0xf0000000;
+	unsigned long end = flags << 28;
+	void __user *to = (__force void __user *)addr;
+
+	if (flags)
+		prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+
+	return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+	unsigned long addr = flags & 0xf0000000;
+	unsigned long end = flags << 28;
+	void __user *to = (__force void __user *)addr;
+
+	if (flags)
+		allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+}
+
 static inline bool
 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
-- cgit v1.2.3