author     Linus Torvalds <torvalds@linux-foundation.org>   2020-02-04 13:06:46 +0000
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-02-04 13:06:46 +0000
commit     71c3a888cbcaf453aecf8d2f8fb003271d28073f (patch)
tree       9895a04a40c39db585d2e1a0553bbc79d4b0b084 /arch/powerpc/mm
parent     153b5c566d30fb984827acb12fd93c3aa6c147d3 (diff)
parent     4c25df5640ae6e4491ee2c50d3f70c1559ef037d (diff)
Merge tag 'powerpc-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "A pretty small batch for us, and apologies for it being a bit late, I
  wanted to sneak Christophe's user_access_begin() series in.

  Summary:

   - Implement user_access_begin() and friends for our platforms that
     support controlling kernel access to userspace.

   - Enable CONFIG_VMAP_STACK on 32-bit Book3S and 8xx.

   - Some tweaks to our pseries IOMMU code to allow SVMs ("secure"
     virtual machines) to use the IOMMU.

   - Add support for CLOCK_{REALTIME/MONOTONIC}_COARSE to the 32-bit
     VDSO, and some other improvements.

   - A series to use the PCI hotplug framework to control opencapi
     cards so that they can be reset and re-read after flashing a new
     FPGA image.

  As well as other minor fixes and improvements as usual.

  Thanks to: Alastair D'Silva, Alexandre Ghiti, Alexey Kardashevskiy,
  Andrew Donnellan, Aneesh Kumar K.V, Anju T Sudhakar, Bai Yingjie,
  Chen Zhou, Christophe Leroy, Frederic Barrat, Greg Kurz, Jason A.
  Donenfeld, Joel Stanley, Jordan Niethe, Julia Lawall, Krzysztof
  Kozlowski, Laurent Dufour, Laurentiu Tudor, Linus Walleij, Michael
  Bringmann, Nathan Chancellor, Nicholas Piggin, Nick Desaulniers,
  Oliver O'Halloran, Peter Ujfalusi, Pingfan Liu, Ram Pai, Randy
  Dunlap, Russell Currey, Sam Bobroff, Sebastian Andrzej Siewior,
  Shawn Anastasio, Stephen Rothwell, Steve Best, Sukadev Bhattiprolu,
  Thiago Jung Bauermann, Tyrel Datwyler, Vaibhav Jain"

* tag 'powerpc-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (131 commits)
  powerpc: configs: Cleanup old Kconfig options
  powerpc/configs/skiroot: Enable some more hardening options
  powerpc/configs/skiroot: Disable xmon default & enable reboot on panic
  powerpc/configs/skiroot: Enable security features
  powerpc/configs/skiroot: Update for symbol movement only
  powerpc/configs/skiroot: Drop default n CONFIG_CRYPTO_ECHAINIV
  powerpc/configs/skiroot: Drop HID_LOGITECH
  powerpc/configs: Drop NET_VENDOR_HP which moved to staging
  powerpc/configs: NET_CADENCE became NET_VENDOR_CADENCE
  powerpc/configs: Drop CONFIG_QLGE which moved to staging
  powerpc: Do not consider weak unresolved symbol relocations as bad
  powerpc/32s: Fix kasan_early_hash_table() for CONFIG_VMAP_STACK
  powerpc: indent to improve Kconfig readability
  powerpc: Provide initial documentation for PAPR hcalls
  powerpc: Implement user_access_save() and user_access_restore()
  powerpc: Implement user_access_begin and friends
  powerpc/32s: Prepare prevent_user_access() for user_access_end()
  powerpc/32s: Drop NULL addr verification
  powerpc/kuap: Fix set direction in allow/prevent_user_access()
  powerpc/32s: Fix bad_kuap_fault()
  ...
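For context on the headline item: the user_access_begin() family lets a caller open a single KUAP (Kernel Userspace Access Prevention) window around a batch of userspace accesses instead of toggling protection on every get_user()/put_user(). Below is a minimal sketch of the generic calling pattern, assuming nothing beyond the common kernel uaccess API; demo_get_user_int() is a hypothetical helper for illustration only, not code from this merge.

#include <linux/uaccess.h>

/*
 * Hypothetical example (not part of this merge): copy one int from
 * userspace inside a single user_access_begin()/user_access_end()
 * window, the API this series wires up for powerpc's KUAP.
 */
static int demo_get_user_int(int *dst, const int __user *src)
{
	/* Open the KUAP window for exactly this user range. */
	if (!user_access_begin(src, sizeof(*src)))
		return -EFAULT;

	/* No per-access KUAP open/close inside the window. */
	unsafe_get_user(*dst, src, efault);

	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}

The powerpc-specific part of the series is providing the arch hooks behind this pattern (allow/prevent_user_access(), user_access_save()/user_access_restore()), as reflected in the commit titles above.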
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/book3s32/hash_low.S       |  46
-rw-r--r--   arch/powerpc/mm/book3s32/mmu.c            |   9
-rw-r--r--   arch/powerpc/mm/book3s64/hash_utils.c     |  11
-rw-r--r--   arch/powerpc/mm/book3s64/radix_pgtable.c  |   6
-rw-r--r--   arch/powerpc/mm/book3s64/radix_tlb.c      |   3
-rw-r--r--   arch/powerpc/mm/fault.c                   |  11
-rw-r--r--   arch/powerpc/mm/kasan/kasan_init_32.c     |  89
-rw-r--r--   arch/powerpc/mm/mem.c                     |   4
-rw-r--r--   arch/powerpc/mm/mmu_decl.h                |   6
-rw-r--r--   arch/powerpc/mm/nohash/8xx.c              |  13
-rw-r--r--   arch/powerpc/mm/pgtable_32.c              |   1
-rw-r--r--   arch/powerpc/mm/ptdump/ptdump.c           |   6
12 files changed, 125 insertions, 80 deletions
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 8bbbd9775c8a..c11b0a005196 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -25,6 +25,12 @@
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>
+#ifdef CONFIG_VMAP_STACK
+#define ADDR_OFFSET 0
+#else
+#define ADDR_OFFSET PAGE_OFFSET
+#endif
+
#ifdef CONFIG_SMP
.section .bss
.align 2
@@ -47,8 +53,8 @@ mmu_hash_lock:
.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
- lis r8, (mmu_hash_lock - PAGE_OFFSET)@h
- ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
+ lis r8, (mmu_hash_lock - ADDR_OFFSET)@h
+ ori r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
lis r0,0x0fff
b 10f
11: lwz r6,0(r8)
@@ -66,9 +72,12 @@ _GLOBAL(hash_page)
cmplw 0,r4,r0
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
+#ifdef CONFIG_VMAP_STACK
+ tovirt(r5, r5)
+#endif
blt+ 112f /* assume user more likely */
- lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
+ lis r5, (swapper_pg_dir - ADDR_OFFSET)@ha /* if kernel address, use */
+ addi r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l /* kernel page table */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
@@ -80,6 +89,9 @@ _GLOBAL(hash_page)
lwzx r8,r8,r5 /* Get L1 entry */
rlwinm. r8,r8,0,0,20 /* extract pt base address */
#endif
+#ifdef CONFIG_VMAP_STACK
+ tovirt(r8, r8)
+#endif
#ifdef CONFIG_SMP
beq- hash_page_out /* return if no mapping */
#else
@@ -137,9 +149,9 @@ retry:
#ifdef CONFIG_SMP
eieio
- lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+ lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha
li r0,0
- stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+ stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
#endif
/* Return from the exception */
@@ -152,9 +164,9 @@ retry:
#ifdef CONFIG_SMP
hash_page_out:
eieio
- lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+ lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha
li r0,0
- stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+ stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
blr
#endif /* CONFIG_SMP */
@@ -329,7 +341,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
patch_site 1f, patch__hash_page_A1
patch_site 2f, patch__hash_page_A2
/* Get the address of the primary PTE group in the hash table (r3) */
-0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
+0: lis r0, (Hash_base - ADDR_OFFSET)@h /* base address of hash table */
1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r3,r3,r0 /* make primary hash */
@@ -343,10 +355,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ 10f /* no PTE: go look for an empty slot */
tlbie r4
- lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
- lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+ lis r4, (htab_hash_searches - ADDR_OFFSET)@ha
+ lwz r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
addi r6,r6,1 /* count how many searches we do */
- stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+ stw r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r0
@@ -378,10 +390,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ found_empty
/* update counter of times that the primary PTEG is full */
- lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
- lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+ lis r4, (primary_pteg_full - ADDR_OFFSET)@ha
+ lwz r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
addi r6,r6,1
- stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+ stw r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
patch_site 0f, patch__hash_page_C
/* Search the secondary PTEG for an empty slot */
@@ -415,8 +427,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
* lockup here but that shouldn't happen
*/
-1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
- lwz r6, (next_slot - PAGE_OFFSET)@l(r4)
+1: lis r4, (next_slot - ADDR_OFFSET)@ha /* get next evict slot */
+ lwz r6, (next_slot - ADDR_OFFSET)@l(r4)
addi r6,r6,HPTE_SIZE /* search for candidate */
andi. r6,r6,7*HPTE_SIZE
stw r6,next_slot@l(r4)
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 69b2419accef..0a1c65a2c565 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -413,6 +413,7 @@ void __init MMU_init_hw(void)
void __init MMU_init_hw_patch(void)
{
unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+ unsigned int hash;
if (ppc_md.progress)
ppc_md.progress("hash:patch", 0x345);
@@ -424,8 +425,12 @@ void __init MMU_init_hw_patch(void)
/*
* Patch up the instructions in hashtable.S:create_hpte
*/
- modify_instruction_site(&patch__hash_page_A0, 0xffff,
- ((unsigned int)Hash - PAGE_OFFSET) >> 16);
+ if (IS_ENABLED(CONFIG_VMAP_STACK))
+ hash = (unsigned int)Hash;
+ else
+ hash = (unsigned int)Hash - PAGE_OFFSET;
+
+ modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b30435c7d804..523d4d39d11e 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -652,6 +652,7 @@ static void init_hpte_page_sizes(void)
static void __init htab_init_page_sizes(void)
{
+ bool aligned = true;
init_hpte_page_sizes();
if (!debug_pagealloc_enabled()) {
@@ -659,7 +660,15 @@ static void __init htab_init_page_sizes(void)
* Pick a size for the linear mapping. Currently, we only
* support 16M, 1M and 4K which is the default
*/
- if (mmu_psize_defs[MMU_PAGE_16M].shift)
+ if (IS_ENABLED(STRICT_KERNEL_RWX) &&
+ (unsigned long)_stext % 0x1000000) {
+ if (mmu_psize_defs[MMU_PAGE_16M].shift)
+ pr_warn("Kernel not 16M aligned, "
+ "disabling 16M linear map alignment");
+ aligned = false;
+ }
+
+ if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned)
mmu_linear_psize = MMU_PAGE_16M;
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_linear_psize = MMU_PAGE_1M;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 974109bb85db..dd1bea45325c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -337,7 +337,11 @@ static void __init radix_init_pgtable(void)
}
/* Find out how many PID bits are supported */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 20;
+ mmu_base_pid = 1;
+ } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
if (!mmu_pid_bits)
mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index a95175c0972b..03f43c924e00 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1161,6 +1161,9 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
return;
+ if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ return;
+
/*
* If this context hasn't run on that CPU before and KVM is
* around, there's a slim chance that the guest on another
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b5047f9b5dec..8db0507619e2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
// Read/write fault in a valid region (the exception table search passed
// above), but blocked by KUAP is bad, it can never succeed.
- if (bad_kuap_fault(regs, is_write))
+ if (bad_kuap_fault(regs, address, is_write))
return true;
// What's left? Kernel fault on user in well defined regions (extable
@@ -279,12 +279,8 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
access_ok(nip, sizeof(*nip))) {
unsigned int inst;
- int res;
- pagefault_disable();
- res = __get_user_inatomic(inst, nip);
- pagefault_enable();
- if (!res)
+ if (!probe_user_read(&inst, nip, sizeof(inst)))
return !store_updates_sp(inst);
*must_retry = true;
}
@@ -354,6 +350,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
* Userspace trying to access kernel address, we get PROTFAULT for that.
*/
if (is_user && address >= TASK_SIZE) {
+ if ((long)address == -1)
+ return;
+
pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
current->comm, current->pid, address,
from_kuid(&init_user_ns, current_uid()));
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0e6ed4413eea..16dd95bd0749 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,7 +12,7 @@
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
{
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
return PAGE_KERNEL_RO;
}
-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
unsigned long va = (unsigned long)kasan_early_shadow_page;
phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,29 +30,25 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}
-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
pmd_t *pmd;
unsigned long k_cur, k_next;
- pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
+ pte_t *new = NULL;
pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
- pte_t *new;
-
k_next = pgd_addr_end(k_cur, k_end);
if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
continue;
- if (slab_is_available())
- new = pte_alloc_one_kernel(&init_mm);
- else
+ if (!new)
new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
if (!new)
return -ENOMEM;
- kasan_populate_pte(new, prot);
+ kasan_populate_pte(new, PAGE_KERNEL);
smp_wmb(); /* See comment in __pte_alloc */
@@ -63,39 +59,27 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
new = NULL;
}
spin_unlock(&init_mm.page_table_lock);
-
- if (new && slab_is_available())
- pte_free_kernel(&init_mm, new);
}
return 0;
}
-static void __ref *kasan_get_one_page(void)
-{
- if (slab_is_available())
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
- return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
unsigned long k_cur;
int ret;
- void *block = NULL;
+ void *block;
ret = kasan_init_shadow_page_tables(k_start, k_end);
if (ret)
return ret;
- if (!slab_is_available())
- block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ block = memblock_alloc(k_end - k_start, PAGE_SIZE);
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
- void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+ void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
if (!va)
@@ -129,6 +113,31 @@ static void __init kasan_remap_early_shadow_ro(void)
flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}
+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+ unsigned long k_cur;
+ phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+ if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+ if (ret)
+ panic("kasan: kasan_init_shadow_page_tables() failed");
+ }
+ for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+ if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+ continue;
+
+ __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+}
+
void __init kasan_mmu_init(void)
{
int ret;
@@ -165,34 +174,22 @@ void __init kasan_init(void)
pr_info("KASAN init done\n");
}
-#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
+void __init kasan_late_init(void)
{
- void *base;
-
- base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __builtin_return_address(0));
-
- if (!base)
- return NULL;
-
- if (!kasan_init_region(base, size))
- return base;
-
- vfree(base);
-
- return NULL;
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_unmap_early_shadow_vmalloc();
}
-#endif
#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
static void __init kasan_early_hash_table(void)
{
- modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
- modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+ unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
+ __pa(early_hash);
+
+ modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
+ modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
Hash = (struct hash_pte *)early_hash;
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f5535eae637f..ef7b1119b2e2 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -49,6 +49,7 @@
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
+#include <asm/kasan.h>
#include <mm/mmu_decl.h>
@@ -301,6 +302,9 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
+
+ kasan_late_init();
+
memblock_free_all();
#ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 8e99649c24fc..7097e07a209a 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -181,3 +181,9 @@ void mmu_mark_rodata_ro(void);
static inline void mmu_mark_initmem_nx(void) { }
static inline void mmu_mark_rodata_ro(void) { }
#endif
+
+#ifdef CONFIG_PPC_DEBUG_WX
+void ptdump_check_wx(void);
+#else
+static inline void ptdump_check_wx(void) { }
+#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 96eb8e43f39b..3189308dece4 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -21,33 +21,34 @@ extern int __map_without_ltlbs;
static unsigned long block_mapped_ram;
/*
- * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
* Otherwise, returns 0
*/
phys_addr_t v_block_mapped(unsigned long va)
{
unsigned long p = PHYS_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
return p + va - VIRT_IMMR_BASE;
+ if (__map_without_ltlbs)
+ return 0;
if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
return __pa(va);
return 0;
}
/*
- * Return VA for a given PA mapped with LTLBs or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or fixmap
+ * Return 0 if not mapped
*/
unsigned long p_block_mapped(phys_addr_t pa)
{
unsigned long p = PHYS_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (pa >= p && pa < p + IMMR_SIZE)
return VIRT_IMMR_BASE + pa - p;
+ if (__map_without_ltlbs)
+ return 0;
if (pa < block_mapped_ram)
return (unsigned long)__va(pa);
return 0;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 73b84166d06a..5fb90edd865e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -218,6 +218,7 @@ void mark_rodata_ro(void)
if (v_block_mapped((unsigned long)_sinittext)) {
mmu_mark_rodata_ro();
+ ptdump_check_wx();
return;
}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 2f9ddc29c535..206156255247 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -24,6 +24,8 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <mm/mmu_decl.h>
+
#include "ptdump.h"
/*
@@ -173,10 +175,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
+ pte_t pte = __pte(st->current_flags);
+
if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
return;
- if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
+ if (!pte_write(pte) || !pte_exec(pte))
return;
WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",