author     Mike Rapoport <rppt@linux.ibm.com>              2021-07-07 18:07:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-07-08 11:48:20 -0700
commit     6d47c23b16aa78ff93a3050ccf4b1bd1c064b8b3 (patch)
tree       159d26d035facafbfec9539a1c3f8ab9f770589b /arch/arm64
parent     10cc327883919dbd2d77c858a50698622760639d (diff)
set_memory: allow querying whether set_direct_map_*() is actually enabled
On arm64, set_direct_map_*() functions may return 0 without actually
changing the linear map. This behaviour can be controlled using kernel
parameters, so we need a way to determine at runtime whether calls to
set_direct_map_invalid_noflush() and set_direct_map_default_noflush() have
any effect.
Extend the set_memory API with a can_set_direct_map() function that allows
checking whether calls to set_direct_map_*() will actually change the page
table, replace several occurrences of open-coded checks in arm64 with the
new function, and provide a generic stub for architectures that always
modify page tables upon calls to the set_direct_map APIs.
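The generic stub mentioned above lives outside the arm64 diff shown below
(the diffstat here is limited to arch/arm64). As a minimal sketch, assuming
the fallback sits in include/linux/set_memory.h and that architectures
providing their own implementation also define can_set_direct_map as a
macro (as the new arm64 header does), it could look like this:

    #ifndef can_set_direct_map
    /*
     * Sketch of a generic fallback: architectures that do not override
     * this helper always honour set_direct_map_*() requests, so report
     * the direct map as modifiable.
     */
    static inline bool can_set_direct_map(void)
    {
            return true;
    }
    #define can_set_direct_map can_set_direct_map
    #endif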
[arnd@arndb.de: arm64: kfence: fix header inclusion]
Link: https://lkml.kernel.org/r/20210518072034.31572-4-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tycho Andersen <tycho@tycho.ws>
Cc: Will Deacon <will@kernel.org>
Cc: kernel test robot <lkp@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/Kbuild         |  1
-rw-r--r--  arch/arm64/include/asm/cacheflush.h   |  6
-rw-r--r--  arch/arm64/include/asm/kfence.h       |  2
-rw-r--r--  arch/arm64/include/asm/set_memory.h   | 17
-rw-r--r--  arch/arm64/kernel/machine_kexec.c     |  1
-rw-r--r--  arch/arm64/mm/mmu.c                   |  7
-rw-r--r--  arch/arm64/mm/pageattr.c              | 13
7 files changed, 31 insertions, 16 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 26889dbfe904..64202010b700 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -3,7 +3,6 @@ generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += set_memory.h
 generic-y += user.h
 
 generated-y += cpucaps.h
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 543c997eb3b7..5a228e203ef9 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -144,12 +144,6 @@ static __always_inline void icache_inval_all_pou(void)
 	dsb(ish);
 }
 
-int set_memory_valid(unsigned long addr, int numpages, int enable);
-
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-bool kernel_page_present(struct page *page);
-
 #include <asm-generic/cacheflush.h>
 
 #endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index d061176d57ea..aa855c6a0ae6 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,7 +8,7 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
 
 static inline bool arch_kfence_init_pool(void) { return true; }
 
diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
new file mode 100644
index 000000000000..0f740b781187
--- /dev/null
+++ b/arch/arm64/include/asm/set_memory.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARM64_SET_MEMORY_H
+#define _ASM_ARM64_SET_MEMORY_H
+
+#include <asm-generic/set_memory.h>
+
+bool can_set_direct_map(void);
+#define can_set_direct_map can_set_direct_map
+
+int set_memory_valid(unsigned long addr, int numpages, int enable);
+
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
+
+#endif /* _ASM_ARM64_SET_MEMORY_H */
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 03ceabe4d912..213d56c14f60 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/kexec.h>
 #include <linux/page-flags.h>
+#include <linux/set_memory.h>
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 595fde9a47dd..d74586508448 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/set_memory.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -515,8 +516,7 @@ static void __init map_mem(pgd_t *pgdp)
 	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-	if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1489,8 +1489,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	 * KFENCE requires linear map to be mapped at page granularity, so that
 	 * it is possible to protect/unprotect single pages in the KFENCE pool.
 	 */
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 92eccaf595c8..a3bacd79507a 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -19,6 +19,11 @@ struct page_change_data {
 
 bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
 
+bool can_set_direct_map(void)
+{
+	return rodata_full || debug_pagealloc_enabled();
+}
+
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 {
 	struct page_change_data *cdata = data;
@@ -155,7 +160,7 @@ int set_direct_map_invalid_noflush(struct page *page)
 		.clear_mask = __pgprot(PTE_VALID),
 	};
 
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -170,7 +175,7 @@ int set_direct_map_default_noflush(struct page *page)
 		.clear_mask = __pgprot(PTE_RDONLY),
 	};
 
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -181,7 +186,7 @@ int set_direct_map_default_noflush(struct page *page)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return;
 
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
@@ -206,7 +211,7 @@ bool kernel_page_present(struct page *page)
 	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);
 
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return true;
 
 	pgdp = pgd_offset_k(addr);
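The pageattr.c change above is what makes can_set_direct_map() return
true only when rodata=full or debug_pagealloc is in effect. As a usage
sketch (not part of this patch), a caller that must know whether pages
can really be dropped from the linear map could gate its request on the
new helper; the function name and error choice below are illustrative
only:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/set_memory.h>

    /*
     * Hypothetical example: remove a page from the direct map only when
     * the architecture will actually honour the request, instead of
     * silently getting a successful no-op.
     */
    static int example_remove_from_direct_map(struct page *page)
    {
            if (!can_set_direct_map())
                    return -EPERM;

            return set_direct_map_invalid_noflush(page);
    }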