author     Linus Torvalds <torvalds@linux-foundation.org>  2020-10-23 09:46:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-10-23 09:46:16 -0700
commit     032c7ed958174957a4d6eac61806f66e1123d815 (patch)
tree       0468862c64b825b437181700f2e9ba2870a83b1c /arch/arm64/include
parent     f9893351acaecf0a414baf9942b48d5bb5c688c6 (diff)
parent     66dd3474702aa98d5844367e1577cdad78ef7c65 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull more arm64 updates from Will Deacon:
 "A small selection of further arm64 fixes and updates. Most of these
  are fixes that came in during the merge window, with the exception of
  the HAVE_MOVE_PMD mremap() speed-up which we discussed back in 2018
  and somehow forgot to enable upstream.

   - Improve performance of Spectre-v2 mitigation on Falkor CPUs (if
     you're lucky enough to have one)

   - Select HAVE_MOVE_PMD. This has been shown to improve mremap()
     performance, which is used heavily by the Android runtime GC, and
     it seems we forgot to enable this upstream back in 2018

   - Ensure linker flags are consistent between LLVM and BFD

   - Fix stale comment in Spectre mitigation rework

   - Fix broken copyright header

   - Fix KASLR randomisation of the linear map

   - Prevent arm64-specific prctl()s from compat tasks (return -EINVAL)"

Link: https://lore.kernel.org/kvmarm/20181108181201.88826-3-joelaf@google.com/

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: proton-pack: Update comment to reflect new function name
  arm64: spectre-v2: Favour CPU-specific mitigation at EL2
  arm64: link with -z norelro regardless of CONFIG_RELOCATABLE
  arm64: Fix a broken copyright header in gen_vdso_offsets.sh
  arm64: mremap speedup - Enable HAVE_MOVE_PMD
  arm64: mm: use single quantity to represent the PA to VA translation
  arm64: reject prctl(PR_PAC_RESET_KEYS) on compat tasks
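The HAVE_MOVE_PMD item above changes only how the kernel relocates page tables during mremap(); the userspace API is unchanged. As a rough illustration (not part of this merge; the 256 MiB size and the reserve-then-move trick are assumptions chosen for the example), the sketch below forces a large anonymous mapping to be moved, the kind of operation that gets faster when page tables can be moved one 2 MiB PMD at a time instead of PTE by PTE:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

#define SZ (256UL << 20)	/* 256 MiB: spans many 2 MiB PMD entries */

int main(void)
{
	void *old = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(old, 0xab, SZ);		/* fault the pages in */

	/* Reserve a free destination range, then release it so mremap()
	 * can claim it. */
	void *dst = mmap(NULL, SZ, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED) {
		perror("mmap dst");
		return 1;
	}
	munmap(dst, SZ);

	/* Moving the mapping forces the kernel to migrate its page
	 * tables; with HAVE_MOVE_PMD it can copy PMD entries wholesale
	 * instead of walking every PTE. */
	void *new = mremap(old, SZ, SZ, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (new == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved %lu MiB from %p to %p\n", SZ >> 20, old, new);
	munmap(new, SZ);
	return 0;
}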
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/memory.h   5
-rw-r--r--  arch/arm64/include/asm/pgtable.h  4
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 43640d797455..cd61239bae8c 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -169,7 +169,6 @@
extern u64 vabits_actual;
#define PAGE_END (_PAGE_END(vabits_actual))
-extern s64 physvirt_offset;
extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -245,7 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
*/
#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
-#define __lm_to_phys(addr) (((addr) + physvirt_offset))
+#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
@@ -263,7 +262,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */
-#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
+#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
/*
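The memory.h hunks above drop the per-boot physvirt_offset variable and express the linear-map translation using PHYS_OFFSET and PAGE_OFFSET alone. A minimal userspace model of the new round trip, assuming a 48-bit VA configuration (PAGE_OFFSET_EXAMPLE and PHYS_OFFSET_EXAMPLE are illustrative constants, not values from the diff):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET_EXAMPLE  0xffff000000000000ULL  /* made up: -(1 << 48) */
#define PHYS_OFFSET_EXAMPLE  0x0000000080000000ULL  /* made up: start of DRAM */

static uint64_t lm_to_phys(uint64_t va)
{
	/* mirrors the new: ((addr) & ~PAGE_OFFSET) + PHYS_OFFSET */
	return (va & ~PAGE_OFFSET_EXAMPLE) + PHYS_OFFSET_EXAMPLE;
}

static uint64_t phys_to_virt(uint64_t pa)
{
	/* mirrors the new: ((x) - PHYS_OFFSET) | PAGE_OFFSET */
	return (pa - PHYS_OFFSET_EXAMPLE) | PAGE_OFFSET_EXAMPLE;
}

int main(void)
{
	uint64_t pa = PHYS_OFFSET_EXAMPLE + 0x123456000ULL;
	uint64_t va = phys_to_virt(pa);

	/* The OR works because the linear-map offset never reaches the
	 * bits PAGE_OFFSET occupies, so OR and ADD agree, and the mask
	 * in lm_to_phys undoes it exactly. */
	assert(lm_to_phys(va) == pa);
	printf("pa=%#llx -> va=%#llx -> pa=%#llx\n",
	       (unsigned long long)pa, (unsigned long long)va,
	       (unsigned long long)lm_to_phys(va));
	return 0;
}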
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index a11bf52e0c38..4ff12a7adcfd 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -24,6 +24,8 @@
#define VMALLOC_START (MODULES_END)
#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+
#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
@@ -34,8 +36,6 @@
#include <linux/mm_types.h>
#include <linux/sched.h>
-extern struct page *vmemmap;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
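The pgtable.h change above turns vmemmap from a variable set at boot into a constant expression: the array base at VMEMMAP_START is biased backwards by memstart_addr >> PAGE_SHIFT, so indexing with an absolute PFN still places the first page of RAM exactly at VMEMMAP_START. A rough userspace model with invented constants (VMEMMAP_START_EXAMPLE, MEMSTART_EXAMPLE, and the assumed struct page size are placeholders, not values from this commit):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT             12
#define STRUCT_PAGE_SIZE       64ULL                  /* assumed sizeof(struct page) */
#define VMEMMAP_START_EXAMPLE  0xfffffdffc0000000ULL  /* made-up base address */
#define MEMSTART_EXAMPLE       0x0000000080000000ULL  /* made-up memstart_addr */

/* Address of vmemmap[pfn], modelling the biased base:
 * vmemmap = (struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT) */
static uint64_t page_addr(uint64_t pfn)
{
	uint64_t base = VMEMMAP_START_EXAMPLE -
			(MEMSTART_EXAMPLE >> PAGE_SHIFT) * STRUCT_PAGE_SIZE;
	return base + pfn * STRUCT_PAGE_SIZE;
}

int main(void)
{
	uint64_t first_pfn = MEMSTART_EXAMPLE >> PAGE_SHIFT;

	/* The struct page for the first PFN of RAM lands exactly at
	 * VMEMMAP_START, even though indexing uses absolute PFNs. */
	printf("vmemmap[first_pfn] = %#llx\n",
	       (unsigned long long)page_addr(first_pfn));
	return 0;
}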