Diffstat (limited to 'arch/arm64/kernel/entry.S')
 arch/arm64/kernel/entry.S | 396 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 365 insertions(+), 31 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5edde1c2e93e..cccd2788e631 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,8 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -69,8 +71,21 @@
#define BAD_FIQ 2
#define BAD_ERROR 3
- .macro kernel_ventry label
+ .macro kernel_ventry, el, label, regsize = 64
.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+ .if \regsize == 64
+ mrs x30, tpidrro_el0
+ msr tpidrro_el0, xzr
+ .else
+ mov x30, xzr
+ .endif
+ .endif
+alternative_else_nop_endif
+#endif
+
sub sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
/*
@@ -82,7 +97,7 @@
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b \label
+ b el\()\el\()_\label
0:
/*
@@ -114,7 +129,12 @@
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b \label
+ b el\()\el\()_\label
+ .endm
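
With the kernel unmapped at EL0, the trampoline vector runs before any usable
stack exists and may clobber only x30, so for a 64-bit task it parks the
incoming x30 in tpidrro_el0 and kernel_ventry recovers it here, zeroing the
scratch register again so nothing leaks back to userspace. Compat tasks use
tpidrro_el0 for TLS and keep no state in x30, so for them x30 is simply
zeroed. A minimal C model of the hand-off; every name is an illustrative
stand-in, not kernel API:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t tpidrro_el0;            /* stand-in for the sysreg */

	static void tramp_ventry_a64(uint64_t *x30)
	{
		tpidrro_el0 = *x30;             /* msr tpidrro_el0, x30 */
		*x30 = 0;                       /* clobbered entering the kernel */
	}

	static void kernel_ventry_el0_a64(uint64_t *x30)
	{
		*x30 = tpidrro_el0;             /* mrs x30, tpidrro_el0 */
		tpidrro_el0 = 0;                /* msr tpidrro_el0, xzr */
	}

	int main(void)
	{
		uint64_t x30 = 0x1234;          /* task's link register */
		tramp_ventry_a64(&x30);
		kernel_ventry_el0_a64(&x30);
		assert(x30 == 0x1234 && tpidrro_el0 == 0);
		return 0;
	}
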
+
+ .macro tramp_alias, dst, sym
+ mov_q \dst, TRAMP_VALIAS
+ add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
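
tramp_alias converts a symbol's link address inside .entry.tramp.text into its
address within the fixed alias mapping at TRAMP_VALIAS, which stays present in
the user page tables; the exit path branches there so it keeps executing
mapped code across tramp_unmap_kernel. The arithmetic is plain
base-plus-offset; a compilable sketch with made-up addresses:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative values only; the real ones come from asm/memory.h
	 * and the linker script. */
	#define TRAMP_VALIAS           0xffffffffffe00000UL /* hypothetical */
	#define ENTRY_TRAMP_TEXT_START 0xffff000008e81000UL /* hypothetical */

	static uint64_t tramp_alias(uint64_t sym)
	{
		/* mov_q dst, TRAMP_VALIAS
		 * add   dst, dst, #(sym - .entry.tramp.text) */
		return TRAMP_VALIAS + (sym - ENTRY_TRAMP_TEXT_START);
	}

	int main(void)
	{
		/* hypothetical link address of tramp_exit_native */
		uint64_t sym = ENTRY_TRAMP_TEXT_START + 0x480;
		printf("alias: %#llx\n", (unsigned long long)tramp_alias(sym));
		return 0;
	}
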
.macro kernel_entry, el, regsize = 64
@@ -185,7 +205,7 @@ alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
- tst x21, #0xffff << 48 // Check for the reserved ASID
+ tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
b.eq 1f // TTBR0 access already disabled
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
@@ -248,7 +268,7 @@ alternative_else_nop_endif
tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
- __uaccess_ttbr0_enable x0
+ __uaccess_ttbr0_enable x0, x1
.if \el == 0
/*
@@ -257,7 +277,7 @@ alternative_else_nop_endif
* Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
* corruption).
*/
- post_ttbr0_update_workaround
+ bl post_ttbr_update_workaround
.endif
1:
.if \el != 0
@@ -269,18 +289,20 @@ alternative_else_nop_endif
.if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+ tst x22, #PSR_MODE32_BIT // native task?
+ b.eq 3f
+
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
- tbz x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
mrs x29, contextidr_el1
msr contextidr_el1, x29
#else
msr contextidr_el1, xzr
#endif
-1:
alternative_else_nop_endif
#endif
+3:
.endif
msr elr_el1, x21 // set up the return data
@@ -306,7 +328,21 @@ alternative_else_nop_endif
* ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
* when returning from an IPI handler and when returning to user-space.
*/
- eret // return to kernel
+
+ .if \el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+ msr far_el1, x30
+ tramp_alias x30, tramp_exit_native
+ br x30
+4:
+ tramp_alias x30, tramp_exit_compat
+ br x30
+#endif
+ .else
+ eret
+ .endif
.endm
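
The return-to-EL0 path now reads: with KPTI disabled, the alternative patches
in a plain eret; with it enabled, the nop falls through, and the condition
flags left by the earlier `tst x22, #PSR_MODE32_BIT` send native tasks to
tramp_exit_native (x30 parked in far_el1 for tramp_exit to restore) and compat
tasks to tramp_exit_compat. A C model of the decision; the enum names are
illustrative, not kernel identifiers:

	/* Exit-path selection, modelled in C. */
	enum exit_path { ERET_DIRECT, TRAMP_NATIVE, TRAMP_COMPAT };

	static enum exit_path pick_exit(int kpti_enabled, unsigned long spsr)
	{
		const unsigned long PSR_MODE32_BIT = 0x10; /* SPSR_EL1.M[4] */

		if (!kpti_enabled)
			return ERET_DIRECT;               /* patched-in eret */
		return (spsr & PSR_MODE32_BIT) ? TRAMP_COMPAT /* bne 4f */
					       : TRAMP_NATIVE;
	}
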
.macro irq_stack_entry
@@ -371,31 +407,31 @@ tsk .req x28 // current thread_info
.align 11
ENTRY(vectors)
- kernel_ventry el1_sync_invalid // Synchronous EL1t
- kernel_ventry el1_irq_invalid // IRQ EL1t
- kernel_ventry el1_fiq_invalid // FIQ EL1t
- kernel_ventry el1_error_invalid // Error EL1t
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
- kernel_ventry el1_sync // Synchronous EL1h
- kernel_ventry el1_irq // IRQ EL1h
- kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error // Error EL1h
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error // Error EL1h
- kernel_ventry el0_sync // Synchronous 64-bit EL0
- kernel_ventry el0_irq // IRQ 64-bit EL0
- kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error // Error 64-bit EL0
+ kernel_ventry 0, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, irq // IRQ 64-bit EL0
+ kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry 0, error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
- kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
- kernel_ventry el0_irq_compat // IRQ 32-bit EL0
- kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_compat // Error 32-bit EL0
+ kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
#else
- kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
- kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
- kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid // Error 32-bit EL0
+ kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
END(vectors)
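
Each kernel_ventry slot is .align 7, i.e. 128 bytes, and the architecture
arranges the table as four groups of four entries spaced 0x200 apart: current
EL with SP_EL0, current EL with SP_ELx, lower EL using AArch64, lower EL using
AArch32 (hence regsize 32 on the last eight entries). A sketch of the
addressing:

	#include <stdint.h>

	/* AArch64 vector table: entry = VBAR + group * 0x200 + type * 0x80 */
	enum vec_group { CUR_EL_SP0, CUR_EL_SPX, LOWER_EL_A64, LOWER_EL_A32 };
	enum vec_type  { VEC_SYNC, VEC_IRQ, VEC_FIQ, VEC_ERROR };

	static uint64_t vector_entry(uint64_t vbar, enum vec_group g,
				     enum vec_type t)
	{
		return vbar + (uint64_t)g * 0x200 + (uint64_t)t * 0x80;
	}

For example, vector_entry(vbar, LOWER_EL_A64, VEC_SYNC) is vbar + 0x400, which
is why tramp_vectors below starts with .space 0x400 before its first entry.
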
@@ -689,12 +725,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- enable_daif
+ enable_da_f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
- bl do_mem_abort
+ bl do_el0_ia_bp_hardening
b ret_to_user
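
Redirecting EL0 instruction aborts to do_el0_ia_bp_hardening is the
branch-predictor half of the mitigation: an instruction fetch from userspace
that faults on a kernel address means userspace branched at the kernel, so the
predictor is scrubbed while IRQs are still masked (enable_da_f leaves I set,
hence the explicit trace_hardirqs_off). A hedged sketch of the handler's
shape; the real definition belongs in arch/arm64/mm/fault.c:

	/* Approximate shape only, not a verbatim copy of the handler. */
	void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
				    struct pt_regs *regs)
	{
		if (addr > TASK_SIZE)          /* user branched to a kernel address */
			arm64_apply_bp_hardening();
		local_irq_enable();            /* safe to unmask IRQs now */
		do_mem_abort(addr, esr, regs); /* usual abort handling */
	}
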
el0_fpsimd_acc:
/*
@@ -947,6 +986,124 @@ __ni_sys_trace:
.popsection // .entry.text
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+ .pushsection ".entry.tramp.text", "ax"
+
+ .macro tramp_map_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ bic \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ /* ASID already in \tmp[63:48] */
+ movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+ movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+ /* 2MB boundary containing the vectors, so we nobble the walk cache */
+ movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+ isb
+ tlbi vae1, \tmp
+ dsb nsh
+alternative_else_nop_endif
+#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
+ .endm
+
+ .macro tramp_unmap_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ orr \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+ /*
+ * We avoid running the post_ttbr_update_workaround here because the
+ * user and kernel ASIDs don't have conflicting mappings, so any
+ * "blessing" as described in:
+ *
+ * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+ *
+ * will not hurt correctness. Whilst this may partially defeat the
+ * point of using split ASIDs in the first place, it avoids
+ * the hit of invalidating the entire I-cache on every return to
+ * userspace.
+ */
+ .endm
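
Mapping and unmapping the kernel is pure TTBR1_EL1 arithmetic: the trampoline
page tables sit PAGE_SIZE + RESERVED_TTBR0_SIZE below swapper_pg_dir in the
image, and the paired ASIDs differ only in bit 0 of the ASID field
(USER_ASID_FLAG, bit 48 of the TTBR), so outside the Falkor workaround no TLB
maintenance is needed. A compilable model with illustrative constants:

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative values; the real ones depend on the configuration. */
	#define PAGE_SIZE           0x1000UL
	#define RESERVED_TTBR0_SIZE 0x1000UL
	#define USER_ASID_FLAG      (1UL << 48) /* ASID bit 0 in TTBR1[63:48] */

	static uint64_t map_kernel(uint64_t ttbr1)   /* tramp_map_kernel */
	{
		ttbr1 += PAGE_SIZE + RESERVED_TTBR0_SIZE; /* -> swapper_pg_dir */
		return ttbr1 & ~USER_ASID_FLAG;           /* kernel ASID */
	}

	static uint64_t unmap_kernel(uint64_t ttbr1) /* tramp_unmap_kernel */
	{
		ttbr1 -= PAGE_SIZE + RESERVED_TTBR0_SIZE; /* -> tramp tables */
		return ttbr1 | USER_ASID_FLAG;            /* user ASID */
	}

	int main(void)
	{
		uint64_t user = 0x40a04000UL | USER_ASID_FLAG; /* made up */
		assert(unmap_kernel(map_kernel(user)) == user);
		return 0;
	}
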
+
+ .macro tramp_ventry, regsize = 64
+ .align 7
+1:
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+ * entry onto the return stack and using a RET instruction to
+ * enter the full-fat kernel vectors.
+ */
+ bl 2f
+ b .
+2:
+ tramp_map_kernel x30
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x30, tramp_vectors + PAGE_SIZE
+alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ ldr x30, [x30]
+#else
+ ldr x30, =vectors
+#endif
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+ ret
+ .endm
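
The one subtlety in tramp_ventry is locating the real vectors without leaking
them: under CONFIG_RANDOMIZE_BASE a literal pool entry in the trampoline page
would pin down the randomized kernel, so the pointer lives in a separate data
page (__entry_tramp_data_start, below) mapped at tramp_vectors + PAGE_SIZE,
while without KASLR an ordinary literal load suffices. A sketch of the two
lookups:

	#include <stdint.h>

	/* tramp_data stands in for the page at tramp_vectors + PAGE_SIZE
	 * (__entry_tramp_data_start); names are illustrative. */
	static uint64_t find_vectors(int kaslr, const uint64_t *tramp_data,
				     uint64_t literal_vectors)
	{
		if (kaslr)
			return tramp_data[0];   /* adr + ldr from the data page */
		return literal_vectors;         /* ldr x30, =vectors */
	}
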
+
+ .macro tramp_exit, regsize = 64
+ adr x30, tramp_vectors
+ msr vbar_el1, x30
+ tramp_unmap_kernel x30
+ .if \regsize == 64
+ mrs x30, far_el1
+ .endif
+ eret
+ .endm
+
+ .align 11
+ENTRY(tramp_vectors)
+ .space 0x400
+
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+ tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+ tramp_exit 32
+END(tramp_exit_compat)
+
+ .ltorg
+ .popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+ .pushsection ".rodata", "a"
+ .align PAGE_SHIFT
+ .globl __entry_tramp_data_start
+__entry_tramp_data_start:
+ .quad vectors
+ .popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
/*
* Special system call wrappers.
*/
@@ -1000,3 +1157,180 @@ ENTRY(ret_from_fork)
b ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+
+#include <asm/sdei.h>
+#include <uapi/linux/arm_sdei.h>
+
+.macro sdei_handler_exit exit_mode
+ /* On success, this call never returns... */
+ cmp \exit_mode, #SDEI_EXIT_SMC
+ b.ne 99f
+ smc #0
+ b .
+99: hvc #0
+ b .
+.endm
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * The regular SDEI entry point may have been unmapped along with the rest of
+ * the kernel. This trampoline restores the kernel mapping to make the x1 memory
+ * argument accessible.
+ *
+ * This clobbers x4; __sdei_handler() will restore it from firmware's
+ * copy.
+ */
+.ltorg
+.pushsection ".entry.tramp.text", "ax"
+ENTRY(__sdei_asm_entry_trampoline)
+ mrs x4, ttbr1_el1
+ tbz x4, #USER_ASID_BIT, 1f
+
+ tramp_map_kernel tmp=x4
+ isb
+ mov x4, xzr
+
+ /*
+ * Use reg->interrupted_regs.addr_limit to remember whether to unmap
+ * the kernel on exit.
+ */
+1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x4, tramp_vectors + PAGE_SIZE
+ add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+ ldr x4, [x4]
+#else
+ ldr x4, =__sdei_asm_handler
+#endif
+ br x4
+ENDPROC(__sdei_asm_entry_trampoline)
+NOKPROBE(__sdei_asm_entry_trampoline)
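
Unlike the exception trampoline, SDEI events can arrive with either set of
page tables live, so the entry trampoline tests USER_ASID_BIT in TTBR1_EL1 to
decide whether the kernel needs mapping and records the answer in the
momentarily unused interrupted_regs.addr_limit slot: zero means "we mapped it,
unmap on exit", non-zero means it was already mapped. A C model of that
handshake; the struct and names are illustrative and the TTBR offset shuffle
is elided:

	#include <stdint.h>

	#define USER_ASID_FLAG (1ULL << 48) /* TTBR1_EL1 bit 48: user ASID */

	struct sdei_event_sketch {
		uint64_t interrupted_addr_limit; /* reused: 0 = we mapped */
	};

	static void sdei_entry(uint64_t *ttbr1, struct sdei_event_sketch *ev)
	{
		uint64_t flag = *ttbr1;            /* non-zero: already mapped */
		if (*ttbr1 & USER_ASID_FLAG) {
			*ttbr1 &= ~USER_ASID_FLAG; /* tramp_map_kernel */
			flag = 0;                  /* unmap on exit */
		}
		ev->interrupted_addr_limit = flag;
	}

	static void sdei_exit(uint64_t *ttbr1, struct sdei_event_sketch *ev)
	{
		if (!ev->interrupted_addr_limit)
			*ttbr1 |= USER_ASID_FLAG;  /* tramp_unmap_kernel */
	}
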
+
+/*
+ * Make the exit call and restore the original ttbr1_el1
+ *
+ * x0 & x1: setup for the exit API call
+ * x2: exit_mode
+ * x4: struct sdei_registered_event argument from registration time.
+ */
+ENTRY(__sdei_asm_exit_trampoline)
+ ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ cbnz x4, 1f
+
+ tramp_unmap_kernel tmp=x4
+
+1: sdei_handler_exit exit_mode=x2
+ENDPROC(__sdei_asm_exit_trampoline)
+NOKPROBE(__sdei_asm_exit_trampoline)
+ .ltorg
+.popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+.pushsection ".rodata", "a"
+__sdei_asm_trampoline_next_handler:
+ .quad __sdei_asm_handler
+.popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+/*
+ * Software Delegated Exception entry point.
+ *
+ * x0: Event number
+ * x1: struct sdei_registered_event argument from registration time.
+ * x2: interrupted PC
+ * x3: interrupted PSTATE
+ * x4: maybe clobbered by the trampoline
+ *
+ * Firmware has preserved x0->x17 for us; we must save/restore the rest to
+ * follow SMC-CC. We save (or retrieve) all the registers as the handler may
+ * want them.
+ */
+ENTRY(__sdei_asm_handler)
+ stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
+ stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
+ stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
+ stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
+ stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
+ stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
+ stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
+ stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
+ stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
+ stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
+ stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
+ stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
+ stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
+ stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
+ mov x4, sp
+ stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
+
+ mov x19, x1
+
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * entry.S may have been using sp as a scratch register; find whether
+ * this is a normal or critical event and switch to the appropriate
+ * stack for this CPU.
+ */
+ ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
+ cbnz w4, 1f
+ ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
+ b 2f
+1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
+2: mov x6, #SDEI_STACK_SIZE
+ add x5, x5, x6
+ mov sp, x5
+#endif
+
+ /*
+ * We may have interrupted userspace, or a guest, or exit-from or
+ * return-to either of these. We can't trust sp_el0; restore it.
+ */
+ mrs x28, sp_el0
+ ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
+ msr sp_el0, x0
+
+ /* If we interrupted the kernel, point to the previous stack/frame. */
+ and x0, x3, #0xc
+ mrs x1, CurrentEL
+ cmp x0, x1
+ csel x29, x29, xzr, eq // fp, or zero
+ csel x4, x2, xzr, eq // elr, or zero
+
+ stp x29, x4, [sp, #-16]!
+ mov x29, sp
+
+ add x0, x19, #SDEI_EVENT_INTREGS
+ mov x1, x19
+ bl __sdei_handler
+
+ msr sp_el0, x28
+ /* restore regs >x17 that we clobbered */
+ mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline
+ ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
+ ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
+ ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
+ mov sp, x1
+
+ mov x1, x0 // address to complete_and_resume
+ /* x0 = (x0 <= 1) ? EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME */
+ cmp x0, #1
+ mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
+ mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ csel x0, x2, x3, ls
+
+ ldr_l x2, sdei_exit_mode
+
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ sdei_handler_exit exit_mode=x2
+alternative_else_nop_endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
+ br x5
+#endif
+ENDPROC(__sdei_asm_handler)
+NOKPROBE(__sdei_asm_handler)
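
__sdei_handler()'s return value doubles as the verdict: 0 or 1 request plain
EVENT_COMPLETE (the cmp/csel pair with the LS condition), while any larger
value is treated as an address to EVENT_COMPLETE_AND_RESUME at, which is why
the raw value is copied to x1 before being replaced with the function ID. A
sketch of that convention; the function-ID values shown are placeholders for
the real definitions in uapi/linux/arm_sdei.h:

	#include <stdint.h>

	#define FN_EVENT_COMPLETE            0xC4000025ULL /* placeholder */
	#define FN_EVENT_COMPLETE_AND_RESUME 0xC4000026ULL /* placeholder */

	/* Model of the tail of __sdei_asm_handler. */
	static void pick_exit_call(uint64_t ret, uint64_t *fn, uint64_t *arg)
	{
		*arg = ret;                    /* mov x1, x0 */
		*fn = (ret <= 1)               /* cmp x0, #1; csel ..., ls */
			? FN_EVENT_COMPLETE
			: FN_EVENT_COMPLETE_AND_RESUME;
	}
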
+#endif /* CONFIG_ARM_SDE_INTERFACE */