author		Linus Torvalds <torvalds@linux-foundation.org>	2019-11-26 10:42:40 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-26 10:42:40 -0800
commit		1d87200446f1d10dfe9672ca8edb027a82612f8c
tree		45cc71ff8e4d1bcde9b07ce8203277f2b8982941 /arch/x86/entry/entry_64.S
parent		5c4a1c090d8676d8b84e2ac40671602be44afdfc
parent		f01ec4fca8207e31b59a010c3de679c833f3a877
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
"The main changes in this cycle were:
- Cross-arch changes to move the linker sections for NOTES and
EXCEPTION_TABLE into the RO_DATA area, where they belong on most
architectures. (Kees Cook)
- Switch the x86 linker fill byte from 0x90 (NOP) to 0xcc (INT3), to
trap jumps into the middle of those padding areas instead of letting
execution slide through them. (Kees Cook) (A generic linker-script
sketch follows the commit list below.)
- A thorough cleanup of symbol definitions within x86 assembler code.
The rather randomly named macros got streamlined around a
(hopefully) straightforward naming scheme:
SYM_START(name, linkage, align...)
SYM_END(name, sym_type)
SYM_FUNC_START(name)
SYM_FUNC_END(name)
SYM_CODE_START(name)
SYM_CODE_END(name)
SYM_DATA_START(name)
SYM_DATA_END(name)
etc - plus roughly three times as many variants of these basic
primitives, with postfixes selecting label, local-symbol or other
attribute behavior (see the illustrative sketch after this message).
No change in functionality intended. (Jiri Slaby)
- Misc other changes, cleanups and smaller fixes"
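As an illustrative sketch of the new annotations (not taken from this series; the symbol names and bodies below, my_func_sketch and my_stub_sketch, are made up for the example), a C-callable function is wrapped in SYM_FUNC_START/SYM_FUNC_END while code that does not follow the C ABI uses SYM_CODE_START/SYM_CODE_END, both coming from <linux/linkage.h>:

    #include <linux/linkage.h>

    /* A C-callable function: aligned, typed and sized via SYM_FUNC_*. */
    SYM_FUNC_START(my_func_sketch)
            movq    %rdi, %rax      /* trivially return the first argument */
            ret
    SYM_FUNC_END(my_func_sketch)

    /* Code that does not follow the C ABI uses SYM_CODE_* instead. */
    SYM_CODE_START(my_stub_sketch)
            jmp     my_func_sketch
    SYM_CODE_END(my_stub_sketch)

The postfixed variants (for example SYM_FUNC_START_LOCAL, SYM_CODE_START_LOCAL_NOALIGN, or SYM_INNER_LABEL(name, SYM_L_GLOBAL)) cover local symbols, alignment and inner labels; those are the "three times as many" variants mentioned above, and the diff below shows several of them in actual use.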
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
x86/entry/64: Remove pointless jump in paranoid_exit
x86/entry/32: Remove unused resume_userspace label
x86/build/vdso: Remove meaningless CFLAGS_REMOVE_*.o
m68k: Convert missed RODATA to RO_DATA
x86/vmlinux: Use INT3 instead of NOP for linker fill bytes
x86/mm: Report actual image regions in /proc/iomem
x86/mm: Report which part of kernel image is freed
x86/mm: Remove redundant address-of operators on addresses
xtensa: Move EXCEPTION_TABLE to RO_DATA segment
powerpc: Move EXCEPTION_TABLE to RO_DATA segment
parisc: Move EXCEPTION_TABLE to RO_DATA segment
microblaze: Move EXCEPTION_TABLE to RO_DATA segment
ia64: Move EXCEPTION_TABLE to RO_DATA segment
h8300: Move EXCEPTION_TABLE to RO_DATA segment
c6x: Move EXCEPTION_TABLE to RO_DATA segment
arm64: Move EXCEPTION_TABLE to RO_DATA segment
alpha: Move EXCEPTION_TABLE to RO_DATA segment
x86/vmlinux: Move EXCEPTION_TABLE to RO_DATA segment
x86/vmlinux: Actually use _etext for the end of the text segment
vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA
...
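A side note on the fill-byte change listed above ("x86/vmlinux: Use INT3 instead of NOP for linker fill bytes"): GNU ld lets an output section specify the byte used for padding via an "=fillexp" suffix after the section body. The fragment below is a generic sketch of that mechanism, not the kernel's actual vmlinux.lds.S:

    SECTIONS
    {
            .text : {
                    *(.text .text.*)
            } = 0xcc        /* pad alignment gaps with INT3 so stray jumps trap */
    }

With a 0x90 (NOP) fill, a jump landing in padding slides forward until it reaches real code; with 0xcc (INT3) it traps immediately.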
Diffstat (limited to 'arch/x86/entry/entry_64.S')
-rw-r--r--	arch/x86/entry/entry_64.S	112
1 file changed, 56 insertions, 56 deletions
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index b7c3ea4cb19d..76942cbd95a1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -15,7 +15,7 @@
  * at the top of the kernel process stack.
  *
  * Some macro usage:
- * - ENTRY/END:		Define functions in the symbol table.
+ * - SYM_FUNC_START/END:Define functions in the symbol table.
  * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
  * - idtentry:		Define exception entry points.
  */
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"

 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
 	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */

 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */

-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
 	UNWIND_HINT_EMPTY
 	/*
 	 * Interrupts are off on entry.
@@ -162,7 +162,7 @@ ENTRY(entry_SYSCALL_64)
 	pushq	%r11			/* pt_regs->flags */
 	pushq	$__USER_CS		/* pt_regs->cs */
 	pushq	%rcx			/* pt_regs->ip */
-GLOBAL(entry_SYSCALL_64_after_hwframe)
+SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
 	pushq	%rax			/* pt_regs->orig_ax */

 	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
 	popq	%rdi
 	popq	%rsp
 	USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)

 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
 	UNWIND_HINT_FUNC
 	/*
 	 * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
 	popq	%rbp

 	jmp	__switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)

 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_EMPTY
 	movq	%rax, %rdi
 	call	schedule_tail		/* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
 	 */
 	movq	$0, RAX(%rsp)
 	jmp	2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)

 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
 	.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 	UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
 	.align	8
     vector=vector+1
     .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)

 	.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
     vector=FIRST_SYSTEM_VECTOR
     .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
 	UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
 	.align	8
     vector=vector+1
     .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)

 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
  * | return address					|
  * +----------------------------------------------------+
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
 	UNWIND_HINT_FUNC
 	ASM_CLAC
 	cld
@@ -579,7 +579,7 @@ ENTRY(interrupt_entry)
 	TRACE_IRQS_OFF

 	ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)

@@ -589,18 +589,18 @@ _ASM_NOKPROBE(interrupt_entry)
  * The interrupt stubs push (~vector+0x80) onto the stack and
  * then jump to common_spurious/interrupt.
  */
-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
 	addq	$-0x80, (%rsp)		/* Adjust vector to [-256, -1] range */
 	call	interrupt_entry
 	UNWIND_HINT_REGS indirect=1
 	call	smp_spurious_interrupt	/* rdi points to pt_regs */
 	jmp	ret_from_intr
-END(common_spurious)
+SYM_CODE_END(common_spurious)
 _ASM_NOKPROBE(common_spurious)

 /* common_interrupt is a hotpath. Align it */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
 	addq	$-0x80, (%rsp)		/* Adjust vector to [-256, -1] range */
 	call	interrupt_entry
 	UNWIND_HINT_REGS indirect=1
@@ -616,12 +616,12 @@ ret_from_intr:
 	jz	retint_kernel

 	/* Interrupt came from user space */
-GLOBAL(retint_user)
+.Lretint_user:
 	mov	%rsp,%rdi
 	call	prepare_exit_to_usermode
 	TRACE_IRQS_IRETQ

-GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 #ifdef CONFIG_DEBUG_ENTRY
 	/* Assert that pt_regs indicates user mode. */
 	testb	$3, CS(%rsp)
@@ -679,7 +679,7 @@ retint_kernel:
 	 */
 	TRACE_IRQS_IRETQ

-GLOBAL(restore_regs_and_return_to_kernel)
+SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
 #ifdef CONFIG_DEBUG_ENTRY
 	/* Assert that pt_regs indicates kernel mode. */
 	testb	$3, CS(%rsp)
@@ -695,7 +695,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
 	 */
 	INTERRUPT_RETURN

-ENTRY(native_iret)
+SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
 	UNWIND_HINT_IRET_REGS
 	/*
 	 * Are we returning to a stack segment from the LDT? Note: in
@@ -706,8 +706,7 @@ ENTRY(native_iret)
 	jnz	native_irq_return_ldt
 #endif

-.global native_irq_return_iret
-native_irq_return_iret:
+SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
 	/*
 	 * This may fault. Non-paranoid faults on return to userspace are
 	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
@@ -789,14 +788,14 @@ native_irq_return_ldt:
 	 */
 	jmp	native_irq_return_iret
 #endif
-END(common_interrupt)
+SYM_CODE_END(common_interrupt)
 _ASM_NOKPROBE(common_interrupt)

 /*
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
 	UNWIND_HINT_IRET_REGS
 	pushq	$~(\num)
 .Lcommon_\sym:
@@ -804,7 +803,7 @@ ENTRY(\sym)
 	UNWIND_HINT_REGS indirect=1
 	call	\do_sym			/* rdi points to pt_regs */
 	jmp	ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_NOKPROBE(\sym)
 .endm
@@ -969,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
-ENTRY(\sym)
+SYM_CODE_START(\sym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

 	/* Sanity check */
@@ -1019,7 +1018,7 @@ ENTRY(\sym)
 	.endif

 _ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
 .endm

 idtentry divide_error			do_divide_error			has_error_code=0
@@ -1041,7 +1040,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
  * Reload gs selector with exception handling
  * edi:  new selector
  */
-ENTRY(native_load_gs_index)
+SYM_FUNC_START(native_load_gs_index)
 	FRAME_BEGIN
 	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
@@ -1055,13 +1054,13 @@ ENTRY(native_load_gs_index)
 	popfq
 	FRAME_END
 	ret
-ENDPROC(native_load_gs_index)
+SYM_FUNC_END(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)

 	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
 	.section .fixup, "ax"
 	/* running with kernelgs */
-.Lbad_gs:
+SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
 	SWAPGS				/* switch back to user gs */
 .macro ZAP_GS
 	/* This can't be a string because the preprocessor needs to see it. */
@@ -1072,10 +1071,11 @@ EXPORT_SYMBOL(native_load_gs_index)
 	xorl	%eax, %eax
 	movl	%eax, %gs
 	jmp	2b
+SYM_CODE_END(.Lbad_gs)
 	.previous

 /* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(do_softirq_own_stack)
+SYM_FUNC_START(do_softirq_own_stack)
 	pushq	%rbp
 	mov	%rsp, %rbp
 	ENTER_IRQ_STACK regs=0 old_rsp=%r11
@@ -1083,7 +1083,7 @@ ENTRY(do_softirq_own_stack)
 	LEAVE_IRQ_STACK regs=0
 	leaveq
 	ret
-ENDPROC(do_softirq_own_stack)
+SYM_FUNC_END(do_softirq_own_stack)

 #ifdef CONFIG_XEN_PV
 idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)

 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)

 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1134,7 +1135,7 @@ END(xen_do_hypervisor_callback)
  * We distinguish between categories by comparing each saved segment register
  * with its current contents: any discrepancy means we in category 1.
  */
-ENTRY(xen_failsafe_callback)
+SYM_CODE_START(xen_failsafe_callback)
 	UNWIND_HINT_EMPTY
 	movl	%ds, %ecx
 	cmpw	%cx, 0x10(%rsp)
@@ -1164,7 +1165,7 @@ ENTRY(xen_failsafe_callback)
 	PUSH_AND_CLEAR_REGS
 	ENCODE_FRAME_POINTER
 	jmp	error_exit
-END(xen_failsafe_callback)
+SYM_CODE_END(xen_failsafe_callback)
 #endif /* CONFIG_XEN_PV */

 #ifdef CONFIG_XEN_PVHVM
@@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
 	UNWIND_HINT_FUNC
 	cld
 	PUSH_AND_CLEAR_REGS save_ret=1
@@ -1248,7 +1249,7 @@
 	FENCE_SWAPGS_KERNEL_ENTRY

 	ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)

 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1262,7 +1263,7 @@ END(paranoid_entry)
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
 	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
@@ -1272,19 +1273,18 @@ ENTRY(paranoid_exit)
 	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
-	jmp	.Lparanoid_exit_restore
+	jmp	restore_regs_and_return_to_kernel
 .Lparanoid_exit_no_swapgs:
 	TRACE_IRQS_IRETQ_DEBUG
 	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
-.Lparanoid_exit_restore:
 	jmp	restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)

 /*
  * Save all registers in pt_regs, and switch GS if needed.
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
 	UNWIND_HINT_FUNC
 	cld
 	PUSH_AND_CLEAR_REGS save_ret=1
@@ -1364,16 +1364,16 @@ ENTRY(error_entry)
 	call	fixup_bad_iret
 	mov	%rax, %rsp
 	jmp	.Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)

-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
 	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
-	jmp	retint_user
-END(error_exit)
+	jmp	.Lretint_user
+SYM_CODE_END(error_exit)

 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
@@ -1383,7 +1383,7 @@ END(error_exit)
  * %r14: Used to save/restore the CR3 of the interrupted context
  *	 when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
  */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
 	UNWIND_HINT_IRET_REGS

 	/*
@@ -1718,21 +1718,21 @@ nmi_restore:
 	 * about espfix64 on the way back to kernel mode.
 	 */
 	iretq
-END(nmi)
+SYM_CODE_END(nmi)

 #ifndef CONFIG_IA32_EMULATION
 /*
  * This handles SYSCALL from 32-bit code.  There is no way to program
  * MSRs to fully disable 32-bit SYSCALL.
  */
-ENTRY(ignore_sysret)
+SYM_CODE_START(ignore_sysret)
 	UNWIND_HINT_EMPTY
 	mov	$-ENOSYS, %eax
 	sysret
-END(ignore_sysret)
+SYM_CODE_END(ignore_sysret)
 #endif

-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
 	UNWIND_HINT_FUNC
 	/* Prevent any naive code from trying to unwind to our caller. */
 	xorl	%ebp, %ebp
@@ -1742,4 +1742,4 @@ ENTRY(rewind_stack_do_exit)
 	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE

 	call	do_exit
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)