From 30a2441cae7b149ff484a697bf9eb8de53240a4f Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 11:22:13 +0200 Subject: x86/asm: Make more symbols local During the assembly cleanup patchset review, I found more symbols which are used only locally. So make them really local by prepending ".L" to them. Namely: - wakeup_idt is used only in realmode/rm/wakeup_asm.S. - in_pm32 is used only in boot/pmjump.S. - retint_user is used only in entry/entry_64.S, perhaps since commit 2ec67971facc ("x86/entry/64/compat: Remove most of the fast system call machinery"), where entry_64_compat's caller was removed. Drop GLOBAL from all of them too. I do not see more candidates in the series. Signed-off-by: Jiri Slaby Acked-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bp@alien8.de Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20191011092213.31470-1-jslaby@suse.cz Signed-off-by: Ingo Molnar --- arch/x86/entry/entry_64.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index b7c3ea4cb19d..86cbb22208c8 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -616,7 +616,7 @@ ret_from_intr: jz retint_kernel /* Interrupt came from user space */ -GLOBAL(retint_user) +.Lretint_user: mov %rsp,%rdi call prepare_exit_to_usermode TRACE_IRQS_IRETQ @@ -1372,7 +1372,7 @@ ENTRY(error_exit) TRACE_IRQS_OFF testb $3, CS(%rsp) jz retint_kernel - jmp retint_user + jmp .Lretint_user END(error_exit) /* -- cgit v1.2.3 From 76dc6d600166de2c0482db95318534e6dc284212 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:50:44 +0200 Subject: x86/asm/entry: Annotate THUNKs Place SYM_*_START_NOALIGN and SYM_*_END around the THUNK macro body. Preserve @function by FUNC (64bit) and CODE (32bit). Given it was not marked as aligned, use NOALIGN. The result: Value Size Type Bind Vis Ndx Name 0000 28 FUNC GLOBAL DEFAULT 1 trace_hardirqs_on_thunk 001c 28 FUNC GLOBAL DEFAULT 1 trace_hardirqs_off_thunk 0038 24 FUNC GLOBAL DEFAULT 1 lockdep_sys_exit_thunk 0050 24 FUNC GLOBAL DEFAULT 1 ___preempt_schedule 0068 24 FUNC GLOBAL DEFAULT 1 ___preempt_schedule_notra The annotation of .L_restore does not generate anything (at the moment). Here, it just serves documentation purposes (as opening and closing brackets of functions). Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-5-jslaby@suse.cz --- arch/x86/entry/thunk_32.S | 4 ++-- arch/x86/entry/thunk_64.S | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S index 2713490611a3..e010d4ae11f1 100644 --- a/arch/x86/entry/thunk_32.S +++ b/arch/x86/entry/thunk_32.S @@ -10,8 +10,7 @@ /* put return address in eax (arg1) */ .macro THUNK name, func, put_ret_addr_in_eax=0 - .globl \name -\name: +SYM_CODE_START_NOALIGN(\name) pushl %eax pushl %ecx pushl %edx @@ -27,6 +26,7 @@ popl %eax ret _ASM_NOKPROBE(\name) +SYM_CODE_END(\name) .endm #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S index ea5c4167086c..c5c3b6e86e62 100644 --- a/arch/x86/entry/thunk_64.S +++ b/arch/x86/entry/thunk_64.S @@ -12,7 +12,7 @@ /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
 */
 .macro THUNK name, func, put_ret_addr_in_rdi=0
-	ENTRY(\name)
+SYM_FUNC_START_NOALIGN(\name)
 	pushq %rbp
 	movq %rsp, %rbp
@@ -33,7 +33,7 @@
 	call \func
 	jmp .L_restore
-	ENDPROC(\name)
+SYM_FUNC_END(\name)
 	_ASM_NOKPROBE(\name)
 .endm
@@ -56,7 +56,7 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
  || defined(CONFIG_PREEMPTION)
-.L_restore:
+SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
 	popq %r11
 	popq %r10
 	popq %r9
@@ -69,4 +69,5 @@
 	popq %rbp
 	ret
 	_ASM_NOKPROBE(.L_restore)
+SYM_CODE_END(.L_restore)
 #endif
-- cgit v1.2.3


From ef77e6880be8fa3033544109e29417c8710fd3f2 Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Fri, 11 Oct 2019 13:50:45 +0200
Subject: x86/asm: Annotate local pseudo-functions

Use the newly added SYM_CODE_START_LOCAL* to annotate the beginnings of
all pseudo-functions (those ending with END until now) which do not have
a ".globl" annotation. This is needed to balance END for tools that
generate debuginfo. Note that the ENDs are switched to SYM_CODE_END too,
so that everybody can see the pairing.

C-like functions (which handle frame ptr etc.) are not annotated here,
hence SYM_CODE_* macros are used here, not SYM_FUNC_*.

Note that the 32bit version of early_idt_handler_common already had
ENDPROC -- switch that to SYM_CODE_END for the same reason as above (and
to be the same as 64bit).

While early_idt_handler_common is LOCAL, its name is not prepended with
".L" as it happens to appear in call traces.

bad_get_user* and bad_put_user are now aligned, as they are separate
functions. They do not mind being aligned -- there is no need to be
compact there. early_idt_handler_common is aligned now too, as it is
after early_idt_handler_array, so there is no need to be compact there
either. verify_cpu is self-standing and included in other .S files, so
align it too.

The others have their alignment preserved as it was before (using the
_NOALIGN variant of the macros).

Signed-off-by: Jiri Slaby
Signed-off-by: Borislav Petkov
Cc: Alexios Zavras
Cc: Allison Randal
Cc: Andy Lutomirski
Cc: Cao jin
Cc: Enrico Weigelt
Cc: Greg Kroah-Hartman
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Josh Poimboeuf
Cc: Juergen Gross
Cc: linux-arch@vger.kernel.org
Cc: Maran Wilson
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191011115108.12392-6-jslaby@suse.cz
---
 arch/x86/entry/entry_64.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/x86/entry')

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 86cbb22208c8..db43526cecfa 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1061,7 +1061,7 @@ EXPORT_SYMBOL(native_load_gs_index)
 	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
 	.section .fixup, "ax"
 	/* running with kernelgs */
-.Lbad_gs:
+SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
 	SWAPGS					/* switch back to user gs */
 .macro ZAP_GS
 	/* This can't be a string because the preprocessor needs to see it. */
@@ -1072,6 +1072,7 @@ EXPORT_SYMBOL(native_load_gs_index)
 	xorl %eax, %eax
 	movl %eax, %gs
 	jmp 2b
+SYM_CODE_END(.Lbad_gs)
 	.previous

 /* Call softirq on interrupt stack. Interrupts are off. */
-- cgit v1.2.3


From cc66936e504a5b91dda52fda90203e174a7a71aa Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Fri, 11 Oct 2019 13:50:50 +0200
Subject: x86/asm/entry: Annotate interrupt symbols properly

* annotate functions properly by SYM_CODE_START, SYM_CODE_START_LOCAL*
  and SYM_CODE_END -- these are not C-like functions, so they have to
  be annotated using CODE.
* use SYM_INNER_LABEL* for labels in the middle of other functions.
  This prevents nested label annotations.

Signed-off-by: Jiri Slaby
Signed-off-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: linux-arch@vger.kernel.org
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20191011115108.12392-11-jslaby@suse.cz
---
 arch/x86/entry/entry_32.S | 28 ++++++++++++++--------------
 arch/x86/entry/entry_64.S | 13 ++++++-------
 2 files changed, 20 insertions(+), 21 deletions(-)

(limited to 'arch/x86/entry')

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f83ca5aa8b77..f37ff583cecb 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -807,8 +807,7 @@ END(ret_from_fork)
  */

 # userspace resumption stub bypassing syscall exit tracing
-	ALIGN
-ret_from_exception:
+SYM_CODE_START_LOCAL(ret_from_exception)
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
 #ifdef CONFIG_VM86
@@ -825,13 +824,13 @@ ret_from_intr:
 	cmpl	$USER_RPL, %eax
 	jb	restore_all_kernel		# not returning to v8086 or userspace

-ENTRY(resume_userspace)
+SYM_INNER_LABEL_ALIGN(resume_userspace, SYM_L_LOCAL)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl	%esp, %eax
 	call	prepare_exit_to_usermode
 	jmp	restore_all
-END(ret_from_exception)
+SYM_CODE_END(ret_from_exception)

 GLOBAL(__begin_SYSENTER_singlestep_region)
 /*
@@ -1100,7 +1099,7 @@ restore_all_kernel:
 	jmp	.Lirq_return

 .section .fixup, "ax"
-ENTRY(iret_exc )
+SYM_CODE_START(iret_exc)
 	pushl	$0				# no error code
 	pushl	$do_iret_error
@@ -1117,6 +1116,7 @@ ENTRY(iret_exc )
 #endif
 	jmp	common_exception
+SYM_CODE_END(iret_exc)
 .previous
 	_ASM_EXTABLE(.Lirq_return, iret_exc)
 ENDPROC(entry_INT80_32)
@@ -1182,7 +1182,7 @@ ENTRY(spurious_entries_start)
 .endr
 END(spurious_entries_start)

-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
 	ASM_CLAC
 	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
 	SAVE_ALL switch_stacks=1
@@ -1191,7 +1191,7 @@ common_spurious:
 	movl	%esp, %eax
 	call	smp_spurious_interrupt
 	jmp	ret_from_intr
-ENDPROC(common_spurious)
+SYM_CODE_END(common_spurious)
 #endif

 /*
@@ -1199,7 +1199,7 @@ ENDPROC(common_spurious)
  * so IRQ-flags tracing has to follow that:
  */
 .p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
 	ASM_CLAC
 	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */

@@ -1209,7 +1209,7 @@ common_interrupt:
 	movl	%esp, %eax
 	call	do_IRQ
 	jmp	ret_from_intr
-ENDPROC(common_interrupt)
+SYM_CODE_END(common_interrupt)

 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
@@ -1361,7 +1361,7 @@ ENTRY(xen_hypervisor_callback)

 	jmp	xen_iret_crit_fixup

-ENTRY(xen_do_upcall)
+SYM_INNER_LABEL_ALIGN(xen_do_upcall, SYM_L_GLOBAL)
 1:	mov	%esp, %eax
 	call	xen_evtchn_do_upcall
 #ifndef CONFIG_PREEMPTION
@@ -1447,7 +1447,7 @@ ENTRY(page_fault)
 	jmp	common_exception_read_cr2
 END(page_fault)

-common_exception_read_cr2:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
 	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1
@@ -1470,9 +1470,9 @@ common_exception_read_cr2:
 	movl	%esp, %eax			# pt_regs pointer
 	CALL_NOSPEC %edi
 	jmp	ret_from_exception
-END(common_exception_read_cr2)
+SYM_CODE_END(common_exception_read_cr2)

-common_exception:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception)
 	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1
 	ENCODE_FRAME_POINTER
@@ -1492,7 +1492,7 @@ common_exception:
 	movl	%esp, %eax			# pt_regs pointer
 	CALL_NOSPEC %edi
 	jmp	ret_from_exception
-END(common_exception) +SYM_CODE_END(common_exception) ENTRY(debug) /* diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index db43526cecfa..607e25f54ff4 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -589,18 +589,18 @@ _ASM_NOKPROBE(interrupt_entry) * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_spurious/interrupt. */ -common_spurious: +SYM_CODE_START_LOCAL(common_spurious) addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ call interrupt_entry UNWIND_HINT_REGS indirect=1 call smp_spurious_interrupt /* rdi points to pt_regs */ jmp ret_from_intr -END(common_spurious) +SYM_CODE_END(common_spurious) _ASM_NOKPROBE(common_spurious) /* common_interrupt is a hotpath. Align it */ .p2align CONFIG_X86_L1_CACHE_SHIFT -common_interrupt: +SYM_CODE_START_LOCAL(common_interrupt) addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ call interrupt_entry UNWIND_HINT_REGS indirect=1 @@ -695,7 +695,7 @@ GLOBAL(restore_regs_and_return_to_kernel) */ INTERRUPT_RETURN -ENTRY(native_iret) +SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL) UNWIND_HINT_IRET_REGS /* * Are we returning to a stack segment from the LDT? Note: in @@ -706,8 +706,7 @@ ENTRY(native_iret) jnz native_irq_return_ldt #endif -.global native_irq_return_iret -native_irq_return_iret: +SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL) /* * This may fault. Non-paranoid faults on return to userspace are * handled by fixup_bad_iret. These include #SS, #GP, and #NP. @@ -789,7 +788,7 @@ native_irq_return_ldt: */ jmp native_irq_return_iret #endif -END(common_interrupt) +SYM_CODE_END(common_interrupt) _ASM_NOKPROBE(common_interrupt) /* -- cgit v1.2.3 From 26ba4e5738a544aa17c462bfbe580e74071c810b Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:50:57 +0200 Subject: x86/asm: Use SYM_INNER_LABEL instead of GLOBAL The GLOBAL macro had several meanings and is going away. Convert all the inner function labels marked with GLOBAL to use SYM_INNER_LABEL instead. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jiri Kosina Cc: Josh Poimboeuf Cc: linux-arch@vger.kernel.org Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-18-jslaby@suse.cz --- arch/x86/entry/entry_64.S | 6 +++--- arch/x86/entry/entry_64_compat.S | 4 ++-- arch/x86/entry/vdso/vdso32/system_call.S | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 607e25f54ff4..57d246048ac6 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -162,7 +162,7 @@ ENTRY(entry_SYSCALL_64) pushq %r11 /* pt_regs->flags */ pushq $__USER_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ -GLOBAL(entry_SYSCALL_64_after_hwframe) +SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) pushq %rax /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rax=$-ENOSYS @@ -621,7 +621,7 @@ ret_from_intr: call prepare_exit_to_usermode TRACE_IRQS_IRETQ -GLOBAL(swapgs_restore_regs_and_return_to_usermode) +SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. 
*/ testb $3, CS(%rsp) @@ -679,7 +679,7 @@ retint_kernel: */ TRACE_IRQS_IRETQ -GLOBAL(restore_regs_and_return_to_kernel) +SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates kernel mode. */ testb $3, CS(%rsp) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 39913770a44d..5c7e71669239 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -146,7 +146,7 @@ ENTRY(entry_SYSENTER_compat) pushq $X86_EFLAGS_FIXED popfq jmp .Lsysenter_flags_fixed -GLOBAL(__end_entry_SYSENTER_compat) +SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL) ENDPROC(entry_SYSENTER_compat) /* @@ -215,7 +215,7 @@ ENTRY(entry_SYSCALL_compat) pushq %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ -GLOBAL(entry_SYSCALL_compat_after_hwframe) +SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL) movl %eax, %eax /* discard orig_ax high bits */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index 263d7433dea8..de1fff7188aa 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -62,7 +62,7 @@ __kernel_vsyscall: /* Enter using int $0x80 */ int $0x80 -GLOBAL(int80_landing_pad) +SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL) /* * Restore EDX and ECX in case they were clobbered. EBP is not -- cgit v1.2.3 From b4edca150106a68d05eaf823d665a355ff19e28b Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:50:59 +0200 Subject: x86/asm: Remove the last GLOBAL user and remove the macro Convert the remaining 32bit users and remove the GLOBAL macro finally. In particular, this means to use SYM_ENTRY for the singlestepping hack region. Exclude the global definition of GLOBAL from x86 too. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andrew Morton Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: Mark Rutland Cc: "Rafael J. Wysocki" Cc: Thomas Gleixner Cc: Will Deacon Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-20-jslaby@suse.cz --- arch/x86/entry/entry_32.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index f37ff583cecb..4900a6a5e125 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -832,7 +832,7 @@ SYM_INNER_LABEL_ALIGN(resume_userspace, SYM_L_LOCAL) jmp restore_all SYM_CODE_END(ret_from_exception) -GLOBAL(__begin_SYSENTER_singlestep_region) +SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) /* * All code from here through __end_SYSENTER_singlestep_region is subject * to being single-stepped if a user program sets TF and executes SYSENTER. @@ -1011,7 +1011,7 @@ ENTRY(entry_SYSENTER_32) pushl $X86_EFLAGS_FIXED popfl jmp .Lsysenter_flags_fixed -GLOBAL(__end_SYSENTER_singlestep_region) +SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) ENDPROC(entry_SYSENTER_32) /* -- cgit v1.2.3 From ef1e03152cb027d5925646d4d1772ced7595292f Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:00 +0200 Subject: x86/asm: Make some functions local There are a couple of assembly functions which are invoked only locally in the file they are defined. In C, they are marked "static". 
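As a rough sketch of the shape of this conversion (the symbol name below
is made up for illustration; it is not taken from this patch):

	-ENTRY(local_helper)
	+SYM_CODE_START_LOCAL(local_helper)
	 	cld
	 	ret
	-END(local_helper)
	+SYM_CODE_END(local_helper)
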
In assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or CODE is used, depends on whether ENDPROC or END was used for a particular function before. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Boris Ostrovsky Cc: Darren Hart Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Juergen Gross Cc: linux-arch@vger.kernel.org Cc: linux-efi Cc: linux-efi@vger.kernel.org Cc: Matt Fleming Cc: Peter Zijlstra Cc: platform-driver-x86@vger.kernel.org Cc: Stefano Stabellini Cc: Thomas Gleixner Cc: x86-ml Cc: xen-devel@lists.xenproject.org Link: https://lkml.kernel.org/r/20191011115108.12392-21-jslaby@suse.cz --- arch/x86/entry/entry_64.S | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 57d246048ac6..1568da63bf16 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 * existing activation in its critical region -- if so, we pop the current * activation and restart the handler using the previous one. */ -ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ +/* do_hypervisor_callback(struct *pt_regs) */ +SYM_CODE_START_LOCAL(xen_do_hypervisor_callback) /* * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will @@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ call xen_maybe_preempt_hcall #endif jmp error_exit -END(xen_do_hypervisor_callback) +SYM_CODE_END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. @@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 * Use slow, but surefire "are we in kernel?" check. * Return: ebx=0: need swapgs on exit, ebx=1: otherwise */ -ENTRY(paranoid_entry) +SYM_CODE_START_LOCAL(paranoid_entry) UNWIND_HINT_FUNC cld PUSH_AND_CLEAR_REGS save_ret=1 @@ -1248,7 +1249,7 @@ ENTRY(paranoid_entry) FENCE_SWAPGS_KERNEL_ENTRY ret -END(paranoid_entry) +SYM_CODE_END(paranoid_entry) /* * "Paranoid" exit path from exception stack. This is invoked @@ -1262,7 +1263,7 @@ END(paranoid_entry) * * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ -ENTRY(paranoid_exit) +SYM_CODE_START_LOCAL(paranoid_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG @@ -1279,12 +1280,12 @@ ENTRY(paranoid_exit) RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 .Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel -END(paranoid_exit) +SYM_CODE_END(paranoid_exit) /* * Save all registers in pt_regs, and switch GS if needed. */ -ENTRY(error_entry) +SYM_CODE_START_LOCAL(error_entry) UNWIND_HINT_FUNC cld PUSH_AND_CLEAR_REGS save_ret=1 @@ -1364,16 +1365,16 @@ ENTRY(error_entry) call fixup_bad_iret mov %rax, %rsp jmp .Lerror_entry_from_usermode_after_swapgs -END(error_entry) +SYM_CODE_END(error_entry) -ENTRY(error_exit) +SYM_CODE_START_LOCAL(error_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF testb $3, CS(%rsp) jz retint_kernel jmp .Lretint_user -END(error_exit) +SYM_CODE_END(error_exit) /* * Runs on exception stack. 
Xen PV does not go through this path at all, -- cgit v1.2.3 From bc7b11c04ee9c9b0451ebf85bf64e0de69fdbb17 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:03 +0200 Subject: x86/asm/64: Change all ENTRY+END to SYM_CODE_* Change all assembly code which is marked using END (and not ENDPROC). Switch all these to the appropriate new annotation SYM_CODE_START and SYM_CODE_END. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Reviewed-by: Boris Ostrovsky [xen bits] Cc: Andy Lutomirski Cc: Cao jin Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jiri Kosina Cc: Josh Poimboeuf Cc: Juergen Gross Cc: linux-arch@vger.kernel.org Cc: Maran Wilson Cc: Peter Zijlstra Cc: Stefano Stabellini Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: x86-ml Cc: xen-devel@lists.xenproject.org Link: https://lkml.kernel.org/r/20191011115108.12392-24-jslaby@suse.cz --- arch/x86/entry/entry_64.S | 52 ++++++++++++++++++++-------------------- arch/x86/entry/entry_64_compat.S | 8 +++---- 2 files changed, 30 insertions(+), 30 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 1568da63bf16..13e4fe378e5a 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -46,11 +46,11 @@ .section .entry.text, "ax" #ifdef CONFIG_PARAVIRT -ENTRY(native_usergs_sysret64) +SYM_CODE_START(native_usergs_sysret64) UNWIND_HINT_EMPTY swapgs sysretq -END(native_usergs_sysret64) +SYM_CODE_END(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ .macro TRACE_IRQS_FLAGS flags:req @@ -142,7 +142,7 @@ END(native_usergs_sysret64) * with them due to bugs in both AMD and Intel CPUs. */ -ENTRY(entry_SYSCALL_64) +SYM_CODE_START(entry_SYSCALL_64) UNWIND_HINT_EMPTY /* * Interrupts are off on entry. @@ -273,13 +273,13 @@ syscall_return_via_sysret: popq %rdi popq %rsp USERGS_SYSRET64 -END(entry_SYSCALL_64) +SYM_CODE_END(entry_SYSCALL_64) /* * %rdi: prev task * %rsi: next task */ -ENTRY(__switch_to_asm) +SYM_CODE_START(__switch_to_asm) UNWIND_HINT_FUNC /* * Save callee-saved registers @@ -321,7 +321,7 @@ ENTRY(__switch_to_asm) popq %rbp jmp __switch_to -END(__switch_to_asm) +SYM_CODE_END(__switch_to_asm) /* * A newly forked process directly context switches into this address. @@ -330,7 +330,7 @@ END(__switch_to_asm) * rbx: kernel thread func (NULL for user thread) * r12: kernel thread arg */ -ENTRY(ret_from_fork) +SYM_CODE_START(ret_from_fork) UNWIND_HINT_EMPTY movq %rax, %rdi call schedule_tail /* rdi: 'prev' task parameter */ @@ -357,14 +357,14 @@ ENTRY(ret_from_fork) */ movq $0, RAX(%rsp) jmp 2b -END(ret_from_fork) +SYM_CODE_END(ret_from_fork) /* * Build the entry stubs with some assembler magic. * We pack 1 stub into every 8-byte block. 
*/ .align 8 -ENTRY(irq_entries_start) +SYM_CODE_START(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) UNWIND_HINT_IRET_REGS @@ -373,10 +373,10 @@ ENTRY(irq_entries_start) .align 8 vector=vector+1 .endr -END(irq_entries_start) +SYM_CODE_END(irq_entries_start) .align 8 -ENTRY(spurious_entries_start) +SYM_CODE_START(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) UNWIND_HINT_IRET_REGS @@ -385,7 +385,7 @@ ENTRY(spurious_entries_start) .align 8 vector=vector+1 .endr -END(spurious_entries_start) +SYM_CODE_END(spurious_entries_start) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY @@ -511,7 +511,7 @@ END(spurious_entries_start) * | return address | * +----------------------------------------------------+ */ -ENTRY(interrupt_entry) +SYM_CODE_START(interrupt_entry) UNWIND_HINT_FUNC ASM_CLAC cld @@ -579,7 +579,7 @@ ENTRY(interrupt_entry) TRACE_IRQS_OFF ret -END(interrupt_entry) +SYM_CODE_END(interrupt_entry) _ASM_NOKPROBE(interrupt_entry) @@ -795,7 +795,7 @@ _ASM_NOKPROBE(common_interrupt) * APIC interrupts. */ .macro apicinterrupt3 num sym do_sym -ENTRY(\sym) +SYM_CODE_START(\sym) UNWIND_HINT_IRET_REGS pushq $~(\num) .Lcommon_\sym: @@ -803,7 +803,7 @@ ENTRY(\sym) UNWIND_HINT_REGS indirect=1 call \do_sym /* rdi points to pt_regs */ jmp ret_from_intr -END(\sym) +SYM_CODE_END(\sym) _ASM_NOKPROBE(\sym) .endm @@ -968,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS. */ .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0 -ENTRY(\sym) +SYM_CODE_START(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 /* Sanity check */ @@ -1018,7 +1018,7 @@ ENTRY(\sym) .endif _ASM_NOKPROBE(\sym) -END(\sym) +SYM_CODE_END(\sym) .endm idtentry divide_error do_divide_error has_error_code=0 @@ -1135,7 +1135,7 @@ SYM_CODE_END(xen_do_hypervisor_callback) * We distinguish between categories by comparing each saved segment register * with its current contents: any discrepancy means we in category 1. */ -ENTRY(xen_failsafe_callback) +SYM_CODE_START(xen_failsafe_callback) UNWIND_HINT_EMPTY movl %ds, %ecx cmpw %cx, 0x10(%rsp) @@ -1165,7 +1165,7 @@ ENTRY(xen_failsafe_callback) PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit -END(xen_failsafe_callback) +SYM_CODE_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ #ifdef CONFIG_XEN_PVHVM @@ -1384,7 +1384,7 @@ SYM_CODE_END(error_exit) * %r14: Used to save/restore the CR3 of the interrupted context * when PAGE_TABLE_ISOLATION is in use. Do not clobber. */ -ENTRY(nmi) +SYM_CODE_START(nmi) UNWIND_HINT_IRET_REGS /* @@ -1719,21 +1719,21 @@ nmi_restore: * about espfix64 on the way back to kernel mode. */ iretq -END(nmi) +SYM_CODE_END(nmi) #ifndef CONFIG_IA32_EMULATION /* * This handles SYSCALL from 32-bit code. There is no way to program * MSRs to fully disable 32-bit SYSCALL. */ -ENTRY(ignore_sysret) +SYM_CODE_START(ignore_sysret) UNWIND_HINT_EMPTY mov $-ENOSYS, %eax sysret -END(ignore_sysret) +SYM_CODE_END(ignore_sysret) #endif -ENTRY(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_do_exit) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. 
*/ xorl %ebp, %ebp @@ -1743,4 +1743,4 @@ ENTRY(rewind_stack_do_exit) UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE call do_exit -END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_do_exit) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 5c7e71669239..da296435676e 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -196,7 +196,7 @@ ENDPROC(entry_SYSENTER_compat) * esp user stack * 0(%esp) arg6 */ -ENTRY(entry_SYSCALL_compat) +SYM_CODE_START(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs @@ -311,7 +311,7 @@ sysret32_from_system_call: xorl %r10d, %r10d swapgs sysretl -END(entry_SYSCALL_compat) +SYM_CODE_END(entry_SYSCALL_compat) /* * 32-bit legacy system call entry. @@ -339,7 +339,7 @@ END(entry_SYSCALL_compat) * edi arg5 * ebp arg6 */ -ENTRY(entry_INT80_compat) +SYM_CODE_START(entry_INT80_compat) /* * Interrupts are off on entry. */ @@ -416,4 +416,4 @@ ENTRY(entry_INT80_compat) /* Go back to user mode. */ TRACE_IRQS_ON jmp swapgs_restore_regs_and_return_to_usermode -END(entry_INT80_compat) +SYM_CODE_END(entry_INT80_compat) -- cgit v1.2.3 From 6dcc5627f6aec4cb1d1494d06a48d8061db06a04 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:04 +0200 Subject: x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are all functions which are invoked from elsewhere, so annotate them as global using the new SYM_FUNC_START and their ENDPROC's by SYM_FUNC_END. Make sure ENTRY/ENDPROC is not defined on X86_64, given these were the last users. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Reviewed-by: Rafael J. Wysocki [hibernate] Reviewed-by: Boris Ostrovsky [xen bits] Acked-by: Herbert Xu [crypto] Cc: Allison Randal Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Armijn Hemel Cc: Cao jin Cc: Darren Hart Cc: Dave Hansen Cc: "David S. Miller" Cc: Enrico Weigelt Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jim Mattson Cc: Joerg Roedel Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kate Stewart Cc: "Kirill A. Shutemov" Cc: kvm ML Cc: Len Brown Cc: linux-arch@vger.kernel.org Cc: linux-crypto@vger.kernel.org Cc: linux-efi Cc: linux-efi@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Mark Rutland Cc: Matt Fleming Cc: Paolo Bonzini Cc: Pavel Machek Cc: Peter Zijlstra Cc: platform-driver-x86@vger.kernel.org Cc: "Radim Krčmář" Cc: Sean Christopherson Cc: Stefano Stabellini Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: Vitaly Kuznetsov Cc: Wanpeng Li Cc: Wei Huang Cc: x86-ml Cc: xen-devel@lists.xenproject.org Cc: Xiaoyao Li Link: https://lkml.kernel.org/r/20191011115108.12392-25-jslaby@suse.cz --- arch/x86/entry/entry_64.S | 10 +++++----- arch/x86/entry/entry_64_compat.S | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 13e4fe378e5a..d58c01239457 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -15,7 +15,7 @@ * at the top of the kernel process stack. * * Some macro usage: - * - ENTRY/END: Define functions in the symbol table. + * - SYM_FUNC_START/END:Define functions in the symbol table. * - TRACE_IRQ_*: Trace hardirq state for lock debugging. * - idtentry: Define exception entry points. 
*/ @@ -1040,7 +1040,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 * Reload gs selector with exception handling * edi: new selector */ -ENTRY(native_load_gs_index) +SYM_FUNC_START(native_load_gs_index) FRAME_BEGIN pushfq DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) @@ -1054,7 +1054,7 @@ ENTRY(native_load_gs_index) popfq FRAME_END ret -ENDPROC(native_load_gs_index) +SYM_FUNC_END(native_load_gs_index) EXPORT_SYMBOL(native_load_gs_index) _ASM_EXTABLE(.Lgs_change, .Lbad_gs) @@ -1075,7 +1075,7 @@ SYM_CODE_END(.Lbad_gs) .previous /* Call softirq on interrupt stack. Interrupts are off. */ -ENTRY(do_softirq_own_stack) +SYM_FUNC_START(do_softirq_own_stack) pushq %rbp mov %rsp, %rbp ENTER_IRQ_STACK regs=0 old_rsp=%r11 @@ -1083,7 +1083,7 @@ ENTRY(do_softirq_own_stack) LEAVE_IRQ_STACK regs=0 leaveq ret -ENDPROC(do_softirq_own_stack) +SYM_FUNC_END(do_softirq_own_stack) #ifdef CONFIG_XEN_PV idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index da296435676e..f1d3ccae5dd5 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -46,7 +46,7 @@ * ebp user stack * 0(%ebp) arg6 */ -ENTRY(entry_SYSENTER_compat) +SYM_FUNC_START(entry_SYSENTER_compat) /* Interrupts are off on entry. */ SWAPGS @@ -147,7 +147,7 @@ ENTRY(entry_SYSENTER_compat) popfq jmp .Lsysenter_flags_fixed SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL) -ENDPROC(entry_SYSENTER_compat) +SYM_FUNC_END(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. -- cgit v1.2.3 From 78762b0e79bc1dd01347be061abdf505202152c9 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:05 +0200 Subject: x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_* All these are functions which are invoked from elsewhere but they are not typical C functions. So annotate them using the new SYM_CODE_START. All these were not balanced with any END, so mark their ends by SYM_CODE_END, appropriately. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Reviewed-by: Boris Ostrovsky [xen bits] Reviewed-by: Rafael J. Wysocki [hibernate] Cc: Andy Lutomirski Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Len Brown Cc: linux-arch@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Pavel Machek Cc: Peter Zijlstra Cc: Pingfan Liu Cc: Stefano Stabellini Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: x86-ml Cc: xen-devel@lists.xenproject.org Link: https://lkml.kernel.org/r/20191011115108.12392-26-jslaby@suse.cz --- arch/x86/entry/entry_32.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 4900a6a5e125..64fe7aa50ad2 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) * Xen doesn't set %esp to be precisely what the normal SYSENTER * entry point expects, so fix it up before using the normal path. 
*/ -ENTRY(xen_sysenter_target) +SYM_CODE_START(xen_sysenter_target) addl $5*4, %esp /* remove xen-provided frame */ jmp .Lsysenter_past_esp +SYM_CODE_END(xen_sysenter_target) #endif /* -- cgit v1.2.3 From 5e63306f1629527799e34a9814dd8035df6ca854 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:06 +0200 Subject: x86/asm/32: Change all ENTRY+END to SYM_CODE_* Change all assembly code which is marked using END (and not ENDPROC) to appropriate new markings SYM_CODE_START and SYM_CODE_END. And since the last user of END on X86 is gone now, make sure that END is not defined there. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: linux-arch@vger.kernel.org Cc: Mark Rutland Cc: Peter Zijlstra Cc: "Rafael J. Wysocki" Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-27-jslaby@suse.cz --- arch/x86/entry/entry_32.S | 104 +++++++++++++++++++++++----------------------- 1 file changed, 52 insertions(+), 52 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 64fe7aa50ad2..0ecc12fcfc05 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -709,7 +709,7 @@ * %eax: prev task * %edx: next task */ -ENTRY(__switch_to_asm) +SYM_CODE_START(__switch_to_asm) /* * Save callee-saved registers * This must match the order in struct inactive_task_frame @@ -748,7 +748,7 @@ ENTRY(__switch_to_asm) popl %ebp jmp __switch_to -END(__switch_to_asm) +SYM_CODE_END(__switch_to_asm) /* * The unwinder expects the last frame on the stack to always be at the same @@ -774,7 +774,7 @@ ENDPROC(schedule_tail_wrapper) * ebx: kernel thread func (NULL for user thread) * edi: kernel thread arg */ -ENTRY(ret_from_fork) +SYM_CODE_START(ret_from_fork) call schedule_tail_wrapper testl %ebx, %ebx @@ -797,7 +797,7 @@ ENTRY(ret_from_fork) */ movl $0, PT_EAX(%esp) jmp 2b -END(ret_from_fork) +SYM_CODE_END(ret_from_fork) /* * Return to user mode is not as complex as all this looks, @@ -1161,7 +1161,7 @@ ENDPROC(entry_INT80_32) * We pack 1 stub into every 8-byte block. */ .align 8 -ENTRY(irq_entries_start) +SYM_CODE_START(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ @@ -1169,11 +1169,11 @@ ENTRY(irq_entries_start) jmp common_interrupt .align 8 .endr -END(irq_entries_start) +SYM_CODE_END(irq_entries_start) #ifdef CONFIG_X86_LOCAL_APIC .align 8 -ENTRY(spurious_entries_start) +SYM_CODE_START(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ @@ -1181,7 +1181,7 @@ ENTRY(spurious_entries_start) jmp common_spurious .align 8 .endr -END(spurious_entries_start) +SYM_CODE_END(spurious_entries_start) SYM_CODE_START_LOCAL(common_spurious) ASM_CLAC @@ -1230,14 +1230,14 @@ ENDPROC(name) /* The include is where all of the SMP etc. 
interrupts come from */ #include -ENTRY(coprocessor_error) +SYM_CODE_START(coprocessor_error) ASM_CLAC pushl $0 pushl $do_coprocessor_error jmp common_exception -END(coprocessor_error) +SYM_CODE_END(coprocessor_error) -ENTRY(simd_coprocessor_error) +SYM_CODE_START(simd_coprocessor_error) ASM_CLAC pushl $0 #ifdef CONFIG_X86_INVD_BUG @@ -1249,96 +1249,96 @@ ENTRY(simd_coprocessor_error) pushl $do_simd_coprocessor_error #endif jmp common_exception -END(simd_coprocessor_error) +SYM_CODE_END(simd_coprocessor_error) -ENTRY(device_not_available) +SYM_CODE_START(device_not_available) ASM_CLAC pushl $-1 # mark this as an int pushl $do_device_not_available jmp common_exception -END(device_not_available) +SYM_CODE_END(device_not_available) #ifdef CONFIG_PARAVIRT -ENTRY(native_iret) +SYM_CODE_START(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) -END(native_iret) +SYM_CODE_END(native_iret) #endif -ENTRY(overflow) +SYM_CODE_START(overflow) ASM_CLAC pushl $0 pushl $do_overflow jmp common_exception -END(overflow) +SYM_CODE_END(overflow) -ENTRY(bounds) +SYM_CODE_START(bounds) ASM_CLAC pushl $0 pushl $do_bounds jmp common_exception -END(bounds) +SYM_CODE_END(bounds) -ENTRY(invalid_op) +SYM_CODE_START(invalid_op) ASM_CLAC pushl $0 pushl $do_invalid_op jmp common_exception -END(invalid_op) +SYM_CODE_END(invalid_op) -ENTRY(coprocessor_segment_overrun) +SYM_CODE_START(coprocessor_segment_overrun) ASM_CLAC pushl $0 pushl $do_coprocessor_segment_overrun jmp common_exception -END(coprocessor_segment_overrun) +SYM_CODE_END(coprocessor_segment_overrun) -ENTRY(invalid_TSS) +SYM_CODE_START(invalid_TSS) ASM_CLAC pushl $do_invalid_TSS jmp common_exception -END(invalid_TSS) +SYM_CODE_END(invalid_TSS) -ENTRY(segment_not_present) +SYM_CODE_START(segment_not_present) ASM_CLAC pushl $do_segment_not_present jmp common_exception -END(segment_not_present) +SYM_CODE_END(segment_not_present) -ENTRY(stack_segment) +SYM_CODE_START(stack_segment) ASM_CLAC pushl $do_stack_segment jmp common_exception -END(stack_segment) +SYM_CODE_END(stack_segment) -ENTRY(alignment_check) +SYM_CODE_START(alignment_check) ASM_CLAC pushl $do_alignment_check jmp common_exception -END(alignment_check) +SYM_CODE_END(alignment_check) -ENTRY(divide_error) +SYM_CODE_START(divide_error) ASM_CLAC pushl $0 # no error code pushl $do_divide_error jmp common_exception -END(divide_error) +SYM_CODE_END(divide_error) #ifdef CONFIG_X86_MCE -ENTRY(machine_check) +SYM_CODE_START(machine_check) ASM_CLAC pushl $0 pushl machine_check_vector jmp common_exception -END(machine_check) +SYM_CODE_END(machine_check) #endif -ENTRY(spurious_interrupt_bug) +SYM_CODE_START(spurious_interrupt_bug) ASM_CLAC pushl $0 pushl $do_spurious_interrupt_bug jmp common_exception -END(spurious_interrupt_bug) +SYM_CODE_END(spurious_interrupt_bug) #ifdef CONFIG_XEN_PV ENTRY(xen_hypervisor_callback) @@ -1442,11 +1442,11 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, #endif /* CONFIG_HYPERV */ -ENTRY(page_fault) +SYM_CODE_START(page_fault) ASM_CLAC pushl $do_page_fault jmp common_exception_read_cr2 -END(page_fault) +SYM_CODE_END(page_fault) SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2) /* the function address is in %gs's slot on the stack */ @@ -1495,7 +1495,7 @@ SYM_CODE_START_LOCAL_NOALIGN(common_exception) jmp ret_from_exception SYM_CODE_END(common_exception) -ENTRY(debug) +SYM_CODE_START(debug) /* * Entry from sysenter is now handled in common_exception */ @@ -1503,7 +1503,7 @@ ENTRY(debug) pushl $-1 # mark this as an int pushl $do_debug jmp 
common_exception -END(debug) +SYM_CODE_END(debug) /* * NMI is doubly nasty. It can happen on the first instruction of @@ -1512,7 +1512,7 @@ END(debug) * switched stacks. We handle both conditions by simply checking whether we * interrupted kernel code running on the SYSENTER stack. */ -ENTRY(nmi) +SYM_CODE_START(nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 @@ -1577,9 +1577,9 @@ ENTRY(nmi) lss 12+4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif -END(nmi) +SYM_CODE_END(nmi) -ENTRY(int3) +SYM_CODE_START(int3) ASM_CLAC pushl $-1 # mark this as an int @@ -1590,22 +1590,22 @@ ENTRY(int3) movl %esp, %eax # pt_regs pointer call do_int3 jmp ret_from_exception -END(int3) +SYM_CODE_END(int3) -ENTRY(general_protection) +SYM_CODE_START(general_protection) pushl $do_general_protection jmp common_exception -END(general_protection) +SYM_CODE_END(general_protection) #ifdef CONFIG_KVM_GUEST -ENTRY(async_page_fault) +SYM_CODE_START(async_page_fault) ASM_CLAC pushl $do_async_page_fault jmp common_exception_read_cr2 -END(async_page_fault) +SYM_CODE_END(async_page_fault) #endif -ENTRY(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_do_exit) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp @@ -1614,4 +1614,4 @@ ENTRY(rewind_stack_do_exit) call do_exit 1: jmp 1b -END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_do_exit) -- cgit v1.2.3 From 6d685e5318e51b843ca50adeca50dc6300bf2cbb Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:07 +0200 Subject: x86/asm/32: Change all ENTRY+ENDPROC to SYM_FUNC_* These are all functions which are invoked from elsewhere, so annotate them as global using the new SYM_FUNC_START and their ENDPROC's by SYM_FUNC_END. Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Allison Randal Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Bill Metzenthen Cc: Boris Ostrovsky Cc: Darren Hart Cc: "David S. Miller" Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-crypto@vger.kernel.org Cc: linux-efi Cc: linux-efi@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Mark Rutland Cc: Matt Fleming Cc: Pavel Machek Cc: platform-driver-x86@vger.kernel.org Cc: "Rafael J. Wysocki" Cc: Thomas Gleixner Cc: Will Deacon Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-28-jslaby@suse.cz --- arch/x86/entry/entry_32.S | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 0ecc12fcfc05..a987b62fc591 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -757,7 +757,7 @@ SYM_CODE_END(__switch_to_asm) * asmlinkage function so its argument has to be pushed on the stack. This * wrapper creates a proper "end of stack" frame header before the call. */ -ENTRY(schedule_tail_wrapper) +SYM_FUNC_START(schedule_tail_wrapper) FRAME_BEGIN pushl %eax @@ -766,7 +766,7 @@ ENTRY(schedule_tail_wrapper) FRAME_END ret -ENDPROC(schedule_tail_wrapper) +SYM_FUNC_END(schedule_tail_wrapper) /* * A newly forked process directly context switches into this address. 
* @@ -885,7 +885,7 @@ SYM_CODE_END(xen_sysenter_target) * ebp user stack * 0(%ebp) arg6 */ -ENTRY(entry_SYSENTER_32) +SYM_FUNC_START(entry_SYSENTER_32) /* * On entry-stack with all userspace-regs live - save and * restore eflags and %eax to use it as scratch-reg for the cr3 @@ -1013,7 +1013,7 @@ ENTRY(entry_SYSENTER_32) popfl jmp .Lsysenter_flags_fixed SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) -ENDPROC(entry_SYSENTER_32) +SYM_FUNC_END(entry_SYSENTER_32) /* * 32-bit legacy system call entry. @@ -1043,7 +1043,7 @@ ENDPROC(entry_SYSENTER_32) * edi arg5 * ebp arg6 */ -ENTRY(entry_INT80_32) +SYM_FUNC_START(entry_INT80_32) ASM_CLAC pushl %eax /* pt_regs->orig_ax */ @@ -1120,7 +1120,7 @@ SYM_CODE_START(iret_exc) SYM_CODE_END(iret_exc) .previous _ASM_EXTABLE(.Lirq_return, iret_exc) -ENDPROC(entry_INT80_32) +SYM_FUNC_END(entry_INT80_32) .macro FIXUP_ESPFIX_STACK /* @@ -1213,7 +1213,7 @@ SYM_CODE_START_LOCAL(common_interrupt) SYM_CODE_END(common_interrupt) #define BUILD_INTERRUPT3(name, nr, fn) \ -ENTRY(name) \ +SYM_FUNC_START(name) \ ASM_CLAC; \ pushl $~(nr); \ SAVE_ALL switch_stacks=1; \ @@ -1222,7 +1222,7 @@ ENTRY(name) \ movl %esp, %eax; \ call fn; \ jmp ret_from_intr; \ -ENDPROC(name) +SYM_FUNC_END(name) #define BUILD_INTERRUPT(name, nr) \ BUILD_INTERRUPT3(name, nr, smp_##name); \ @@ -1341,7 +1341,7 @@ SYM_CODE_START(spurious_interrupt_bug) SYM_CODE_END(spurious_interrupt_bug) #ifdef CONFIG_XEN_PV -ENTRY(xen_hypervisor_callback) +SYM_FUNC_START(xen_hypervisor_callback) pushl $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL ENCODE_FRAME_POINTER @@ -1369,7 +1369,7 @@ SYM_INNER_LABEL_ALIGN(xen_do_upcall, SYM_L_GLOBAL) call xen_maybe_preempt_hcall #endif jmp ret_from_intr -ENDPROC(xen_hypervisor_callback) +SYM_FUNC_END(xen_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. @@ -1383,7 +1383,7 @@ ENDPROC(xen_hypervisor_callback) * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by maintaining a status value in EAX. */ -ENTRY(xen_failsafe_callback) +SYM_FUNC_START(xen_failsafe_callback) pushl %eax movl $1, %eax 1: mov 4(%esp), %ds @@ -1420,7 +1420,7 @@ ENTRY(xen_failsafe_callback) _ASM_EXTABLE(2b, 7b) _ASM_EXTABLE(3b, 8b) _ASM_EXTABLE(4b, 9b) -ENDPROC(xen_failsafe_callback) +SYM_FUNC_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ #ifdef CONFIG_XEN_PVHVM -- cgit v1.2.3 From 66584ea6b70a6cbae661292702e56a07580dbbd4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 15 Nov 2019 00:49:22 +0900 Subject: x86/build/vdso: Remove meaningless CFLAGS_REMOVE_*.o CFLAGS_REMOVE_*.o syntax is used to drop particular flags when building objects from C files. It has no effect for assembly files. vdso-note.o is compiled from the assembly file, vdso-note.S, hence CFLAGS_REMOVE_vdso-note.o is meaningless. Neither vvar.c nor vvar.S is found in the vdso directory. Since there is no source file to create vvar.o, CFLAGS_REMOVE_vvar.o is also meaningless. Signed-off-by: Masahiro Yamada Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20191114154922.30365-1-yamada.masahiro@socionext.com --- arch/x86/entry/vdso/Makefile | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 0f2154106d01..2b75e80f6b41 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -87,11 +87,9 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. # -CFLAGS_REMOVE_vdso-note.o = -pg CFLAGS_REMOVE_vclock_gettime.o = -pg CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg CFLAGS_REMOVE_vgetcpu.o = -pg -CFLAGS_REMOVE_vvar.o = -pg # # X32 processes use x32 vDSO to access 64bit kernel data. -- cgit v1.2.3 From df1a7524741b6c094786032e12a21a448321d9f6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 23 Oct 2019 14:27:06 +0200 Subject: x86/entry/32: Remove unused resume_userspace label The C reimplementation of SYSENTER left that unused ENTRY() label around. Remove it. Fixes: 5f310f739b4c ("x86/entry/32: Re-implement SYSENTER using the new C path") Originally-by: Peter Zijlstra Signed-off-by: Thomas Gleixner Reviewed-by: Sean Christopherson Reviewed-by: Alexandre Chartre Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20191023123117.686514045@linutronix.de --- arch/x86/entry/entry_32.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index a987b62fc591..4bbcc5e64969 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -824,7 +824,6 @@ ret_from_intr: cmpl $USER_RPL, %eax jb restore_all_kernel # not returning to v8086 or userspace -SYM_INNER_LABEL_ALIGN(resume_userspace, SYM_L_LOCAL) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF movl %esp, %eax -- cgit v1.2.3 From 45c08383141794a7e9b26f35d491b74f33ac469e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 23 Oct 2019 14:27:07 +0200 Subject: x86/entry/64: Remove pointless jump in paranoid_exit Jump directly to restore_regs_and_return_to_kernel instead of making a pointless extra jump through .Lparanoid_exit_restore Signed-off-by: Thomas Gleixner Reviewed-by: Sean Christopherson Reviewed-by: Alexandre Chartre Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20191023123117.779277679@linutronix.de --- arch/x86/entry/entry_64.S | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86/entry') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d58c01239457..76942cbd95a1 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1273,12 +1273,11 @@ SYM_CODE_START_LOCAL(paranoid_exit) /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK - jmp .Lparanoid_exit_restore + jmp restore_regs_and_return_to_kernel .Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 -.Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel SYM_CODE_END(paranoid_exit) -- cgit v1.2.3
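
For reference, the annotation scheme this series converges on looks
roughly like the sketch below. It is illustrative only: the symbol names
are made up, and a real .S file would pull the macros in via
#include <linux/linkage.h>.

	/* Global C-like function (C calling convention): was ENTRY/ENDPROC. */
	SYM_FUNC_START(my_func)
		ret
	SYM_FUNC_END(my_func)

	/* Global non-C-like code, e.g. an entry point: was ENTRY/END. */
	SYM_CODE_START(my_entry)
		ret
	SYM_CODE_END(my_entry)

	/*
	 * File-local pseudo-function: _NOALIGN keeps it compact, and the
	 * ".L" prefix keeps the name out of the symbol table.
	 */
	SYM_CODE_START_LOCAL_NOALIGN(.Lmy_local)
		ret
	SYM_CODE_END(.Lmy_local)

	/* Label in the middle of another function: was GLOBAL. */
	SYM_INNER_LABEL(my_inner_label, SYM_L_GLOBAL)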