author		Thomas Gleixner <tglx@linutronix.de>	2020-04-10 13:54:00 +0200
committer	Borislav Petkov <bp@suse.de>		2020-04-11 16:39:30 +0200
commit		d7e94dbdac1a40924626b0efc7ff530c8baf5e4a (patch)
tree		7e41876bdd753a6e9b0e6911cb03bcee99a1c320 /arch/x86/include/asm
parent		5b8b9d0c6d0e0f1993c6c56deaf9646942c49d94 (diff)
x86/split_lock: Provide handle_guest_split_lock()
Without at least minimal handling for split lock detection induced #AC, VMX will just run into the same problem as the VMWare hypervisor, which was reported by Kenneth. It will inject the #AC blindly into the guest whether the guest is prepared or not.

Provide a function for guest mode which acts depending on the host SLD mode. If mode == sld_warn, treat it like user space, i.e. emit a warning, disable SLD and mark the task accordingly. Otherwise force SIGBUS.

[ bp: Add a !CPU_SUP_INTEL stub for handle_guest_split_lock(). ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lkml.kernel.org/r/20200410115516.978037132@linutronix.de
Link: https://lkml.kernel.org/r/20200402123258.895628824@linutronix.de
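For reference, a minimal sketch of how a guest-mode handler with the behaviour described above could look. The actual implementation lives on the arch/x86/kernel/cpu/intel.c side of this series, which is outside this header-only diffstat; the sld_state bookkeeping and the split_lock_warn() helper are assumptions taken from the split lock detection code, not part of the hunk shown below.

/*
 * Illustrative sketch only, not the committed implementation.
 * sld_state/sld_warn and split_lock_warn() are assumed from the
 * split lock detection code in arch/x86/kernel/cpu/intel.c.
 */
bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		/*
		 * Same treatment as a user space #AC: warn, disable SLD
		 * for this task and mark it so SLD is re-enabled when the
		 * task schedules out.
		 */
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid, ip);

	/* Any other mode: force SIGBUS on the current task. */
	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}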
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/cpu.h | 6
1 file changed, 6 insertions, 0 deletions
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index ff6f3ca649b3..dd17c2da1af5 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -44,6 +44,7 @@ unsigned int x86_stepping(unsigned int sig);
 extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
 extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
+extern bool handle_guest_split_lock(unsigned long ip);
 #else
 static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
 static inline void switch_to_sld(unsigned long tifn) {}
@@ -51,5 +52,10 @@ static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
 	return false;
 }
+
+static inline bool handle_guest_split_lock(unsigned long ip)
+{
+	return false;
+}
 #endif
 #endif /* _ASM_X86_CPU_H */
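The intended caller is the #AC intercept in KVM/VMX, which is wired up in a follow-up patch. A hypothetical call site could look like the sketch below; the function name and the return-code convention (1 = resume guest, 0 = exit to userspace) follow common KVM practice, but everything around handle_guest_split_lock() here is illustrative, not part of this patch.

/*
 * Hypothetical VMX-side use of the new helper; names other than
 * handle_guest_split_lock() are assumptions for illustration.
 */
static int handle_guest_alignment_check(struct kvm_vcpu *vcpu, unsigned long rip)
{
	/*
	 * In sld_warn mode handle_guest_split_lock() warns, disables SLD
	 * for this task and returns true, so the vCPU can simply resume.
	 * Otherwise it forces SIGBUS on the vCPU thread and returns false.
	 */
	if (handle_guest_split_lock(rip))
		return 1;	/* handled, re-enter the guest */

	return 0;		/* SIGBUS queued, exit to userspace */
}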