From d2d9c4a3d01c20f936f88e0c2793945e6632dab6 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Fri, 21 Oct 2016 10:44:49 +0200
Subject: x86/apic: Get rid of "warning: 'acpi_ioapic_lock' defined but not used"

kbuild test robot reported this against the -RT tree:

|>> arch/x86/kernel/acpi/boot.c:90:21: warning: 'acpi_ioapic_lock' defined but not used [-Wunused-variable]
|    static DEFINE_MUTEX(acpi_ioapic_lock);
|                        ^
| include/linux/mutex_rt.h:27:15: note: in definition of macro 'DEFINE_MUTEX'
|   struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
|                ^~~~~~~~~

which is also true (as in non-used) for !RT but the compiler does not emit
a warning.

Reported-by: kbuild test robot
Signed-off-by: Sebastian Andrzej Siewior
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/20161021084449.32523-1-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/acpi/boot.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8a5abaa7d453..066ceb7767b8 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -76,6 +76,7 @@ int acpi_fix_pin2_polarity __initdata;
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #endif
 
+#ifdef CONFIG_X86_IO_APIC
 /*
  * Locks related to IOAPIC hotplug
  * Hotplug side:
@@ -88,6 +89,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  *			->ioapic_lock
  */
 static DEFINE_MUTEX(acpi_ioapic_lock);
+#endif
 
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
--
cgit v1.2.3


From b2c5ea4f759190ee9f75687a00035c1a66d0d743 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Mon, 7 Nov 2016 11:13:39 +0800
Subject: x86/msr: Add wrmsr_notrace()

Required to remove the extra irq_enter()/irq_exit() in
smp_reschedule_interrupt().

Suggested-by: Peter Zijlstra
Suggested-by: Paolo Bonzini
Signed-off-by: Wanpeng Li
Acked-by: Paolo Bonzini
Reviewed-by: Borislav Petkov
Cc: kvm@vger.kernel.org
Cc: Mike Galbraith
Link: http://lkml.kernel.org/r/1478488420-5982-2-git-send-email-wanpeng.li@hotmail.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/msr.h | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index b5fee97813cd..9b0a23236836 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -115,17 +115,29 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 }
 
 /* Can be uninlined because referenced by paravirt */
-notrace static inline void native_write_msr(unsigned int msr,
+static notrace inline void __native_write_msr_notrace(unsigned int msr,
                                             unsigned low, unsigned high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+/* Can be uninlined because referenced by paravirt */
+static notrace inline void native_write_msr(unsigned int msr,
+                                            unsigned low, unsigned high)
+{
+       __native_write_msr_notrace(msr, low, high);
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
+static inline void wrmsr_notrace(unsigned msr, unsigned low, unsigned high)
+{
+       __native_write_msr_notrace(msr, low, high);
+}
+
 /* Can be uninlined because referenced by paravirt */
 notrace static inline int native_write_msr_safe(unsigned int msr,
                                         unsigned low, unsigned high)
--
cgit v1.2.3


From 8ca225520e278e41396dab0524989f4848626f83 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Mon, 7 Nov 2016 11:13:40 +0800
Subject: x86/apic: Prevent tracing on apic_msr_write_eoi()

The following RCU lockdep warning led to adding irq_enter()/irq_exit() into
smp_reschedule_interrupt():

 RCU used illegally from idle CPU!
 rcu_scheduler_active = 1, debug_locks = 0
 RCU used illegally from extended quiescent state!
 no locks held by swapper/1/0.

 do_trace_write_msr
 native_write_msr
 native_apic_msr_eoi_write
 smp_reschedule_interrupt
 reschedule_interrupt

As Peterz pointed out:

| So now we're making a very frequent interrupt slower because of debug
| code.
|
| The thing is, many many smp_reschedule_interrupt() invocations don't
| actually execute anything much at all and are only sent to tickle the
| return to user path (which does the actual preemption).
|
| Having to do the whole irq_enter/irq_exit dance just for this unlikely
| debug case totally blows.

Use the wrmsr_notrace() variant in native_apic_msr_write_eoi, annotate the
kvm variant with notrace and add a native_apic_eoi callback to the apic
structure so KVM guests are covered as well.

This allows to revert the irq_enter/irq_exit dance in
smp_reschedule_interrupt().

Suggested-by: Peter Zijlstra
Suggested-by: Paolo Bonzini
Signed-off-by: Wanpeng Li
Acked-by: Paolo Bonzini
Cc: kvm@vger.kernel.org
Cc: Mike Galbraith
Cc: Borislav Petkov
Link: http://lkml.kernel.org/r/1478488420-5982-3-git-send-email-wanpeng.li@hotmail.com
Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/apic.h | 3 ++-
 arch/x86/kernel/apic/apic.c | 1 +
 arch/x86/kernel/kvm.c       | 4 ++--
 arch/x86/kernel/smp.c       | 2 --
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f5aaf6c83222..a5a0bcfde76a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -196,7 +196,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
 {
-       wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+       wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
@@ -332,6 +332,7 @@ struct apic {
         * on write for EOI.
         */
        void (*eoi_write)(u32 reg, u32 v);
+       void (*native_eoi_write)(u32 reg, u32 v);
        u64 (*icr_read)(void);
        void (*icr_write)(u32 low, u32 high);
        void (*wait_icr_idle)(void);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 88c657b057e2..2686894350a4 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2263,6 +2263,7 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
        for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
                /* Should happen once for each apic */
                WARN_ON((*drv)->eoi_write == eoi_write);
+               (*drv)->native_eoi_write = (*drv)->eoi_write;
                (*drv)->eoi_write = eoi_write;
        }
 }
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index edbbfc854e39..aad52f1b0f9f 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -308,7 +308,7 @@ static void kvm_register_steal_time(void)
 
 static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
 
-static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 {
        /**
         * This relies on __test_and_clear_bit to modify the memory
@@ -319,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
-       apic_write(APIC_EOI, APIC_EOI_ACK);
+       apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
 }
 
 static void kvm_guest_cpu_init(void)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index c00cb64bc0a1..68f8cc222f25 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,10 +261,8 @@ static inline void __smp_reschedule_interrupt(void)
 
 __visible void smp_reschedule_interrupt(struct pt_regs *regs)
 {
-       irq_enter();
        ack_APIC_irq();
        __smp_reschedule_interrupt();
-       irq_exit();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
--
cgit v1.2.3


From 5d07c2cc1962facb2f5d57b7b62987c213643724 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Wed, 2 Nov 2016 19:35:22 +0100
Subject: x86/msr: Cleanup/streamline MSR helpers

Make the MSR argument an unsigned int, both low and high u32, put
"notrace" last in the function signature.

Reflow function signatures for better readability and cleanup white
space.

Signed-off-by: Borislav Petkov
Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/msr.h | 40 +++++++++++++++++++++-------------------
 arch/x86/lib/msr.c         |  4 ++--
 2 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9b0a23236836..db0b90c3b03e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -70,14 +70,14 @@ extern struct tracepoint __tracepoint_read_msr;
 extern struct tracepoint __tracepoint_write_msr;
 extern struct tracepoint __tracepoint_rdpmc;
 #define msr_tracepoint_active(t) static_key_false(&(t).key)
-extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
 #else
 #define msr_tracepoint_active(t) false
-static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 #endif
 
 static inline unsigned long long native_read_msr(unsigned int msr)
@@ -115,8 +115,8 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 }
 
 /* Can be uninlined because referenced by paravirt */
-static notrace inline void __native_write_msr_notrace(unsigned int msr,
-                                            unsigned low, unsigned high)
+static inline void notrace
+__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
@@ -125,24 +125,26 @@ static notrace inline void __native_write_msr_notrace(unsigned int msr,
 }
 
 /* Can be uninlined because referenced by paravirt */
-static notrace inline void native_write_msr(unsigned int msr,
-                                            unsigned low, unsigned high)
+static inline void notrace
+native_write_msr(unsigned int msr, u32 low, u32 high)
 {
        __native_write_msr_notrace(msr, low, high);
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
-static inline void wrmsr_notrace(unsigned msr, unsigned low, unsigned high)
+static inline void
+wrmsr_notrace(unsigned int msr, u32 low, u32 high)
 {
        __native_write_msr_notrace(msr, low, high);
 }
 
 /* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
-                                        unsigned low, unsigned high)
+static inline int notrace
+native_write_msr_safe(unsigned int msr, u32 low, u32 high)
 {
        int err;
+
        asm volatile("2: wrmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
@@ -235,7 +237,7 @@ do {                                                        \
        (void)((high) = (u32)(__val >> 32));            \
 } while (0)
 
-static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
+static inline void wrmsr(unsigned int msr, u32 low, u32 high)
 {
        native_write_msr(msr, low, high);
 }
@@ -243,13 +245,13 @@
 #define rdmsrl(msr, val)                       \
        ((val) = native_read_msr((msr)))
 
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrl(unsigned int msr, u64 val)
 {
        native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
 }
 
 /* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
+static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
 {
        return native_write_msr_safe(msr, low, high);
 }
@@ -264,7 +266,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
        __err;                                          \
 })
 
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
 {
        int err;
 
@@ -337,12 +339,12 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                struct msr *msrs)
 {
-       rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+       rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
 }
 static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                struct msr *msrs)
 {
-       wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+       wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index d1dee753b949..07764255b611 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -113,14 +113,14 @@ int msr_clear_bit(u32 msr, u8 bit)
 }
 
 #ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned msr, u64 val, int failed)
+void do_trace_write_msr(unsigned int msr, u64 val, int failed)
 {
        trace_write_msr(msr, val, failed);
 }
 EXPORT_SYMBOL(do_trace_write_msr);
 EXPORT_TRACEPOINT_SYMBOL(write_msr);
 
-void do_trace_read_msr(unsigned msr, u64 val, int failed)
+void do_trace_read_msr(unsigned int msr, u64 val, int failed)
 {
        trace_read_msr(msr, val, failed);
 }
--
cgit v1.2.3
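
For illustration, the traced/untraced split this series introduces can be sketched as a small, self-contained user-space C program: one untraced low-level write, a default wrapper that fires a trace hook when enabled, and a notrace entry point that bypasses it. The names used here (raw_write, write_traced, write_notrace, trace_enabled) are hypothetical stand-ins for __native_write_msr_notrace(), native_write_msr(), wrmsr_notrace() and the write_msr tracepoint key; this is a sketch of the calling pattern only, not kernel code and not a real MSR access.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the tracepoint static key checked by msr_tracepoint_active(). */
static bool trace_enabled;

/* Lowest-level write, never traced: models __native_write_msr_notrace(). */
static void raw_write(unsigned int msr, uint32_t low, uint32_t high)
{
        /* A real implementation would execute WRMSR here. */
        (void)msr; (void)low; (void)high;
}

/* Default path: do the write, then fire the trace hook if it is enabled.
 * Models native_write_msr(). */
static void write_traced(unsigned int msr, uint32_t low, uint32_t high)
{
        raw_write(msr, low, high);
        if (trace_enabled)
                printf("write_msr: %#x val %#llx\n", msr,
                       ((unsigned long long)high << 32) | low);
}

/* Hot/fragile path: same write, no tracing at all. Models wrmsr_notrace(),
 * which the APIC EOI write uses so no tracepoint code runs in a context
 * where irq_enter()/irq_exit() has not been done. */
static void write_notrace(unsigned int msr, uint32_t low, uint32_t high)
{
        raw_write(msr, low, high);
}

int main(void)
{
        trace_enabled = true;
        write_traced(0x10, 0x1234, 0);  /* traced: prints the value */
        write_notrace(0x80b, 0x0, 0);   /* never traced, like the EOI path */
        return 0;
}

Keeping the trace check out of the lowest-level helper is the design point of the series: callers that must stay trace-free (the EOI write, the KVM PV-EOI variant) call the bare helper, while everything else keeps the tracepoint.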