author    Linus Torvalds <torvalds@linux-foundation.org>    2019-09-16 16:49:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-09-16 16:49:55 -0700
commit    c7eba51cfdf9cd1ca7ed4201b30be8b2bef15ff5 (patch)
tree      048bff0b92dc230f01b5af23cd1b5960615bd68a /arch/x86/include
parent    cc9b499a1f71696054a2771aae504c53eecff31d (diff)
parent    e57d143091f1c0b1a98140a4d2e63e113afb62c0 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - improve rwsem scalability

 - add uninitialized rwsem debugging check

 - reduce lockdep's stacktrace memory usage and add diagnostics

 - misc cleanups, code consolidation and constification

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mutex: Fix up mutex_waiter usage
  locking/mutex: Use mutex flags macro instead of hard code
  locking/mutex: Make __mutex_owner static to mutex.c
  locking/qspinlock,x86: Clarify virt_spin_lock_key
  locking/rwsem: Check for operations on an uninitialized rwsem
  locking/rwsem: Make handoff writer optimistically spin on owner
  locking/lockdep: Report more stack trace statistics
  locking/lockdep: Reduce space occupied by stack traces
  stacktrace: Constify 'entries' arguments
  locking/lockdep: Make it clear that what lock_class::key points at is not modified
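The qspinlock item in this pull ('locking/qspinlock,x86: Clarify virt_spin_lock_key') adds only comments; the mechanism they describe is a static key that bare-metal boot flips off exactly once. As a rough sketch of the disabling side (based on arch/x86/kernel/paravirt.c of this era, not part of the diff below): the key is declared true, and native boot disables it, so the change only ever goes in one direction.

/*
 * Sketch of the "single direction change": the key starts out enabled,
 * native (non-hypervisor) boot disables it once, and a guest simply
 * leaves it set. Based on kernels around v5.4; not part of this diff.
 */
void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
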
Diffstat (limited to 'arch/x86/include')
-rw-r--r--    arch/x86/include/asm/qspinlock.h    15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index bd5ac6cc37db..444d6fd9a6d8 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_lock_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
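
The hunk ends at the opening brace. For context, a sketch of the function body it leads into, reconstructed from kernel sources of this period, so treat the details as illustrative rather than as part of this diff: when the key is set, the virt path degrades to a simple unfair test-and-set loop, because a fair queued lock behaves badly when the lock holder's vCPU can be preempted.

/*
 * Sketch of the hijack body; reconstructed from kernels around v5.4,
 * not shown in the hunk above.
 */
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;	/* native: let the real slowpath run */

	/*
	 * Unfair test-and-set fallback: fair queued locks have terrible
	 * lock-holder preemption behaviour under a hypervisor.
	 */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}

On the consumer side, queued_spin_lock_slowpath() in kernel/locking/qspinlock.c calls virt_spin_lock() early and returns immediately when it reports true; that early exit is the "shortcut" the second added comment refers to.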