Diffstat (limited to 'kernel/printk/printk.c')
-rw-r--r-- | kernel/printk/printk.c | 116 |
1 files changed, 116 insertions, 0 deletions
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 421c35571797..142a58d124d9 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+	do {
+		cpu_relax();
+	} while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+	int cpu;
+	int old;
+
+	cpu = smp_processor_id();
+
+	/*
+	 * Guarantee loads and stores from this CPU when it is the lock owner
+	 * are _not_ visible to the previous lock owner. This pairs with
+	 * __printk_cpu_unlock:B.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of the previous CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of this CPU
+	 */
+	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	if (old == -1) {
+		/*
+		 * This CPU is now the owner and begins loading/storing
+		 * data: LMM(__printk_cpu_trylock:B)
+		 */
+		return 1;
+
+	} else if (old == cpu) {
+		/* This CPU is already the owner. */
+		atomic_inc(&printk_cpulock_nested);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+	if (atomic_read(&printk_cpulock_nested)) {
+		atomic_dec(&printk_cpulock_nested);
+		return;
+	}
+
+	/*
+	 * This CPU is finished loading/storing data:
+	 * LMM(__printk_cpu_unlock:A)
+	 */
+
+	/*
+	 * Guarantee loads and stores from this CPU when it was the
+	 * lock owner are visible to the next lock owner. This pairs
+	 * with __printk_cpu_trylock:A.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of this CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of the next CPU
+	 */
+	atomic_set_release(&printk_cpulock_owner,
+			   -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
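For context, below is a minimal sketch of how a caller might drive these three primitives: disable interrupts, try to take the cpu-reentrant lock, and if another CPU owns it, re-enable interrupts and busy-wait before retrying. This mirrors the acquire loop that the wrapper macros in the companion include/linux/printk.h change implement; the function name example_dump_with_cpu_lock() is hypothetical and not part of this diff.

	/*
	 * Hypothetical caller: serialize a multi-line dump across CPUs while
	 * still allowing the same CPU to re-enter (e.g. from an interrupt
	 * that fires during the dump). Interrupts must be disabled around
	 * __printk_cpu_trylock(), per its kerneldoc above.
	 */
	static void example_dump_with_cpu_lock(void)
	{
		unsigned long flags;

	retry:
		local_irq_save(flags);

		if (!__printk_cpu_trylock()) {
			/* Another CPU owns the lock: wait with IRQs enabled. */
			local_irq_restore(flags);
			__printk_wait_on_cpu_lock();
			goto retry;
		}

		/* Owner (possibly nested): output is not interleaved with other CPUs. */
		pr_info("example dump from CPU %d\n", smp_processor_id());

		__printk_cpu_unlock();
		local_irq_restore(flags);
	}

Note that because ownership is tracked per CPU in printk_cpulock_owner and re-entry only bumps printk_cpulock_nested, a nested call on the owning CPU succeeds immediately and its unlock merely drops the nesting count; only the outermost unlock releases the lock to other CPUs.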