author | Peter Zijlstra <peterz@infradead.org> | 2020-03-31 20:38:12 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2020-04-08 12:05:07 +0200 |
commit | 9a019db0b6bebc84d6b64636faf73ed6d64cd4bb (patch) | |
tree | dfcc531d36b54034c06d82678cf66e2019485363 /kernel/locking | |
parent | a13f58a0cafa7b0416a2898bc3b0defbb305d108 (diff) | |
locking/lockdep: Improve 'invalid wait context' splat
The 'invalid wait context' splat doesn't print all the information required to reconstruct / validate the error; specifically, the irq-context state is missing.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
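With the patch applied, the "other info" section of the splat gains a context line. A sketch of the resulting output, using only the strings visible in the diff below; the numerals are an assumption, since the printed value is the numeric lockdep_wait_type of the current context (here taken to be LD_WAIT_MAX, i.e. plain task context):

```
other info that might help us debug this:
context-{5:5}
... held locks printed by lockdep_print_held_locks() ...
stack backtrace:
```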
Diffstat (limited to 'kernel/locking')
-rw-r--r-- | kernel/locking/lockdep.c | 51 |
1 file changed, 31 insertions, 20 deletions
```diff
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1511690e4de7..ac10db66cc63 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3952,10 +3952,36 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
+static inline short task_wait_context(struct task_struct *curr)
+{
+	/*
+	 * Set appropriate wait type for the context; for IRQs we have to take
+	 * into account force_irqthread as that is implied by PREEMPT_RT.
+	 */
+	if (curr->hardirq_context) {
+		/*
+		 * Check if force_irqthreads will run us threaded.
+		 */
+		if (curr->hardirq_threaded || curr->irq_config)
+			return LD_WAIT_CONFIG;
+
+		return LD_WAIT_SPIN;
+	} else if (curr->softirq_context) {
+		/*
+		 * Softirqs are always threaded.
+		 */
+		return LD_WAIT_CONFIG;
+	}
+
+	return LD_WAIT_MAX;
+}
+
 static int
 print_lock_invalid_wait_context(struct task_struct *curr,
 				struct held_lock *hlock)
 {
+	short curr_inner;
+
 	if (!debug_locks_off())
 		return 0;
 	if (debug_locks_silent)
@@ -3971,6 +3997,10 @@ print_lock_invalid_wait_context(struct task_struct *curr,
 	print_lock(hlock);
 
 	pr_warn("other info that might help us debug this:\n");
+
+	curr_inner = task_wait_context(curr);
+	pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
+
 	lockdep_print_held_locks(curr);
 
 	pr_warn("stack backtrace:\n");
@@ -4017,26 +4047,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 	}
 	depth++;
 
-	/*
-	 * Set appropriate wait type for the context; for IRQs we have to take
-	 * into account force_irqthread as that is implied by PREEMPT_RT.
-	 */
-	if (curr->hardirq_context) {
-		/*
-		 * Check if force_irqthreads will run us threaded.
-		 */
-		if (curr->hardirq_threaded || curr->irq_config)
-			curr_inner = LD_WAIT_CONFIG;
-		else
-			curr_inner = LD_WAIT_SPIN;
-	} else if (curr->softirq_context) {
-		/*
-		 * Softirqs are always threaded.
-		 */
-		curr_inner = LD_WAIT_CONFIG;
-	} else {
-		curr_inner = LD_WAIT_MAX;
-	}
+	curr_inner = task_wait_context(curr);
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
```
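For readers who want to poke at the decision table, here is a minimal userspace sketch of the task_wait_context() logic. The enum mirrors the lockdep_wait_type ordering assumed under CONFIG_PROVE_RAW_LOCK_NESTING, and struct fake_task is a hypothetical stand-in for the lockdep-relevant fields of struct task_struct; this is an illustration, not kernel code.

```c
#include <stdio.h>

/*
 * Assumed ordering of the lockdep wait types when
 * CONFIG_PROVE_RAW_LOCK_NESTING is enabled; illustrative only.
 */
enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */
	LD_WAIT_FREE,		/* wait free */
	LD_WAIT_SPIN,		/* spinning locks, e.g. raw_spinlock_t */
	LD_WAIT_CONFIG,		/* preemptible on PREEMPT_RT, spins otherwise */
	LD_WAIT_SLEEP,		/* sleeping locks, e.g. mutexes */
	LD_WAIT_MAX,		/* must be last */
};

/* Hypothetical stand-in for the lockdep-relevant task_struct fields. */
struct fake_task {
	unsigned int hardirq_context;	/* running a hard interrupt handler */
	unsigned int hardirq_threaded;	/* that handler runs threaded */
	unsigned int irq_config;	/* IRQ configured as threaded */
	unsigned int softirq_context;	/* running softirq work */
};

/* Same branch structure as the helper the patch introduces. */
static short task_wait_context(const struct fake_task *curr)
{
	if (curr->hardirq_context) {
		/* A threaded hardirq may sleep on PREEMPT_RT. */
		if (curr->hardirq_threaded || curr->irq_config)
			return LD_WAIT_CONFIG;

		return LD_WAIT_SPIN;
	} else if (curr->softirq_context) {
		/* Softirqs are always threaded on PREEMPT_RT. */
		return LD_WAIT_CONFIG;
	}

	/* Plain task context: every wait type is permitted. */
	return LD_WAIT_MAX;
}

static void show(const char *what, const struct fake_task *t)
{
	short w = task_wait_context(t);

	/* Mirrors the new splat line: pr_warn("context-{%d:%d}\n", ...). */
	printf("%-20s context-{%d:%d}\n", what, w, w);
}

int main(void)
{
	struct fake_task hardirq  = { .hardirq_context = 1 };
	struct fake_task threaded = { .hardirq_context = 1,
				      .hardirq_threaded = 1 };
	struct fake_task softirq  = { .softirq_context = 1 };
	struct fake_task task     = { 0 };

	show("hardirq (raw):", &hardirq);	/* context-{2:2} */
	show("hardirq (threaded):", &threaded);	/* context-{3:3} */
	show("softirq:", &softirq);		/* context-{3:3} */
	show("task:", &task);			/* context-{5:5} */
	return 0;
}
```

The interesting cases are the hardirq ones: a hard interrupt only relaxes from LD_WAIT_SPIN to LD_WAIT_CONFIG when its handler is (or will be forced to run) threaded, which is exactly the force_irqthreads situation that PREEMPT_RT implies; plain task context returns LD_WAIT_MAX, permitting any wait type.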