author		Paul E. McKenney <paulmck@kernel.org>	2020-01-07 15:48:39 -0800
committer	Paul E. McKenney <paulmck@kernel.org>	2020-02-20 15:58:22 -0800
commit		a5b8950180f8e5acb802d1672e0b4d0ceee6126e (patch)
tree		fa0f8d7e3bd76c2db2df2c59d818c6b2f8def89c /kernel/rcu
parent		65bb0dc437c3e57a6cde2b81170c8af4b9c90735 (diff)
rcu: Add READ_ONCE() to rcu_data ->gpwrap
The rcu_data structure's ->gpwrap field is read locklessly, so this
commit adds the required READ_ONCE() to a pair of loads in order to
avoid destructive compiler optimizations.
This data race was reported by KCSAN.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
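For background, READ_ONCE() turns the marked load into a volatile access, which forbids the compiler from tearing it, fusing it with adjacent loads, or refetching the value. A minimal standalone C sketch of the pattern follows; the simplified READ_ONCE() definition and the wrapped/wait_for_wrap names are illustrative only, not the kernel's actual implementation.

#include <stdbool.h>

/* Simplified stand-in for the kernel macro: a volatile access keeps
 * the compiler from caching, tearing, or refetching the load. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

bool wrapped;	/* written by another thread, read locklessly here */

void wait_for_wrap(void)
{
	/* Without READ_ONCE(), the compiler could hoist the load out of
	 * the loop and spin forever on a stale register value. */
	while (!READ_ONCE(wrapped))
		;
}

The writer side would use a matching WRITE_ONCE() store; once both accesses are marked, KCSAN no longer flags the pair as a data race.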
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c	2
-rw-r--r--	kernel/rcu/tree_stall.h	2
2 files changed, 2 insertions, 2 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a70f56bb56a7..e851a12920e6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1322,7 +1322,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
 	rcu_lockdep_assert_cblist_protected(rdp);
 	c = rcu_seq_snap(&rcu_state.gp_seq);
-	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
 		/* Old request still live, so mark recent callbacks. */
 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
 		return;
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 43dc688c3785..bca637b274fb 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -602,7 +602,7 @@ void show_rcu_gp_kthreads(void)
 			continue;
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
 			rdp = per_cpu_ptr(&rcu_data, cpu);
-			if (rdp->gpwrap ||
+			if (READ_ONCE(rdp->gpwrap) ||
 			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
 					 READ_ONCE(rdp->gp_seq_needed)))
 				continue;