author	Paul E. McKenney <paulmck@kernel.org>	2020-02-09 02:29:36 -0800
committer	Paul E. McKenney <paulmck@kernel.org>	2020-04-27 11:01:16 -0700
commit	47fbb074536ecca5e0af3dcce340954c09ed4d1e
tree	d850c9eb39e66f2fb597305ddcb11707e77fcdb0
parent	5822b8126ff01e0baaf7d5168adc4ac8aeae088c
rcu: Use data_race() for RCU CPU stall-warning prints
Although the accesses used to determine whether or not a stall should be printed are an integral part of the concurrency algorithm governing use of the corresponding variables, the values that are simply printed are ancillary. As such, it is best to use data_race() for these accesses in order to provide the greatest latitude in the use of KCSAN for the other accesses that are an integral part of the algorithm. This commit therefore changes the relevant uses of READ_ONCE() to data_race().

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
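As an illustration of the distinction the commit message draws, here is a minimal sketch (the foo_state structure and foo_report() function are hypothetical, not part of this patch): READ_ONCE() marks a read that the algorithm depends on, so KCSAN keeps checking it for races, while data_race() tells KCSAN that writes racing with a diagnostic-only read are acceptable and should not be reported.

	#include <linux/compiler.h>	/* READ_ONCE(), data_race() */
	#include <linux/printk.h>

	/* Hypothetical state; gp_flags is written concurrently elsewhere. */
	struct foo_state {
		unsigned long gp_flags;
	};

	static struct foo_state foo;

	static void foo_report(void)
	{
		/*
		 * Algorithmic read: the value steers control flow, so keep
		 * READ_ONCE() and let KCSAN flag any racing writer.
		 */
		if (!READ_ONCE(foo.gp_flags))
			return;

		/*
		 * Diagnostic read: the value is only printed, so data_race()
		 * exempts this access from KCSAN checking without hiding
		 * races on the variable's other, marked accesses.
		 */
		pr_err("gp_flags %#lx\n", data_race(foo.gp_flags));
	}

The payoff is precision: were the diagnostic reads left as READ_ONCE(), KCSAN would treat them as intentional participants in the concurrency design; were they left plain, KCSAN would report races that the stall-warning code tolerates by design.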
 kernel/rcu/tree_stall.h | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 119ed6afd20f..e7da1111ab0c 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -360,7 +360,7 @@ static void rcu_check_gp_kthread_starvation(void)
 		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
 		       rcu_state.name, j,
 		       (long)rcu_seq_current(&rcu_state.gp_seq),
-		       READ_ONCE(rcu_state.gp_flags),
+		       data_race(rcu_state.gp_flags),
 		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
 		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
 		if (gpk) {
@@ -421,10 +421,10 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 		pr_err("INFO: Stall ended before state dump start\n");
 	} else {
 		j = jiffies;
-		gpa = READ_ONCE(rcu_state.gp_activity);
+		gpa = data_race(rcu_state.gp_activity);
 		pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 		       rcu_state.name, j - gpa, j, gpa,
-		       READ_ONCE(jiffies_till_next_fqs),
+		       data_race(jiffies_till_next_fqs),
 		       rcu_get_root()->qsmask);
 		/* In this case, the current CPU might be at fault. */
 		sched_show_task(current);
@@ -581,23 +581,23 @@ void show_rcu_gp_kthreads(void)
 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
 	j = jiffies;
-	ja = j - READ_ONCE(rcu_state.gp_activity);
-	jr = j - READ_ONCE(rcu_state.gp_req_activity);
-	jw = j - READ_ONCE(rcu_state.gp_wake_time);
+	ja = j - data_race(rcu_state.gp_activity);
+	jr = j - data_race(rcu_state.gp_req_activity);
+	jw = j - data_race(rcu_state.gp_wake_time);
 	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
 		rcu_state.name, gp_state_getname(rcu_state.gp_state),
 		rcu_state.gp_state, t ? t->state : 0x1ffffL,
-		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
-		(long)READ_ONCE(rcu_state.gp_seq),
-		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
-		READ_ONCE(rcu_state.gp_flags));
+		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
+		(long)data_race(rcu_state.gp_seq),
+		(long)data_race(rcu_get_root()->gp_seq_needed),
+		data_race(rcu_state.gp_flags));
 	rcu_for_each_node_breadth_first(rnp) {
 		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
 				 READ_ONCE(rnp->gp_seq_needed)))
 			continue;
 		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
-			rnp->grplo, rnp->grphi, (long)READ_ONCE(rnp->gp_seq),
-			(long)READ_ONCE(rnp->gp_seq_needed));
+			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
+			(long)data_race(rnp->gp_seq_needed));
 		if (!rcu_is_leaf_node(rnp))
 			continue;
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -607,7 +607,7 @@ void show_rcu_gp_kthreads(void)
 					 READ_ONCE(rdp->gp_seq_needed)))
 				continue;
 			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-				cpu, (long)READ_ONCE(rdp->gp_seq_needed));
+				cpu, (long)data_race(rdp->gp_seq_needed));
 		}
 	}
 	for_each_possible_cpu(cpu) {