author	Paul E. McKenney <paulmck@linux.ibm.com>	2019-04-16 14:09:15 -0700
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2019-08-13 14:35:49 -0700
commit	c00045be32fe13333ba8c62748ba04747c182838 (patch)
tree	f62d6a3588c76d30d16e9f720fa30c5d8f4106ea	/kernel/rcu
parent	750d7f6a434ff4640fa825dfb1eccb44e79fb6af (diff)
rcu/nocb: Make rcutree_migrate_callbacks() start at leaf rcu_node structure
Because rcutree_migrate_callbacks() is invoked infrequently and because an
exact snapshot of the grace-period state might save some callbacks a second
trip through a grace period, this function has used the root rcu_node
structure.  However, this safe-second-trip optimization happens only if
rcutree_migrate_callbacks() races with grace-period initialization, so it
is not worth the added mental load.  This commit therefore makes
rcutree_migrate_callbacks() start with the leaf rcu_node structures, as is
done elsewhere.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
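For readers less familiar with the RCU combining tree: each CPU's rcu_data
carries a ->mynode pointer to the leaf rcu_node covering that CPU (the diff
below does my_rnp = my_rdp->mynode), whereas rcu_get_root() returns the root
of the tree, which is what the old code locked.  The following is a minimal,
self-contained sketch of that leaf-versus-root distinction; the structure
definitions, the helper demo_get_root(), and the printed labels are
simplified stand-ins for illustration only, not the actual types from
kernel/rcu/tree.h.

/*
 * Toy model of the leaf-vs-root lookup this commit changes.  Only the
 * fields needed for the illustration are modeled; the real rcu_node and
 * rcu_data structures carry much more state.
 */
#include <stdio.h>

struct rcu_node {
	const char *name;		/* "root" or "leaf" for the demo */
};

struct rcu_data {
	int cpu;
	struct rcu_node *mynode;	/* leaf rcu_node covering this CPU */
};

static struct rcu_node demo_root = { .name = "root" };
static struct rcu_node demo_leaf = { .name = "leaf" };

/* Stand-in for rcu_get_root(): always the top of the combining tree. */
static struct rcu_node *demo_get_root(void)
{
	return &demo_root;
}

int main(void)
{
	struct rcu_data rdp = { .cpu = 0, .mynode = &demo_leaf };

	/* The old code locked and advanced callbacks against this node... */
	printf("rcu_get_root(): %s\n", demo_get_root()->name);
	/* ...the new code uses the CPU's own leaf rcu_node instead. */
	printf("rdp->mynode:    %s\n", rdp.mynode->name);
	return 0;
}

Using the per-CPU leaf rather than the root matches the locking done
elsewhere in tree.c (as the commit message notes) and, as a side effect,
keeps this path off the single root-level lock.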
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c	11
1 files changed, 6 insertions, 5 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 969ba292a669..ea479d81da7f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3157,8 +3157,8 @@ void rcutree_migrate_callbacks(int cpu)
 {
 	unsigned long flags;
 	struct rcu_data *my_rdp;
+	struct rcu_node *my_rnp;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-	struct rcu_node *rnp_root = rcu_get_root();
 	bool needwake;
 
 	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
@@ -3167,18 +3167,19 @@ void rcutree_migrate_callbacks(int cpu)
 
 	local_irq_save(flags);
 	my_rdp = this_cpu_ptr(&rcu_data);
+	my_rnp = my_rdp->mynode;
 	if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
 		local_irq_restore(flags);
 		return;
 	}
-	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
 	/* Leverage recent GPs and set GP for new callbacks. */
-	needwake = rcu_advance_cbs(rnp_root, rdp) ||
-		   rcu_advance_cbs(rnp_root, my_rdp);
+	needwake = rcu_advance_cbs(my_rnp, rdp) ||
+		   rcu_advance_cbs(my_rnp, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
-	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
+	raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake();
 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||