Diffstat (limited to 'mm')
 mm/khugepaged.c  | 4 ++--
 mm/mmu_gather.c  | 2 +-
 mm/slab.c        | 4 ++--
 mm/slab_common.c | 6 +++---
 mm/swap.c        | 3 +--
 5 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c13625c1ad5e..7b86600a47c9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
{
struct mm_struct *mm = mm_slot->mm;
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_test_exit(mm)) {
/* free mm_slot */
@@ -1631,7 +1631,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
int progress = 0;
VM_BUG_ON(!pages);
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot;
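Both khugepaged hunks replace an open-coded lock check with lockdep_assert_held(). A minimal sketch of the difference, using a hypothetical demo_lock: spin_is_locked() always returns 0 on uniprocessor builds (hence the old NR_CPUS != 1 guard) and only reports that *some* context holds the lock, whereas lockdep_assert_held() verifies that the current context holds it and compiles to nothing when CONFIG_LOCKDEP is off.

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/mmdebug.h>
#include <linux/threads.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock, for illustration only */

static void demo_assert_locked(void)
{
	/* Old pattern: must special-case UP, and cannot tell who holds the lock. */
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&demo_lock));

	/* New pattern: per-context check, zero cost without CONFIG_LOCKDEP. */
	lockdep_assert_held(&demo_lock);
}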
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a9fbc4a37d5..f2f03c655807 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -199,7 +199,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
if (*batch) {
tlb_table_invalidate(tlb);
- call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
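The mmu_gather change depends on the RCU flavor consolidation: call_rcu() now also waits out preempt- and irq-disabled regions, so the separate call_rcu_sched() flavor is redundant. A hedged sketch of the callback pattern, with hypothetical names standing in for the real batch structure:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_batch {
	struct rcu_head rcu;		/* embedded head for the RCU callback */
	void *pages[8];
};

static void demo_batch_free(struct rcu_head *head)
{
	struct demo_batch *batch = container_of(head, struct demo_batch, rcu);

	kfree(batch);
}

static void demo_batch_retire(struct demo_batch *batch)
{
	/*
	 * Post-consolidation, call_rcu() invokes the callback only after
	 * every reader has finished, including code running with irqs or
	 * preemption disabled -- the readers call_rcu_sched() used to target.
	 */
	call_rcu(&batch->rcu, demo_batch_free);
}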
diff --git a/mm/slab.c b/mm/slab.c
index 2a5654bb3b3f..3abb9feb3818 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
* To protect lockless access to n->shared during irq disabled context.
* If n->shared isn't NULL in irq disabled context, accessing to it is
* guaranteed to be valid until irq is re-enabled, because it will be
- * freed after synchronize_sched().
+ * freed after synchronize_rcu().
*/
if (old_shared && force_change)
- synchronize_sched();
+ synchronize_rcu();
fail:
kfree(old_shared);
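The slab.c hunk is the synchronous form of the same conversion. The comment in the hunk states the invariant: readers touch n->shared only while irqs are disabled, and consolidated RCU treats an irq-disabled region as a read-side critical section, so one synchronize_rcu() before freeing is enough. A sketch under those assumptions (illustrative names, not the real slab internals):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	spinlock_t list_lock;
	void *shared;			/* read only while irqs are disabled */
};

static void demo_replace_shared(struct demo_node *n, void *new_shared)
{
	void *old_shared;

	spin_lock_irq(&n->list_lock);
	old_shared = n->shared;
	n->shared = new_shared;		/* publish the replacement */
	spin_unlock_irq(&n->list_lock);

	/*
	 * Every reader ran with irqs disabled, which consolidated RCU
	 * counts as a read-side critical section; once this returns,
	 * no reader can still be using the old pointer.
	 */
	synchronize_rcu();
	kfree(old_shared);
}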
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7eb8dc136c1c..9c11e8a937d2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
css_get(&s->memcg_params.memcg->css);
s->memcg_params.deact_fn = deact_fn;
- call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+ call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
}
void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
mutex_unlock(&slab_mutex);
/*
- * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+ * SLUB deactivates the kmem_caches through call_rcu. Make
* sure all registered rcu callbacks have been invoked.
*/
if (IS_ENABLED(CONFIG_SLUB))
- rcu_barrier_sched();
+ rcu_barrier();
/*
* SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
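Both slab_common hunks keep the queueing and flushing APIs paired: a barrier waits only for callbacks posted with the matching call_* primitive, so once deactivation queues through call_rcu(), the flush path must use rcu_barrier() rather than rcu_barrier_sched(). Roughly, reusing the hypothetical demo_batch names from the mmu_gather sketch above:

static void demo_retire_then_flush(struct demo_batch *batch)
{
	call_rcu(&batch->rcu, demo_batch_free);	/* queue the callback... */

	/*
	 * ...and wait for it, along with every other previously queued
	 * call_rcu() callback. Mixing flavors here could let the flush
	 * return while a deactivation callback was still pending.
	 */
	rcu_barrier();
}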
diff --git a/mm/swap.c b/mm/swap.c
index aa483719922e..5d786019eab9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -823,8 +823,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
- VM_BUG_ON(NR_CPUS != 1 &&
- !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+ lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
if (!list)
SetPageLRU(page_tail);
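The swap.c hunk repeats the lockdep conversion from khugepaged, and also folds the two-line VM_BUG_ON() into a single assertion. One configuration wrinkle is worth noting: VM_BUG_ON() fires only under CONFIG_DEBUG_VM, while lockdep_assert_held() fires only under CONFIG_LOCKDEP, so the check moves between debug options. Paraphrased from the kernel headers (condensed, not literal source):

/* include/linux/mmdebug.h, condensed */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)		BUG_ON(cond)
#else
#define VM_BUG_ON(cond)		BUILD_BUG_ON_INVALID(cond)	/* emits no code */
#endif

/* include/linux/lockdep.h, condensed */
#ifdef CONFIG_LOCKDEP
#define lockdep_assert_held(l)	\
	do { WARN_ON(debug_locks && !lockdep_is_held(l)); } while (0)
#else
#define lockdep_assert_held(l)	do { (void)(l); } while (0)
#endif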