author | Christophe Leroy <christophe.leroy@csgroup.eu> | 2021-06-03 09:29:05 +0000
committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-06-17 00:09:09 +1000
commit | dac3db1edf8b4c75859f07789f577322f2a51e3a (patch)
tree | 4844406e26aca51ddf72681e6e278adb9ef215a5 /arch/powerpc
parent | c13066e53aabd8f268f051d267270765e10343aa (diff)
powerpc/nohash: Remove DEBUG_MAP_CONSISTENCY
mmu_context handling has been there for years, so we would know
by now if there were problems with the maps.
DEBUG_MAP_CONSISTENCY is not user selectable; remove it.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/6fe2b88956db53f8d6ee221525b2c5dc6aec82c6.1622712515.git.christophe.leroy@csgroup.eu
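
For reference, the helper being removed asserted a simple invariant: every context id marked used in the context_map bitmap must have a non-NULL owner recorded in context_mm[], and the number of clear bits must match nr_free_contexts. Below is a minimal user-space sketch of that invariant check, not the kernel code itself; the FIRST_CONTEXT/LAST_CONTEXT values and the bitmap helper are hypothetical stand-ins chosen for illustration.

```c
/*
 * Sketch only: mirrors the consistency rule enforced by the removed
 * context_check_map(), using plain C instead of kernel primitives.
 * FIRST_CONTEXT/LAST_CONTEXT and map_bit() are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	255

static unsigned long context_map[(LAST_CONTEXT + 1 + 63) / 64];
static void *context_mm[LAST_CONTEXT + 1];
static unsigned int nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;

static bool map_bit(unsigned int nr)
{
	return context_map[nr / 64] & (1UL << (nr % 64));
}

/* Return true when bitmap, owner array and free counter all agree. */
static bool context_map_is_consistent(void)
{
	unsigned int id, nrf = 0;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		bool used = map_bit(id);

		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL)) {
			fprintf(stderr, "context %u is %s but owner is %p\n",
				id, used ? "used" : "free", context_mm[id]);
			return false;
		}
	}
	return nrf == nr_free_contexts;
}

int main(void)
{
	printf("consistent: %d\n", context_map_is_consistent());
	return 0;
}
```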
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/mm/nohash/mmu_context.c | 47
1 file changed, 1 insertion, 46 deletions
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index 1a4991c5aea2..46ff6a6b6e78 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -21,7 +21,6 @@
  * also clear mm->cpu_vm_mask bits when processes are migrated
  */
 
-//#define DEBUG_MAP_CONSISTENCY
 //#define DEBUG_CLAMP_LAST_CONTEXT   31
 //#define DEBUG_HARDER
 
@@ -180,9 +179,6 @@ static unsigned int steal_all_contexts(void)
 		if (id != FIRST_CONTEXT) {
 			context_mm[id] = NULL;
 			__clear_bit(id, context_map);
-#ifdef DEBUG_MAP_CONSISTENCY
-			mm->context.active = 0;
-#endif
 		}
 		if (IS_ENABLED(CONFIG_SMP))
 			__clear_bit(id, stale_map[cpu]);
@@ -224,37 +220,6 @@ static unsigned int steal_context_up(unsigned int id)
 	return id;
 }
 
-#ifdef DEBUG_MAP_CONSISTENCY
-static void context_check_map(void)
-{
-	unsigned int id, nrf, nact;
-
-	nrf = nact = 0;
-	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
-		int used = test_bit(id, context_map);
-		if (!used)
-			nrf++;
-		if (used != (context_mm[id] != NULL))
-			pr_err("MMU: Context %d is %s and MM is %p !\n",
-			       id, used ? "used" : "free", context_mm[id]);
-		if (context_mm[id] != NULL)
-			nact += context_mm[id]->context.active;
-	}
-	if (nrf != nr_free_contexts) {
-		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
-		       nr_free_contexts, nrf);
-		nr_free_contexts = nrf;
-	}
-	if (nact > num_online_cpus())
-		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
-		       nact, num_online_cpus());
-	if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
-		pr_err("MMU: Context 0 has been freed !!!\n");
-}
-#else
-static void context_check_map(void) { }
-#endif
-
 static void set_context(unsigned long id, pgd_t *pgd)
 {
 	if (IS_ENABLED(CONFIG_PPC_8xx)) {
@@ -309,14 +274,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
 
 	/* If we already have a valid assigned context, skip all that */
 	id = next->context.id;
-	if (likely(id != MMU_NO_CONTEXT)) {
-#ifdef DEBUG_MAP_CONSISTENCY
-		if (context_mm[id] != next)
-			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
-			       next, id, id, context_mm[id]);
-#endif
+	if (likely(id != MMU_NO_CONTEXT))
 		goto ctxt_ok;
-	}
 
 	/* We really don't have a context, let's try to acquire one */
 	id = next_context;
@@ -352,7 +311,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
 	next->context.id = id;
 	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
 
-	context_check_map();
 ctxt_ok:
 
 	/* If that context got marked stale on this CPU, then flush the
@@ -421,9 +379,6 @@ void destroy_context(struct mm_struct *mm)
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);
 		mm->context.id = MMU_NO_CONTEXT;
-#ifdef DEBUG_MAP_CONSISTENCY
-		mm->context.active = 0;
-#endif
 		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}