| field | value | date |
|---|---|---|
| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2014-08-13 12:32:04 +0530 |
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2014-08-13 18:20:42 +1000 |
| commit | 9e813308a5c18c58f9ccae1ec72ed4e14eaf9025 (patch) | |
| tree | 3e72135c0e403930b809112552e059deaa7e5c11 /arch | |
| parent | 85c1fafd7262e68ad821ee1808686b1392b1167d (diff) | |
powerpc/thp: Add tracepoints to track hugepage invalidate
Add tracepoints to track hugepage invalidation. This helps us
debug difficult-to-track bugs.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
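The diffstat is limited to arch/, so the event definitions pulled in via <trace/events/thp.h> are not visible in this commit. As orientation only, the sketch below shows what a TRACE_EVENT definition matching the trace_hugepage_update(addr, old, clr, set) call added in pmd_hugepage_update() could look like; the actual header in the tree may differ in names, fields, and format strings.

```c
/*
 * Hedged sketch of a <trace/events/thp.h>-style event header -- NOT
 * taken from this patch; the real header is outside the arch/ diffstat
 * above. Field names and the printk format are illustrative only.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thp

#if !defined(_TRACE_THP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_THP_H

#include <linux/types.h>
#include <linux/tracepoint.h>

TRACE_EVENT(hugepage_update,

	/* mirrors trace_hugepage_update(addr, old, clr, set) in pmd_hugepage_update() */
	TP_PROTO(unsigned long addr, unsigned long pte,
		 unsigned long clr, unsigned long set),
	TP_ARGS(addr, pte, clr, set),

	TP_STRUCT__entry(
		__field(unsigned long, addr)
		__field(unsigned long, pte)
		__field(unsigned long, clr)
		__field(unsigned long, set)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pte = pte;
		__entry->clr = clr;
		__entry->set = set;
	),

	TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx",
		  __entry->addr, __entry->pte, __entry->clr, __entry->set)
);

#endif /* _TRACE_THP_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>
```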
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 6
-rw-r--r-- | arch/powerpc/mm/tlb_hash64.c | 4
2 files changed, 10 insertions, 0 deletions
```diff
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 948a81e02ddb..c8d709ab489d 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -54,6 +54,9 @@
 #include "mmu_decl.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/thp.h>
+
 /* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
@@ -537,6 +540,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 		old = pmd_val(*pmdp);
 		*pmdp = __pmd((old & ~clr) | set);
 #endif
+	trace_hugepage_update(addr, old, clr, set);
 	if (old & _PAGE_HASHPTE)
 		hpte_do_hugepage_flush(mm, addr, pmdp, old);
 	return old;
@@ -642,6 +646,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	 * If we didn't had the splitting flag set, go and flush the
 	 * HPTE entries.
 	 */
+	trace_hugepage_splitting(address, old);
 	if (!(old & _PAGE_SPLITTING)) {
 		/* We need to flush the hpte */
 		if (old & _PAGE_HASHPTE)
@@ -709,6 +714,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
+	trace_hugepage_set_pmd(addr, pmd);
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 9adda5790463..d2a94b85dbc2 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -30,6 +30,8 @@
 #include <asm/tlb.h>
 #include <asm/bug.h>
 
+#include <trace/events/thp.h>
+
 DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 /*
@@ -213,6 +215,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 		if (ptep == NULL)
 			continue;
 		pte = pte_val(*ptep);
+		if (hugepage_shift)
+			trace_hugepage_invalidate(start, pte_val(pte));
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
 		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
```
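One detail worth noting in the hunks above is the include pattern: pgtable_64.c defines CREATE_TRACE_POINTS before including <trace/events/thp.h>, while tlb_hash64.c includes the header plainly. This follows the usual kernel tracepoint convention, restated below as an illustrative (not verbatim) snippet: exactly one translation unit instantiates the events via <trace/define_trace.h>, and every other user only picks up the trace_*() declarations.

```c
/* Illustration of the convention used by the patch, not literal tree contents. */

/* arch/powerpc/mm/pgtable_64.c: emits the tracepoint instances exactly once */
#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/* arch/powerpc/mm/tlb_hash64.c: only calls trace_hugepage_invalidate() */
#include <trace/events/thp.h>
```

At runtime the new events should then appear in tracefs grouped by the TRACE_SYSTEM name (presumably thp, matching the header path), e.g. under events/thp/hugepage_invalidate.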