From 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 11 Jun 2010 17:32:03 +0200
Subject: perf: Reduce perf_disable() usage

Since the current perf_disable() usage is only an optimization,
remove it for now. This eases the removal of the __weak
hw_perf_enable() interface.

Signed-off-by: Peter Zijlstra
Cc: paulus
Cc: stephane eranian
Cc: Robert Richter
Cc: Will Deacon
Cc: Paul Mundt
Cc: Frederic Weisbecker
Cc: Cyrill Gorcunov
Cc: Lin Ming
Cc: Yanmin
Cc: Deng-Cheng Zhu
Cc: David Miller
Cc: Michael Cree
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/powerpc/kernel/perf_event.c         | 3 +++
 arch/powerpc/kernel/perf_event_fsl_emb.c | 8 ++++++--
 2 files changed, 9 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 19131b2614b9..c1408821dbc2 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+	perf_disable();
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
+	perf_enable();
 }
 
 /*
@@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
+	perf_enable();
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index ea6a804e43fd..9bc84a7fd901 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static int fsl_emb_pmu_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuhw;
@@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;
 
+	perf_disable();
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
  out:
 	put_cpu_var(cpu_hw_events);
+	perf_enable();
 	return ret;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static void fsl_emb_pmu_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;
 
+	perf_disable();
 	if (i < 0)
 		goto out;
 
@@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;
 
  out:
+	perf_enable();
 	put_cpu_var(cpu_hw_events);
 }
 
--
cgit v1.2.3
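
A minimal stand-alone sketch (not part of the patch) of the bracketing pattern the change establishes: once the core stops wrapping these paths in perf_disable()/perf_enable(), each PMU callback that modifies hardware state disables the PMU itself for the duration of the update. All names below (toy_*) are invented for illustration and are not the kernel API.

/*
 * Toy model of a PMU transaction: start_txn stops the (pretend) hardware,
 * commit_txn/cancel_txn re-enable it, mirroring the perf_disable()/
 * perf_enable() calls added to power_pmu_{start,commit,cancel}_txn above.
 */
#include <stdio.h>

static int pmu_stopped;      /* toy stand-in for the hardware enable bit */
static int txn_in_progress;  /* toy stand-in for the PERF_EVENT_TXN flag */

static void toy_disable(void) { pmu_stopped = 1; }  /* ~ perf_disable() */
static void toy_enable(void)  { pmu_stopped = 0; }  /* ~ perf_enable()  */

/* start_txn: stop the PMU before batching events into a group */
static void toy_pmu_start_txn(void)
{
	toy_disable();
	txn_in_progress = 1;
}

/* commit_txn: program the batched events, then let the PMU run again */
static int toy_pmu_commit_txn(void)
{
	/* ... validate and write the batched event configuration here ... */
	txn_in_progress = 0;
	toy_enable();
	return 0;
}

/* cancel_txn: abandon the batch and re-enable the PMU */
static void toy_pmu_cancel_txn(void)
{
	txn_in_progress = 0;
	toy_enable();
}

int main(void)
{
	toy_pmu_start_txn();
	if (toy_pmu_commit_txn() != 0)
		toy_pmu_cancel_txn();
	printf("pmu_stopped=%d txn=%d\n", pmu_stopped, txn_in_progress);
	return 0;
}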