author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-30 17:00:06 +0200
committer Ingo Molnar <mingo@elte.hu>                2010-04-02 19:52:04 +0200
commit    b4cdc5c264b35c67007800dec3928e9547a9d70b (patch)
tree      1e4c723538c5c1c2bb68c96975cea5bc47789991 /arch/x86/kernel/cpu/perf_event.c
parent    a098f4484bc7dae23f5b62360954007b99b64600 (diff)
perf, x86: Fix up the ANY flag stuff
Stephane noticed that the ANY flag was in generic arch code, and Cyrill
reported that it broke the P4 code.

Solve this by merging x86_pmu::raw_event into x86_pmu::hw_config and
providing intel_pmu and amd_pmu specific versions of this callback. The
intel_pmu one deals with the ANY flag, the amd_pmu one adds the few
extra event bits AMD64 has.

Reported-by: Stephane Eranian <eranian@google.com>
Reported-by: Cyrill Gorcunov <gorcunov@gmail.com>
Acked-by: Robert Richter <robert.richter@amd.com>
Acked-by: Cyrill Gorcunov <gorcunov@gmail.com>
Acked-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1269968113.5258.442.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
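As a rough sketch of the Intel-side callback the changelog describes (the real
implementation lives in perf_event_intel.c and is not part of this diff, so the
body here is inferred from the changelog and from the code removed from
__hw_perf_event_init below), the ANY handling would move into something like:

static int intel_pmu_hw_config(struct perf_event *event)
{
	/* Start from the generic x86 setup (IRQ enable, USR/OS, raw bits). */
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	/* The ANY bit can only arrive via a raw event's attr.config. */
	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	/*
	 * ANY ("count on all hyper-threads") exposes sibling-thread
	 * activity, so keep the privilege check that used to live in
	 * the generic code.
	 */
	if ((event->attr.config & ARCH_PERFMON_EVENTSEL_ANY) &&
	    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}

The Intel x86_pmu definition would then point .hw_config at such a wrapper,
while other vendors keep the generic x86_pmu_hw_config or supply their own.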
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 35
1 file changed, 12 insertions(+), 23 deletions(-)
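Likewise, a hedged sketch of the AMD-side wrapper the changelog mentions; it
would land in perf_event_amd.c, outside this diffstat, and AMD64_RAW_EVENT_MASK
is assumed here to be the wider AMD64 event-select mask:

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	/*
	 * AMD64 defines a few event-select bits beyond the architectural
	 * X86_RAW_EVENT_MASK; fold them in on top of the generic config.
	 * (AMD64_RAW_EVENT_MASK is an assumption, not shown in this diff.)
	 */
	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}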
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1dd42c18f1cb..65e9c5efb618 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -196,12 +196,11 @@ struct x86_pmu {
 	void		(*enable_all)(int added);
 	void		(*enable)(struct perf_event *);
 	void		(*disable)(struct perf_event *);
-	int		(*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
+	int		(*hw_config)(struct perf_event *event);
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
-	u64		(*raw_event)(u64);
 	int		max_events;
 	int		num_counters;
 	int		num_counters_fixed;
@@ -426,28 +425,26 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
 	return 0;
 }
 
-static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+static int x86_pmu_hw_config(struct perf_event *event)
 {
 	/*
 	 * Generate PMC IRQs:
 	 * (keep 'enabled' bit clear for now)
 	 */
-	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
 
 	/*
 	 * Count user and OS events unless requested not to
 	 */
-	if (!attr->exclude_user)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-	if (!attr->exclude_kernel)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+	if (!event->attr.exclude_user)
+		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
+	if (!event->attr.exclude_kernel)
+		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
-	return 0;
-}
+	if (event->attr.type == PERF_TYPE_RAW)
+		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
-static u64 x86_pmu_raw_event(u64 hw_event)
-{
-	return hw_event & X86_RAW_EVENT_MASK;
+	return 0;
 }
 
 /*
@@ -489,7 +486,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	hwc->last_tag = ~0ULL;
 
 	/* Processor specifics */
-	err = x86_pmu.hw_config(attr, hwc);
+	err = x86_pmu.hw_config(event);
 	if (err)
 		return err;
 
@@ -508,16 +505,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 			return -EOPNOTSUPP;
 	}
 
-	/*
-	 * Raw hw_event type provide the config in the hw_event structure
-	 */
-	if (attr->type == PERF_TYPE_RAW) {
-		hwc->config |= x86_pmu.raw_event(attr->config);
-		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
-		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
+	if (attr->type == PERF_TYPE_RAW)
 		return 0;
-	}
 
 	if (attr->type == PERF_TYPE_HW_CACHE)
 		return set_ext_hw_attr(hwc, attr);