| field | value | date |
| --- | --- | --- |
| author | Kan Liang <kan.liang@linux.intel.com> | 2021-04-12 07:30:48 -0700 |
| committer | Peter Zijlstra <peterz@infradead.org> | 2021-04-19 20:03:25 +0200 |
| commit | 0d18f2dfead8dd63bf1186c9ef38528d6a615a55 (patch) | |
| tree | 5dba9ccd7d54ca4a549bc2cb0180dec52e5c0fa9 /arch | |
| parent | eaacf07d1116f6bf3b93b265515fccf2301097f2 (diff) | |
perf/x86: Hybrid PMU support for hardware cache event
The hardware cache events differ among hybrid PMUs, so each hybrid
PMU should have its own hw cache event table.
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1618237865-33448-9-git-send-email-kan.liang@linux.intel.com
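To illustrate the idea in the commit message, here is a minimal standalone C sketch (not kernel code; the struct name, table sizes, and event encodings are made up for illustration) of giving each hybrid PMU its own hw cache event table and resolving a (type, op, result) triple through the table of that PMU:

```c
/*
 * Standalone sketch, not kernel code: each hybrid PMU instance carries
 * its own hw cache event table, and the lookup indexes the table that
 * belongs to the event's PMU instead of one global table.  All names
 * and encodings below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

enum { CACHE_MAX = 2, CACHE_OP_MAX = 2, CACHE_RESULT_MAX = 2 };

struct hybrid_pmu_model {
	const char *name;
	uint64_t hw_cache_event_ids[CACHE_MAX][CACHE_OP_MAX][CACHE_RESULT_MAX];
};

/* Two core types whose encodings for the same cache event differ. */
static struct hybrid_pmu_model big_core   = { .name = "cpu_core" };
static struct hybrid_pmu_model small_core = { .name = "cpu_atom" };

static uint64_t cache_event_config(struct hybrid_pmu_model *pmu,
				   int type, int op, int result)
{
	/* Per-PMU lookup: the same (type, op, result) triple can map to
	 * a different raw encoding on each core type. */
	return pmu->hw_cache_event_ids[type][op][result];
}

int main(void)
{
	big_core.hw_cache_event_ids[0][0][0]   = 0x0151;	/* made-up values */
	small_core.hw_cache_event_ids[0][0][0] = 0x2081;

	printf("%s: 0x%llx\n", big_core.name,
	       (unsigned long long)cache_event_config(&big_core, 0, 0, 0));
	printf("%s: 0x%llx\n", small_core.name,
	       (unsigned long long)cache_event_config(&small_core, 0, 0, 0));
	return 0;
}
```

On a hybrid part that mixes core types, the same generic cache event can map to different raw encodings, which is why a single global table is no longer sufficient and the tables move into struct x86_hybrid_pmu below.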
Diffstat (limited to 'arch')

| mode | file | lines changed |
| --- | --- | --- |
| -rw-r--r-- | arch/x86/events/core.c | 5 |
| -rw-r--r-- | arch/x86/events/perf_event.h | 9 |

2 files changed, 11 insertions, 3 deletions
```diff
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 1aeb31cb4ee5..e8cb892c5826 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -376,8 +376,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 		return -EINVAL;
 	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
-	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
-
+	val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result];
 	if (val == 0)
 		return -ENOENT;
 
@@ -385,7 +384,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 		return -EINVAL;
 
 	hwc->config |= val;
-	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+	attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result];
 
 	return x86_pmu_extra_regs(val, event);
 }
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 2688e455df7b..b65cf4633f24 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -639,6 +639,15 @@ struct x86_hybrid_pmu {
 	int				num_counters;
 	int				num_counters_fixed;
 	struct event_constraint		unconstrained;
+
+	u64				hw_cache_event_ids
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX];
+	u64				hw_cache_extra_regs
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX];
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
```
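The hunks above rely on hybrid_var(), which is not defined in this patch (it comes from earlier in the hybrid PMU series). Roughly, it resolves a named variable to the copy inside the event's PMU on a hybrid system and falls back to the global variable otherwise. The following standalone C sketch (hypothetical names, not the kernel macro) models that selection:

```c
/*
 * Standalone model, not the kernel's hybrid_var() macro: on a hybrid
 * system, index the table that lives inside the event's PMU; on a
 * non-hybrid system, keep using the single global table.  Names and
 * values are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pmu_model {
	const char *name;
	uint64_t hw_cache_event_ids[4];	/* flattened table for brevity */
};

static uint64_t global_hw_cache_event_ids[4] = { 0x10, 0x20, 0x30, 0x40 };
static bool system_is_hybrid;

/* Return the table to index: per-PMU on hybrid, global otherwise. */
static uint64_t *cache_event_table(struct pmu_model *pmu)
{
	if (system_is_hybrid && pmu)
		return pmu->hw_cache_event_ids;
	return global_hw_cache_event_ids;
}

int main(void)
{
	struct pmu_model atom = {
		.name = "cpu_atom",
		.hw_cache_event_ids = { 0x11, 0x21, 0x31, 0x41 },
	};

	printf("non-hybrid lookup: 0x%llx\n",
	       (unsigned long long)cache_event_table(NULL)[2]);

	system_is_hybrid = true;
	printf("hybrid lookup:     0x%llx\n",
	       (unsigned long long)cache_event_table(&atom)[2]);
	return 0;
}
```

This keeps the fast path in set_ext_hw_attr() unchanged on non-hybrid systems while letting each hybrid PMU supply its own encodings.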