From 9511bce9fe8e5e6c0f923c09243a713eba560141 Mon Sep 17 00:00:00 2001
From: Song Liu
Date: Tue, 17 Apr 2018 23:29:07 -0700
Subject: perf/core: Fix bad use of igrab()

As Miklos reported and suggested:

  "This pattern repeats two times in trace_uprobe.c and in
   kernel/events/core.c as well:

       ret = kern_path(filename, LOOKUP_FOLLOW, &path);
       if (ret)
               goto fail_address_parse;

       inode = igrab(d_inode(path.dentry));
       path_put(&path);

   And it's wrong. You can only hold a reference to the inode if you
   have an active ref to the superblock as well (which is normally
   through path.mnt) or holding s_umount.

   This way unmounting the containing filesystem while the tracepoint
   is active will give you the "VFS: Busy inodes after unmount..."
   message and a crash when the inode is finally put.

   Solution: store path instead of inode."

This patch fixes the issue in kernel/events/core.c.

Reviewed-and-tested-by: Alexander Shishkin
Reported-by: Miklos Szeredi
Signed-off-by: Song Liu
Signed-off-by: Peter Zijlstra (Intel)
Cc:
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Link: http://lkml.kernel.org/r/20180418062907.3210386-2-songliubraving@fb.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/pt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 3b993942a0e4..8d016ce5b80d 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1194,7 +1194,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
		    filter->action == PERF_ADDR_FILTER_ACTION_START)
			return -EOPNOTSUPP;

-		if (!filter->inode) {
+		if (!filter->path.dentry) {
			if (!valid_kernel_ip(filter->offset))
				return -EINVAL;

@@ -1221,7 +1221,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
		return;

	list_for_each_entry(filter, &head->list, entry) {
-		if (filter->inode && !offs[range]) {
+		if (filter->path.dentry && !offs[range]) {
			msr_a = msr_b = 0;
		} else {
			/* apply the offset */
--
cgit v1.2.3

From 10b1105004fbd81058383537b67df35cc188ab62 Mon Sep 17 00:00:00 2001
From: Alexey Budankov
Date: Thu, 24 May 2018 17:11:54 +0300
Subject: perf/x86: Store user space frame-pointer value on a sample

Store the user space frame-pointer value (the BP register) into the
perf trace on a sample for a process, so that the value becomes
available when unwinding call stacks for functions that gain event
samples.

Signed-off-by: Alexey Budankov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: http://lkml.kernel.org/r/311d4a34-f81b-5535-3385-01427ac73b41@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/perf_regs.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index e47b2dbbdef3..c06c4c16c6b6 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -151,17 +151,19 @@ void perf_get_regs_user(struct perf_regs *regs_user,
	regs_user_copy->sp = user_regs->sp;
	regs_user_copy->cs = user_regs->cs;
	regs_user_copy->ss = user_regs->ss;
-
	/*
-	 * Most system calls don't save these registers, don't report them.
+	 * Store user space frame-pointer value on sample
+	 * to facilitate stack unwinding for cases when
+	 * user space executable code has such support
+	 * enabled at compile time:
	 */
+	regs_user_copy->bp = user_regs->bp;
+
	regs_user_copy->bx = -1;
-	regs_user_copy->bp = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;
-
	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * the ABI. Be careful: we're in NMI context, and we're
--
cgit v1.2.3

From 2da331465f44f9618abe8837d1a68405d550b66e Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Thu, 3 May 2018 11:25:06 -0700
Subject: perf/x86/intel/uncore: Introduce customized event_read() for client IMC uncore

There are two free-running counters for the client IMC uncore. The
customized event_init() function hard-codes their indices to
'UNCORE_PMC_IDX_FIXED' and 'UNCORE_PMC_IDX_FIXED + 1'. To support the
index 'UNCORE_PMC_IDX_FIXED + 1', the generic uncore_perf_event_update()
is obscurely hacked. This code quality issue will cause problems when a
new counter index is introduced into the generic code, for example a
new index for free-running counters.

Introduce a customized event_read() function for the client IMC uncore.
The customized function is copied from the previous generic
uncore_pmu_event_read(). The index 'UNCORE_PMC_IDX_FIXED + 1' is thereby
isolated to the client IMC uncore only.

Signed-off-by: Kan Liang
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-1-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/uncore_snb.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index aee5e8496be4..df535215d18b 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
		uncore_pmu_start_hrtimer(box);
 }

+static void snb_uncore_imc_event_read(struct perf_event *event)
+{
+	struct intel_uncore_box *box = uncore_event_to_box(event);
+	u64 prev_count, new_count, delta;
+	int shift;
+
+	/*
+	 * There are two free running counters in IMC.
+	 * The index for the second one is hardcoded to
+	 * UNCORE_PMC_IDX_FIXED + 1.
+	 */
+	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+		shift = 64 - uncore_fixed_ctr_bits(box);
+	else
+		shift = 64 - uncore_perf_ctr_bits(box);
+
+	/* the hrtimer might modify the previous event value */
+again:
+	prev_count = local64_read(&event->hw.prev_count);
+	new_count = uncore_read_counter(box, event);
+	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+		goto again;
+
+	delta = (new_count << shift) - (prev_count << shift);
+	delta >>= shift;
+
+	local64_add(delta, &event->count);
+}
+
 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
 {
	struct intel_uncore_box *box = uncore_event_to_box(event);
@@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
-		uncore_perf_event_update(box, event);
+		snb_uncore_imc_event_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
 }
@@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = {
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
-	.read		= uncore_pmu_event_read,
+	.read		= snb_uncore_imc_event_read,
 };

 static struct intel_uncore_ops snb_uncore_imc_ops = {
--
cgit v1.2.3

From d71f11c076c420c4e2fceb4faefa144e055e0935 Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Thu, 3 May 2018 11:25:07 -0700
Subject: perf/x86/intel/uncore: Correct fixed counter index check for NHM

For Nehalem and Westmere, there is only one fixed counter for the W-Box.
There is no index which is bigger than UNCORE_PMC_IDX_FIXED, so it is
not correct to use '>=' to check for the fixed counter; use '==' instead.
This code quality issue would cause problems when a new counter index is
introduced.

Signed-off-by: Kan Liang
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-2-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/uncore_nhmex.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index 93e7a8397cde..173e2674be6e 100644
--- a/arch/x86/events/intel/uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
 {
	struct hw_perf_event *hwc = &event->hw;

-	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+	if (hwc->idx == UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
--
cgit v1.2.3

From 4749f8196452eeb73cf2086a6a9705bae479d33d Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Thu, 3 May 2018 11:25:08 -0700
Subject: perf/x86/intel/uncore: Correct fixed counter index check in generic code

There is no index which is bigger than UNCORE_PMC_IDX_FIXED. The only
exception is the client IMC uncore, which has been specially handled.
For the generic code, it is not correct to use '>=' to check for the
fixed counter; use '==' instead. This code quality issue would cause
problems when a new counter index is introduced.
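To illustrate, here is the index layout once the free-running index
added later in this series is in place; a sketch, with the
UNCORE_PMC_IDX_* values taken from uncore.h as extended by this series:

	/* GP counters occupy indices 0..7 (UNCORE_PMC_IDX_MAX_GENERIC == 8) */
	#define UNCORE_PMC_IDX_FIXED		8	/* the single fixed counter   */
	#define UNCORE_PMC_IDX_FREERUNNING	9	/* added later in this series */

	/*
	 * With '>=', a free running event (idx == 9) would wrongly take
	 * the fixed counter path; '==' keeps that path exact:
	 */
	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);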
Signed-off-by: Kan Liang
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-3-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/uncore.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index a7956fc7ca1d..3b0f93eb3cc0 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
	u64 prev_count, new_count, delta;
	int shift;

-	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);
--
cgit v1.2.3

From 927b2deb067b8b4753fc09c7a42092f43fc0c1f6 Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Thu, 3 May 2018 11:25:09 -0700
Subject: perf/x86/intel/uncore: Add new data structures for free running counters

A number of free running counters have been introduced for uncore.
They provide highly valuable information to a wide array of customers.
For example, Skylake Server has IIO free running counters to collect
Input/Output x BW/Utilization. There is NO event available on the
general purpose counters that counts exactly the same thing as the free
running counters, so the generic uncore code needs to be enhanced to
support the new counters.

In the uncore document, there is no event-code assigned to free running
counters, so some events need to be defined to indicate them. The events
are encoded as event-code + umask-code.

The event-code for all free running counters is 0xff, which is the same
as for the fixed counters:

- It has not been decided what code will be used for common events on
  future platforms. 0xff is the only code which will definitely not be
  used as any common event-code.
- Current events on the general purpose counters cannot be re-used,
  because there is NO event available that counts exactly the same
  thing as the free running counters.
- Even in the existing code, the fixed counters for the core, which
  have the same event-code, may count different things. Hence, it
  should not surprise users if the free running counters that share the
  same event-code also count different things.

The umask is used to distinguish the counters. The umask-code
distinguishes a fixed counter from a free running counter, and the
different types of free running counters from each other.

For fixed counters, the umask-code is 0x0X, where X indicates the index
of the fixed counter, starting from 0:

- This is compatible with the old event encoding.
- Currently, there is only one fixed counter. There are still 15
  reserved spaces for extension.

For free running counters, the umask-code uses the rest of the space
and follows the format 0xXY:

- X stands for the type of free running counter, starting from 1.
- Y stands for the index of a free running counter within its type,
  starting from 0.
- The free running counters count different things. They can be
  categorized into several types, according to MSR location, bit width
  and definition. E.g. there are three types of IIO free running
  counters on Skylake Server, monitoring IO CLOCKS, BANDWIDTH and
  UTILIZATION on different ports. This makes it easy to locate the free
  running counter of a specific type.
- So far, there are at most 8 counters of each type.
There are still 8 reserved spaces for extension. Introducing a new index to indicate the free running counters. Only one index is enough for all free running counters. Because the free running counters are always active, and the event and free running counter are always 1:1 mapped, it does not need extra index to indicate the assigned counter. Introducing a new data structure to store free running counters related information for each type. It includes the number of counters, bit width, base address, offset between counters and offset between boxes. Introducing several inline helpers to check index for fixed counter and free running counter, validate free running counter event, and retrieve the free running counter information according to box and event. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: acme@kernel.org Cc: eranian@google.com Link: http://lkml.kernel.org/r/1525371913-10597-4-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore.h | 123 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 414dc7e7c950..eb0265359019 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -12,8 +12,13 @@ #define UNCORE_FIXED_EVENT 0xff #define UNCORE_PMC_IDX_MAX_GENERIC 8 +#define UNCORE_PMC_IDX_MAX_FIXED 1 +#define UNCORE_PMC_IDX_MAX_FREERUNNING 1 #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC -#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) +#define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \ + UNCORE_PMC_IDX_MAX_FIXED) +#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \ + UNCORE_PMC_IDX_MAX_FREERUNNING) #define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \ ((dev << 24) | (func << 16) | (type << 8) | idx) @@ -35,6 +40,7 @@ struct intel_uncore_ops; struct intel_uncore_pmu; struct intel_uncore_box; struct uncore_event_desc; +struct freerunning_counters; struct intel_uncore_type { const char *name; @@ -42,6 +48,7 @@ struct intel_uncore_type { int num_boxes; int perf_ctr_bits; int fixed_ctr_bits; + int num_freerunning_types; unsigned perf_ctr; unsigned event_ctl; unsigned event_mask; @@ -59,6 +66,7 @@ struct intel_uncore_type { struct intel_uncore_pmu *pmus; struct intel_uncore_ops *ops; struct uncore_event_desc *event_descs; + struct freerunning_counters *freerunning; const struct attribute_group *attr_groups[4]; struct pmu *pmu; /* for custom pmu ops */ }; @@ -129,6 +137,14 @@ struct uncore_event_desc { const char *config; }; +struct freerunning_counters { + unsigned int counter_base; + unsigned int counter_offset; + unsigned int box_offset; + unsigned int num_counters; + unsigned int bits; +}; + struct pci2phy_map { struct list_head list; int segment; @@ -157,6 +173,16 @@ static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ static struct kobj_attribute format_attr_##_var = \ __ATTR(_name, 0444, __uncore_##_var##_show, NULL) +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline bool uncore_pmc_freerunning(int idx) +{ + return idx == UNCORE_PMC_IDX_FREERUNNING; +} + static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) { return box->pmu->type->box_ctl; @@ -214,6 +240,60 @@ static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) return box->pmu->type->fixed_ctr + 
uncore_msr_box_offset(box); } + +/* + * In the uncore document, there is no event-code assigned to free running + * counters. Some events need to be defined to indicate the free running + * counters. The events are encoded as event-code + umask-code. + * + * The event-code for all free running counters is 0xff, which is the same as + * the fixed counters. + * + * The umask-code is used to distinguish a fixed counter and a free running + * counter, and different types of free running counters. + * - For fixed counters, the umask-code is 0x0X. + * X indicates the index of the fixed counter, which starts from 0. + * - For free running counters, the umask-code uses the rest of the space. + * It would bare the format of 0xXY. + * X stands for the type of free running counters, which starts from 1. + * Y stands for the index of free running counters of same type, which + * starts from 0. + * + * For example, there are three types of IIO free running counters on Skylake + * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters. + * The event-code for all the free running counters is 0xff. + * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type, + * which umask-code starts from 0x10. + * So 'ioclk' is encoded as event=0xff,umask=0x10 + * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is + * the second type, which umask-code starts from 0x20. + * So 'bw_in_port2' is encoded as event=0xff,umask=0x22 + */ +static inline unsigned int uncore_freerunning_idx(u64 config) +{ + return ((config >> 8) & 0xf); +} + +#define UNCORE_FREERUNNING_UMASK_START 0x10 + +static inline unsigned int uncore_freerunning_type(u64 config) +{ + return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf); +} + +static inline +unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int idx = uncore_freerunning_idx(event->attr.config); + struct intel_uncore_pmu *pmu = box->pmu; + + return pmu->type->freerunning[type].counter_base + + pmu->type->freerunning[type].counter_offset * idx + + pmu->type->freerunning[type].box_offset * pmu->pmu_idx; +} + static inline unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) { @@ -276,11 +356,52 @@ static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box) return box->pmu->type->fixed_ctr_bits; } +static inline +unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + + return box->pmu->type->freerunning[type].bits; +} + +static inline int uncore_num_freerunning(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + + return box->pmu->type->freerunning[type].num_counters; +} + +static inline int uncore_num_freerunning_types(struct intel_uncore_box *box, + struct perf_event *event) +{ + return box->pmu->type->num_freerunning_types; +} + +static inline bool check_valid_freerunning_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int idx = uncore_freerunning_idx(event->attr.config); + + return (type < uncore_num_freerunning_types(box, event)) && + (idx < uncore_num_freerunning(box, event)); +} + static inline int uncore_num_counters(struct intel_uncore_box *box) { return box->pmu->type->num_counters; } 
+static inline bool is_freerunning_event(struct perf_event *event) +{ + u64 cfg = event->attr.config; + + return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) && + (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START); +} + static inline void uncore_disable_box(struct intel_uncore_box *box) { if (box->pmu->type->ops->disable_box) -- cgit v1.2.3 From 0e0162dfcd1fbe4c711ee86f24f966c318999603 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 3 May 2018 11:25:10 -0700 Subject: perf/x86/intel/uncore: Add infrastructure for free running counters There are a number of free running counters introduced for uncore, which provide highly valuable information to a wide array of customers. However, the generic uncore code doesn't support them yet. The free running counters will be specially handled based on their unique attributes: - They are read-only. They cannot be enabled/disabled. - The event and the counter are always 1:1 mapped. It doesn't need to be assigned nor tracked by event_list. - They are always active. It doesn't need to check the availability. - They have different bit width. Also, using inline helpers to replace the check for fixed counter and free running counter. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: acme@kernel.org Cc: eranian@google.com Link: http://lkml.kernel.org/r/1525371913-10597-5-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore.c | 68 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 3b0f93eb3cc0..0a6f6973690b 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -203,7 +203,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, hwc->idx = idx; hwc->last_tag = ++box->tags[idx]; - if (hwc->idx == UNCORE_PMC_IDX_FIXED) { + if (uncore_pmc_fixed(hwc->idx)) { hwc->event_base = uncore_fixed_ctr(box); hwc->config_base = uncore_fixed_ctl(box); return; @@ -218,7 +218,9 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e u64 prev_count, new_count, delta; int shift; - if (event->hw.idx == UNCORE_PMC_IDX_FIXED) + if (uncore_pmc_freerunning(event->hw.idx)) + shift = 64 - uncore_freerunning_bits(box, event); + else if (uncore_pmc_fixed(event->hw.idx)) shift = 64 - uncore_fixed_ctr_bits(box); else shift = 64 - uncore_perf_ctr_bits(box); @@ -454,10 +456,25 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags) struct intel_uncore_box *box = uncore_event_to_box(event); int idx = event->hw.idx; - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) return; - if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) + /* + * Free running counter is read-only and always active. + * Use the current counter value as start point. + * There is no overflow interrupt for free running counter. + * Use hrtimer to periodically poll the counter to avoid overflow. 
+ */ + if (uncore_pmc_freerunning(event->hw.idx)) { + list_add_tail(&event->active_entry, &box->active_list); + local64_set(&event->hw.prev_count, + uncore_read_counter(box, event)); + if (box->n_active++ == 0) + uncore_pmu_start_hrtimer(box); + return; + } + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) return; event->hw.state = 0; @@ -479,6 +496,15 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags) struct intel_uncore_box *box = uncore_event_to_box(event); struct hw_perf_event *hwc = &event->hw; + /* Cannot disable free running counter which is read-only */ + if (uncore_pmc_freerunning(hwc->idx)) { + list_del(&event->active_entry); + if (--box->n_active == 0) + uncore_pmu_cancel_hrtimer(box); + uncore_perf_event_update(box, event); + return; + } + if (__test_and_clear_bit(hwc->idx, box->active_mask)) { uncore_disable_event(box, event); box->n_active--; @@ -512,6 +538,17 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags) if (!box) return -ENODEV; + /* + * The free funning counter is assigned in event_init(). + * The free running counter event and free running counter + * are 1:1 mapped. It doesn't need to be tracked in event_list. + */ + if (uncore_pmc_freerunning(hwc->idx)) { + if (flags & PERF_EF_START) + uncore_pmu_event_start(event, 0); + return 0; + } + ret = n = uncore_collect_events(box, event, false); if (ret < 0) return ret; @@ -570,6 +607,14 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags) uncore_pmu_event_stop(event, PERF_EF_UPDATE); + /* + * The event for free running counter is not tracked by event_list. + * It doesn't need to force event->hw.idx = -1 to reassign the counter. + * Because the event and the free running counter are 1:1 mapped. + */ + if (uncore_pmc_freerunning(event->hw.idx)) + return; + for (i = 0; i < box->n_events; i++) { if (event == box->event_list[i]) { uncore_put_event_constraint(box, event); @@ -603,6 +648,10 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu, struct intel_uncore_box *fake_box; int ret = -EINVAL, n; + /* The free running counter is always active. */ + if (uncore_pmc_freerunning(event->hw.idx)) + return 0; + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); if (!fake_box) return -ENOMEM; @@ -690,6 +739,17 @@ static int uncore_pmu_event_init(struct perf_event *event) /* fixed counters have event field hardcoded to zero */ hwc->config = 0ULL; + } else if (is_freerunning_event(event)) { + if (!check_valid_freerunning_event(box, event)) + return -EINVAL; + event->hw.idx = UNCORE_PMC_IDX_FREERUNNING; + /* + * The free running counter event and free running counter + * are always 1:1 mapped. + * The free running counter is always active. + * Assign the free running counter here. + */ + event->hw.event_base = uncore_freerunning_counter(box, event); } else { hwc->config = event->attr.config & (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); -- cgit v1.2.3 From 0f519f0352e37e7d71bdce5559517c74a35f6e33 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 3 May 2018 11:25:11 -0700 Subject: perf/x86/intel/uncore: Support IIO free-running counters on SKX As of Skylake Server, there are a number of free running counters in each IIO Box that collect counts of per-box IO clocks and per-port Input/Output x BW/Utilization. The free running counters cannot be part of the existing IIO BOX, because, quoting from Peter Zijlstra: "This will result in some (probably) unexpected scheduling artifacts. 
Probably the only way to really cure that is to have the free running counters in their own PMU and not share with the GP counters of this box." So let's add a new PMU for the free running counters, as suggested. The free-running counter is read-only and always active. Counting will be suspended only when the IIO Box is powered down. There are three types of IIO free-running counters on Skylake server, IO CLOCKS counter, BANDWIDTH counters and UTILIZATION counters. IO CLOCKS counter is a clock of IIO box. BANDWIDTH counters are to count inbound(PCIe->CPU)/outbound(CPU->PCIe) bandwidth. UTILIZATION counters are to count input/output utilization. The bit width of the free-running counters is 36-bits. Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@kernel.org Cc: eranian@google.com Link: http://lkml.kernel.org/r/1525371913-10597-6-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore_snbep.c | 82 ++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 77076a102e34..87dc0263a2e1 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3522,6 +3522,87 @@ static struct intel_uncore_type skx_uncore_iio = { .format_group = &skx_uncore_iio_format_group, }; +enum perf_uncore_iio_freerunning_type_id { + SKX_IIO_MSR_IOCLK = 0, + SKX_IIO_MSR_BW = 1, + SKX_IIO_MSR_UTIL = 2, + + SKX_IIO_FREERUNNING_TYPE_MAX, +}; + + +static struct freerunning_counters skx_iio_freerunning[] = { + [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 }, + [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 }, + [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 }, +}; + +static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = { + /* Free-Running IO CLOCKS Counter */ + INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), + /* Free-Running IIO BANDWIDTH Counters */ + INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"), + INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"), + INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"), + INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"), + INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), 
+ /* Free-running IIO UTILIZATION Counters */ + INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"), + INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"), + INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"), + INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"), + INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"), + INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"), + INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"), + INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = { + .read_counter = uncore_msr_read_counter, +}; + +static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + NULL, +}; + +static const struct attribute_group skx_uncore_iio_freerunning_format_group = { + .name = "format", + .attrs = skx_uncore_iio_freerunning_formats_attr, +}; + +static struct intel_uncore_type skx_uncore_iio_free_running = { + .name = "iio_free_running", + .num_counters = 17, + .num_boxes = 6, + .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX, + .freerunning = skx_iio_freerunning, + .ops = &skx_uncore_iio_freerunning_ops, + .event_descs = skx_uncore_iio_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + static struct attribute *skx_uncore_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -3595,6 +3676,7 @@ static struct intel_uncore_type *skx_msr_uncores[] = { &skx_uncore_ubox, &skx_uncore_chabox, &skx_uncore_iio, + &skx_uncore_iio_free_running, &skx_uncore_irp, &skx_uncore_pcu, NULL, -- cgit v1.2.3 From 5a6c9d94e9ed7410142bc6fcb638a4db1895aa0c Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 3 May 2018 11:25:12 -0700 Subject: perf/x86/intel/uncore: Expose uncore_pmu_event*() functions Some uncores have customized PMU. For customized PMU, it does not need to customize everything. For example, it only needs to customize init() function for client IMC uncore. Other functions like add()/del()/start()/stop()/read() can use generic code. Expose the uncore_pmu_event_add/del/start/stop() functions. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: acme@kernel.org Cc: eranian@google.com Link: http://lkml.kernel.org/r/1525371913-10597-7-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore.c | 8 ++++---- arch/x86/events/intel/uncore.h | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 0a6f6973690b..15b07379e72d 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -451,7 +451,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int return ret ? 
-EINVAL : 0;
 }

-static void uncore_pmu_event_start(struct perf_event *event, int flags)
+void uncore_pmu_event_start(struct perf_event *event, int flags)
 {
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;
@@ -491,7 +491,7 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags)
	}
 }

-static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+void uncore_pmu_event_stop(struct perf_event *event, int flags)
 {
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
@@ -528,7 +528,7 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags)
	}
 }

-static int uncore_pmu_event_add(struct perf_event *event, int flags)
+int uncore_pmu_event_add(struct perf_event *event, int flags)
 {
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
@@ -600,7 +600,7 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags)
	return 0;
 }

-static void uncore_pmu_event_del(struct perf_event *event, int flags)
+void uncore_pmu_event_del(struct perf_event *event, int flags)
 {
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index eb0265359019..c9e1e0bef3c3 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -467,6 +467,10 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
+void uncore_pmu_event_start(struct perf_event *event, int flags);
+void uncore_pmu_event_stop(struct perf_event *event, int flags);
+int uncore_pmu_event_add(struct perf_event *event, int flags);
+void uncore_pmu_event_del(struct perf_event *event, int flags);
 void uncore_pmu_event_read(struct perf_event *event);
 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
 struct event_constraint *
--
cgit v1.2.3

From 9aae1780e7e81e54edfb70ba33ead5b0b48be009 Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Thu, 3 May 2018 11:25:13 -0700
Subject: perf/x86/intel/uncore: Clean up client IMC uncore

The counters in the client IMC uncore are free running counters, not
fixed counters, and should be treated as such: apply the new
infrastructure for free running counters.

Introduce a new type, SNB_PCI_UNCORE_IMC_DATA, for the client IMC free
running counters. Keep the customized event_init() function so it stays
compatible with the old event encoding, and clean up the other
customized event_*() functions.
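The resulting pattern, in which a customized PMU overrides only
event_init() and wires everything else to the generic helpers exposed
by the previous patch, is sketched below; my_uncore_event_init() is a
placeholder name for whatever custom init a given uncore needs:

	static struct pmu my_uncore_pmu = {
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= my_uncore_event_init,	/* the only custom callback */
		.add		= uncore_pmu_event_add,
		.del		= uncore_pmu_event_del,
		.start		= uncore_pmu_event_start,
		.stop		= uncore_pmu_event_stop,
		.read		= uncore_pmu_event_read,
	};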
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: acme@kernel.org Cc: eranian@google.com Link: http://lkml.kernel.org/r/1525371913-10597-8-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore_snb.c | 132 ++++++------------------------------- 1 file changed, 20 insertions(+), 112 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index df535215d18b..8527c3e1038b 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -285,6 +285,15 @@ static struct uncore_event_desc snb_uncore_imc_events[] = { #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE +enum perf_snb_uncore_imc_freerunning_types { + SNB_PCI_UNCORE_IMC_DATA = 0, + SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, +}; + +static struct freerunning_counters snb_uncore_imc_freerunning[] = { + [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 }, +}; + static struct attribute *snb_uncore_imc_formats_attr[] = { &format_attr_event.attr, NULL, @@ -341,9 +350,8 @@ static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf } /* - * custom event_init() function because we define our own fixed, free - * running counters, so we do not want to conflict with generic uncore - * logic. Also simplifies processing + * Keep the custom event_init() function compatible with old event + * encoding for free running counters. */ static int snb_uncore_imc_event_init(struct perf_event *event) { @@ -405,11 +413,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event) switch (cfg) { case SNB_UNCORE_PCI_IMC_DATA_READS: base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; - idx = UNCORE_PMC_IDX_FIXED; + idx = UNCORE_PMC_IDX_FREERUNNING; break; case SNB_UNCORE_PCI_IMC_DATA_WRITES: base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; - idx = UNCORE_PMC_IDX_FIXED + 1; + idx = UNCORE_PMC_IDX_FREERUNNING; break; default: return -EINVAL; @@ -430,104 +438,6 @@ static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_ev return 0; } -static void snb_uncore_imc_event_start(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - u64 count; - - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) - return; - - event->hw.state = 0; - box->n_active++; - - list_add_tail(&event->active_entry, &box->active_list); - - count = snb_uncore_imc_read_counter(box, event); - local64_set(&event->hw.prev_count, count); - - if (box->n_active == 1) - uncore_pmu_start_hrtimer(box); -} - -static void snb_uncore_imc_event_read(struct perf_event *event) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - u64 prev_count, new_count, delta; - int shift; - - /* - * There are two free running counters in IMC. - * The index for the second one is hardcoded to - * UNCORE_PMC_IDX_FIXED + 1. 
- */ - if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) - shift = 64 - uncore_fixed_ctr_bits(box); - else - shift = 64 - uncore_perf_ctr_bits(box); - - /* the hrtimer might modify the previous event value */ -again: - prev_count = local64_read(&event->hw.prev_count); - new_count = uncore_read_counter(box, event); - if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) - goto again; - - delta = (new_count << shift) - (prev_count << shift); - delta >>= shift; - - local64_add(delta, &event->count); -} - -static void snb_uncore_imc_event_stop(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (!(hwc->state & PERF_HES_STOPPED)) { - box->n_active--; - - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); - hwc->state |= PERF_HES_STOPPED; - - list_del(&event->active_entry); - - if (box->n_active == 0) - uncore_pmu_cancel_hrtimer(box); - } - - if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { - /* - * Drain the remaining delta count out of a event - * that we are disabling: - */ - snb_uncore_imc_event_read(event); - hwc->state |= PERF_HES_UPTODATE; - } -} - -static int snb_uncore_imc_event_add(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (!box) - return -ENODEV; - - hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - if (!(flags & PERF_EF_START)) - hwc->state |= PERF_HES_ARCH; - - snb_uncore_imc_event_start(event, 0); - - return 0; -} - -static void snb_uncore_imc_event_del(struct perf_event *event, int flags) -{ - snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); -} - int snb_pci2phy_map_init(int devid) { struct pci_dev *dev = NULL; @@ -559,11 +469,11 @@ int snb_pci2phy_map_init(int devid) static struct pmu snb_uncore_imc_pmu = { .task_ctx_nr = perf_invalid_context, .event_init = snb_uncore_imc_event_init, - .add = snb_uncore_imc_event_add, - .del = snb_uncore_imc_event_del, - .start = snb_uncore_imc_event_start, - .stop = snb_uncore_imc_event_stop, - .read = snb_uncore_imc_event_read, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, }; static struct intel_uncore_ops snb_uncore_imc_ops = { @@ -581,12 +491,10 @@ static struct intel_uncore_type snb_uncore_imc = { .name = "imc", .num_counters = 2, .num_boxes = 1, - .fixed_ctr_bits = 32, - .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE, + .num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, + .freerunning = snb_uncore_imc_freerunning, .event_descs = snb_uncore_imc_events, .format_group = &snb_uncore_imc_format_group, - .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE, - .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK, .ops = &snb_uncore_imc_ops, .pmu = &snb_uncore_imc_pmu, }; -- cgit v1.2.3
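As a footnote on how the snb_uncore_imc_freerunning[] entry above
decodes, here is a sketch based on struct freerunning_counters and
uncore_freerunning_counter() from earlier in this series (assuming
SNB_UNCORE_PCI_IMC_DATA_READS_BASE is 0x5050, per the 0x5054 writes
base shown above):

	/*
	 * [SNB_PCI_UNCORE_IMC_DATA] =
	 *	{ SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 }
	 *
	 *   .counter_base   = 0x5050  (DATA_READS)
	 *   .counter_offset = 0x4     (DATA_WRITES sits at 0x5054)
	 *   .box_offset     = 0x0     (single IMC box)
	 *   .num_counters   = 2
	 *   .bits           = 32
	 *
	 * uncore_freerunning_counter() reads counter idx of box pmu_idx at:
	 *
	 *   counter_base + counter_offset * idx + box_offset * pmu_idx
	 *
	 * i.e. DATA_READS at 0x5050 + 0x4 * 0 and DATA_WRITES at
	 * 0x5050 + 0x4 * 1, each a 32-bit free running counter.
	 */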