author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-16 17:06:21 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-16 17:06:21 -0700
commit    | 772c1d06bd402f7ee72c61a18c2db74cd74b6758 (patch)
tree      | e362fc7e158b3580d810a26189ecf91ec8a4f141 /tools/perf/util/cpumap.c
parent    | c7eba51cfdf9cd1ca7ed4201b30be8b2bef15ff5 (diff)
parent    | e336b4027775cb458dc713745e526fa1a1996b2a (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Kernel side changes:
- Improved kprobes robustness
- Intel PEBS support for PT hardware tracing
- Other Intel PT improvements: high order pages memory footprint
reduction and various related cleanups
- Misc cleanups
The perf tooling side has been very busy in this cycle, with over 300
commits. This is an incomplete high-level summary of the many
improvements done by over 30 developers:
- Lots of updates to the following tools:
'perf c2c'
'perf config'
'perf record'
'perf report'
'perf script'
'perf test'
'perf top'
'perf trace'
- Updates to libperf and libtraceevent, and a consolidation of the
proliferation of x86 instruction decoder libraries.
- Vendor event updates for Intel and PowerPC CPUs,
- Updates to hardware tracing tooling for ARM and Intel CPUs,
- ... and lots of other changes and cleanups - see the shortlog and
Git log for details"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (322 commits)
kprobes: Prohibit probing on BUG() and WARN() address
perf/x86: Make more stuff static
x86, perf: Fix the dependency of the x86 insn decoder selftest
objtool: Ignore intentional differences for the x86 insn decoder
objtool: Update sync-check.sh from perf's check-headers.sh
perf build: Ignore intentional differences for the x86 insn decoder
perf intel-pt: Use shared x86 insn decoder
perf intel-pt: Remove inat.c from build dependency list
perf: Update .gitignore file
objtool: Move x86 insn decoder to a common location
perf metricgroup: Support multiple events for metricgroup
perf metricgroup: Scale the metric result
perf pmu: Change convert_scale from static to global
perf symbols: Move mem_info and branch_info out of symbol.h
perf auxtrace: Uninline functions that touch perf_session
perf tools: Remove needless evlist.h include directives
perf tools: Remove needless evlist.h include directives
perf tools: Remove needless thread_map.h include directives
perf tools: Remove needless thread.h include directives
perf tools: Remove needless map.h include directives
...
Diffstat (limited to 'tools/perf/util/cpumap.c')
-rw-r--r-- | tools/perf/util/cpumap.c | 285
1 file changed, 30 insertions, 255 deletions
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 39cce66b4ebc..a22c1114e880 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <api/fs/fs.h>
-#include "../perf.h"
 #include "cpumap.h"
+#include "debug.h"
+#include "event.h"
 #include <assert.h>
 #include <dirent.h>
 #include <stdio.h>
@@ -17,190 +18,11 @@ static int max_present_cpu_num;
 static int max_node_num;
 static int *cpunode_map;
 
-static struct cpu_map *cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
 {
-        struct cpu_map *cpus;
-        int nr_cpus;
+        struct perf_cpu_map *map;
 
-        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
-        if (nr_cpus < 0)
-                return NULL;
-
-        cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
-        if (cpus != NULL) {
-                int i;
-                for (i = 0; i < nr_cpus; ++i)
-                        cpus->map[i] = i;
-
-                cpus->nr = nr_cpus;
-                refcount_set(&cpus->refcnt, 1);
-        }
-
-        return cpus;
-}
-
-static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
-{
-        size_t payload_size = nr_cpus * sizeof(int);
-        struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
-
-        if (cpus != NULL) {
-                cpus->nr = nr_cpus;
-                memcpy(cpus->map, tmp_cpus, payload_size);
-                refcount_set(&cpus->refcnt, 1);
-        }
-
-        return cpus;
-}
-
-struct cpu_map *cpu_map__read(FILE *file)
-{
-        struct cpu_map *cpus = NULL;
-        int nr_cpus = 0;
-        int *tmp_cpus = NULL, *tmp;
-        int max_entries = 0;
-        int n, cpu, prev;
-        char sep;
-
-        sep = 0;
-        prev = -1;
-        for (;;) {
-                n = fscanf(file, "%u%c", &cpu, &sep);
-                if (n <= 0)
-                        break;
-                if (prev >= 0) {
-                        int new_max = nr_cpus + cpu - prev - 1;
-
-                        if (new_max >= max_entries) {
-                                max_entries = new_max + MAX_NR_CPUS / 2;
-                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
-                                if (tmp == NULL)
-                                        goto out_free_tmp;
-                                tmp_cpus = tmp;
-                        }
-
-                        while (++prev < cpu)
-                                tmp_cpus[nr_cpus++] = prev;
-                }
-                if (nr_cpus == max_entries) {
-                        max_entries += MAX_NR_CPUS;
-                        tmp = realloc(tmp_cpus, max_entries * sizeof(int));
-                        if (tmp == NULL)
-                                goto out_free_tmp;
-                        tmp_cpus = tmp;
-                }
-
-                tmp_cpus[nr_cpus++] = cpu;
-                if (n == 2 && sep == '-')
-                        prev = cpu;
-                else
-                        prev = -1;
-                if (n == 1 || sep == '\n')
-                        break;
-        }
-
-        if (nr_cpus > 0)
-                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-        else
-                cpus = cpu_map__default_new();
-out_free_tmp:
-        free(tmp_cpus);
-        return cpus;
-}
-
-static struct cpu_map *cpu_map__read_all_cpu_map(void)
-{
-        struct cpu_map *cpus = NULL;
-        FILE *onlnf;
-
-        onlnf = fopen("/sys/devices/system/cpu/online", "r");
-        if (!onlnf)
-                return cpu_map__default_new();
-
-        cpus = cpu_map__read(onlnf);
-        fclose(onlnf);
-        return cpus;
-}
-
-struct cpu_map *cpu_map__new(const char *cpu_list)
-{
-        struct cpu_map *cpus = NULL;
-        unsigned long start_cpu, end_cpu = 0;
-        char *p = NULL;
-        int i, nr_cpus = 0;
-        int *tmp_cpus = NULL, *tmp;
-        int max_entries = 0;
-
-        if (!cpu_list)
-                return cpu_map__read_all_cpu_map();
-
-        /*
-         * must handle the case of empty cpumap to cover
-         * TOPOLOGY header for NUMA nodes with no CPU
-         * ( e.g., because of CPU hotplug)
-         */
-        if (!isdigit(*cpu_list) && *cpu_list != '\0')
-                goto out;
-
-        while (isdigit(*cpu_list)) {
-                p = NULL;
-                start_cpu = strtoul(cpu_list, &p, 0);
-                if (start_cpu >= INT_MAX
-                    || (*p != '\0' && *p != ',' && *p != '-'))
-                        goto invalid;
-
-                if (*p == '-') {
-                        cpu_list = ++p;
-                        p = NULL;
-                        end_cpu = strtoul(cpu_list, &p, 0);
-
-                        if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
-                                goto invalid;
-
-                        if (end_cpu < start_cpu)
-                                goto invalid;
-                } else {
-                        end_cpu = start_cpu;
-                }
-
-                for (; start_cpu <= end_cpu; start_cpu++) {
-                        /* check for duplicates */
-                        for (i = 0; i < nr_cpus; i++)
-                                if (tmp_cpus[i] == (int)start_cpu)
-                                        goto invalid;
-
-                        if (nr_cpus == max_entries) {
-                                max_entries += MAX_NR_CPUS;
-                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
-                                if (tmp == NULL)
-                                        goto invalid;
-                                tmp_cpus = tmp;
-                        }
-                        tmp_cpus[nr_cpus++] = (int)start_cpu;
-                }
-                if (*p)
-                        ++p;
-
-                cpu_list = p;
-        }
-
-        if (nr_cpus > 0)
-                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-        else if (*cpu_list != '\0')
-                cpus = cpu_map__default_new();
-        else
-                cpus = cpu_map__dummy_new();
-invalid:
-        free(tmp_cpus);
-out:
-        return cpus;
-}
-
-static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
-{
-        struct cpu_map *map;
-
-        map = cpu_map__empty_new(cpus->nr);
+        map = perf_cpu_map__empty_new(cpus->nr);
         if (map) {
                 unsigned i;
 
@@ -220,14 +42,14 @@ static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
         return map;
 }
 
-static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
+static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
 {
-        struct cpu_map *map;
+        struct perf_cpu_map *map;
         int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
 
         nr = bitmap_weight(mask->mask, nbits);
 
-        map = cpu_map__empty_new(nr);
+        map = perf_cpu_map__empty_new(nr);
         if (map) {
                 int cpu, i = 0;
 
@@ -238,15 +60,15 @@ static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
 
 }
 
-struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
+struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
 {
         if (data->type == PERF_CPU_MAP__CPUS)
                 return cpu_map__from_entries((struct cpu_map_entries *)data->data);
         else
-                return cpu_map__from_mask((struct cpu_map_mask *)data->data);
+                return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
 }
 
-size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
+size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
 {
 #define BUFSIZE 1024
         char buf[BUFSIZE];
@@ -256,22 +78,9 @@ size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
 #undef BUFSIZE
 }
 
-struct cpu_map *cpu_map__dummy_new(void)
+struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
 {
-        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
-
-        if (cpus != NULL) {
-                cpus->nr = 1;
-                cpus->map[0] = -1;
-                refcount_set(&cpus->refcnt, 1);
-        }
-
-        return cpus;
-}
-
-struct cpu_map *cpu_map__empty_new(int nr)
-{
-        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
+        struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
 
         if (cpus != NULL) {
                 int i;
@@ -286,28 +95,6 @@ struct cpu_map *cpu_map__empty_new(int nr)
         return cpus;
 }
 
-static void cpu_map__delete(struct cpu_map *map)
-{
-        if (map) {
-                WARN_ONCE(refcount_read(&map->refcnt) != 0,
-                          "cpu_map refcnt unbalanced\n");
-                free(map);
-        }
-}
-
-struct cpu_map *cpu_map__get(struct cpu_map *map)
-{
-        if (map)
-                refcount_inc(&map->refcnt);
-        return map;
-}
-
-void cpu_map__put(struct cpu_map *map)
-{
-        if (map && refcount_dec_and_test(&map->refcnt))
-                cpu_map__delete(map);
-}
-
 static int cpu__get_topology_int(int cpu, const char *name, int *value)
 {
         char path[PATH_MAX];
@@ -324,7 +111,7 @@ int cpu_map__get_socket_id(int cpu)
         return ret ?: value;
 }
 
-int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
+int cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
 {
         int cpu;
 
@@ -341,11 +128,11 @@ static int cmp_ids(const void *a, const void *b)
         return *(int *)a - *(int *)b;
 }
 
-int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
-                       int (*f)(struct cpu_map *map, int cpu, void *data),
+int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
+                       int (*f)(struct perf_cpu_map *map, int cpu, void *data),
                        void *data)
 {
-        struct cpu_map *c;
+        struct perf_cpu_map *c;
         int nr = cpus->nr;
         int cpu, s1, s2;
 
@@ -380,7 +167,7 @@ int cpu_map__get_die_id(int cpu)
         return ret ?: value;
 }
 
-int cpu_map__get_die(struct cpu_map *map, int idx, void *data)
+int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data)
 {
         int cpu, die_id, s;
 
@@ -419,7 +206,7 @@ int cpu_map__get_core_id(int cpu)
         return ret ?: value;
 }
 
-int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
+int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
 {
         int cpu, s_die;
 
@@ -448,17 +235,17 @@ int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
         return (s_die << 16) | (cpu & 0xffff);
 }
 
-int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
+int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp)
 {
         return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
 }
 
-int cpu_map__build_die_map(struct cpu_map *cpus, struct cpu_map **diep)
+int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep)
 {
         return cpu_map__build_map(cpus, diep, cpu_map__get_die, NULL);
 }
 
-int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
+int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep)
 {
         return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
 }
@@ -670,29 +457,17 @@ int cpu__setup_cpunode_map(void)
         return 0;
 }
 
-bool cpu_map__has(struct cpu_map *cpus, int cpu)
+bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
 {
-        return cpu_map__idx(cpus, cpu) != -1;
-}
-
-int cpu_map__idx(struct cpu_map *cpus, int cpu)
-{
-        int i;
-
-        for (i = 0; i < cpus->nr; ++i) {
-                if (cpus->map[i] == cpu)
-                        return i;
-        }
-
-        return -1;
+        return perf_cpu_map__idx(cpus, cpu) != -1;
 }
 
-int cpu_map__cpu(struct cpu_map *cpus, int idx)
+int cpu_map__cpu(struct perf_cpu_map *cpus, int idx)
 {
         return cpus->map[idx];
 }
 
-size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
+size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
 {
         int i, cpu, start = -1;
         bool first = true;
@@ -744,7 +519,7 @@ static char hex_char(unsigned char val)
         return '?';
 }
 
-size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
+size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
 {
         int i, cpu;
         char *ptr = buf;
@@ -784,12 +559,12 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
         return ptr - buf;
 }
 
-const struct cpu_map *cpu_map__online(void) /* thread unsafe */
+const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
 {
-        static const struct cpu_map *online = NULL;
+        static const struct perf_cpu_map *online = NULL;
 
         if (!online)
-                online = cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+                online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
 
         return online;
 }
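For readers following the cpu_map -> perf_cpu_map rename above, here is a minimal, hypothetical sketch (not part of the commit) of how the renamed libperf map is typically consumed. Only struct perf_cpu_map, perf_cpu_map__new(), perf_cpu_map__empty_new() and perf_cpu_map__idx() appear in this diff; the <perf/cpumap.h> header path and the perf_cpu_map__nr()/perf_cpu_map__cpu()/perf_cpu_map__put() accessors are assumed to come from the libperf library introduced in this cycle.

/* Hypothetical standalone user of the libperf cpumap API (a sketch, not
 * the commit's code). Assumed build: add the libperf include directory to
 * the include path and link against the libperf static library.
 */
#include <stdio.h>
#include <perf/cpumap.h>        /* assumed: struct perf_cpu_map and accessors */

int main(void)
{
        /* NULL means "all online CPUs", read from /sys/devices/system/cpu/online,
         * matching the cpu_map__online() hunk above. */
        struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
        int idx;

        if (!cpus)
                return 1;

        /* perf_cpu_map__nr()/__cpu() are assumed accessors for map size and entries. */
        for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++)
                printf("cpu[%d] = %d\n", idx, perf_cpu_map__cpu(cpus, idx));

        /* Reference counting now lives behind the library: drop our reference. */
        perf_cpu_map__put(cpus);
        return 0;
}

The deleted cpu_map__get()/cpu_map__put() helpers above are exactly what moves behind the library boundary, which is why such a sketch would only ever call perf_cpu_map__*() entry points rather than touching the refcount directly.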