author    Frederic Weisbecker <fweisbec@gmail.com>  2010-08-14 20:45:13 +0200
committer Frederic Weisbecker <fweisbec@gmail.com>  2010-08-19 01:32:53 +0200
commit    7ae07ea3a48d30689ee037cb136bc21f0b37d8ae (patch)
tree      2cb895a0794bcb2e45a4f48ef7e93302c1f6332c /kernel
parent    927c7a9e92c4f69097a6e9e086d11fc2f8a5b40b (diff)
perf: Humanize the number of contexts
Instead of hardcoding the number of contexts for the recursion
barriers, define a cpp constant to make the code more
self-explanatory.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
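
For context: the magic number 4 being replaced counts the execution
contexts that can nest on a CPU (task, softirq, hardirq, NMI), each of
which needs its own recursion slot and trace buffer. A sketch of the
named constant the patch switches to follows; the comment wording and
exact header placement are assumptions here, not the literal kernel
source:

    /*
     * Sketch of the constant used in the diff below; in the kernel it
     * lives in the perf headers. One slot per nestable context.
     */
    #define PERF_NR_CONTEXTS	4	/* task, softirq, hardirq, NMI */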
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/perf_event.c              |  4
 -rw-r--r--  kernel/trace/trace_event_perf.c  |  8
2 files changed, 6 insertions, 6 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 75ab8a2df6b2..f416aef242c3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1772,7 +1772,7 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
-static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
 struct callchain_cpus_entries *callchain_cpus_entries;
@@ -1828,7 +1828,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * 4;
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b445..db2eae2efcf2 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -45,7 +45,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 	char *buf;
 	int i;
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail;
@@ -65,7 +65,7 @@ fail:
 	if (!total_ref_count) {
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
@@ -140,7 +140,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->perf_events = NULL;
 
 	if (!--total_ref_count) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
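
To illustrate why each context needs its own slot, here is a minimal,
self-contained userspace sketch of the per-context recursion barrier
pattern the patch touches. The current_context() helper is
hypothetical; the kernel derives the index from
in_nmi()/in_irq()/in_softirq(), and the per-CPU storage is simplified
to a plain array here:

    #include <stdio.h>

    #define PERF_NR_CONTEXTS 4	/* task, softirq, hardirq, NMI */

    /* One recursion flag per context (per CPU in the kernel). */
    static int recursion[PERF_NR_CONTEXTS];

    /* Hypothetical stand-in for the kernel's context detection. */
    static int current_context(void)
    {
    	return 0;	/* pretend we always run in task context */
    }

    /*
     * Return the context index on success, or -1 if the
     * instrumentation is re-entered from the same context. A
     * *different* context interrupting us lands in its own slot
     * and is allowed through.
     */
    static int get_recursion_context(void)
    {
    	int ctx = current_context();

    	if (recursion[ctx])
    		return -1;
    	recursion[ctx]++;
    	return ctx;
    }

    static void put_recursion_context(int ctx)
    {
    	recursion[ctx]--;
    }

    int main(void)
    {
    	int ctx = get_recursion_context();

    	if (ctx < 0)
    		return 1;
    	printf("first entry: context %d\n", ctx);		/* 0 */
    	printf("nested entry: %d\n", get_recursion_context());	/* -1 */
    	put_recursion_context(ctx);
    	return 0;
    }

The same indexing scheme is what sizes callchain_recursion[] and
perf_trace_buf[] in the diff above: one entry per nestable context
rather than an unexplained 4.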