author     Kan Liang <kan.liang@linux.intel.com>       2020-03-19 13:25:10 -0700
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2020-04-18 09:05:01 -0300
commit     9c6c3f471d85a9b0bcda3ce6fc1e2646685e3f60
tree       56ded07276f2adf4b3f1a0d0d32430bca581d4d2 /tools/perf
parent     771fd155dfaa5332da69d606db16fe27bd9d388d
perf thread: Save previous sample for LBR stitching approach
To retrieve the overwritten LBRs from the previous sample for the LBR
stitching approach, perf has to save the previous sample.

The struct lbr_stitch is allocated only once, when the LBR stitching
approach is enabled and the kernel supports hw_idx.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Pavel Gerasimov <pavel.gerasimov@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vitaly Slobodskoy <vitaly.slobodskoy@intel.com>
Link: http://lore.kernel.org/lkml/20200319202517.23423-11-kan.liang@linux.intel.com
[ Use zalloc()/zfree() for thread->lbr_stitch ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf')
 -rw-r--r--  tools/perf/util/machine.c | 23
 -rw-r--r--  tools/perf/util/thread.c  |  1
 -rw-r--r--  tools/perf/util/thread.h  | 12

3 files changed, 36 insertions, 0 deletions
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f9d69fce584a..a54ca09a1d00 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2292,6 +2292,21 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
 	return 0;
 }
 
+static bool alloc_lbr_stitch(struct thread *thread)
+{
+	if (thread->lbr_stitch)
+		return true;
+
+	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
+	if (!thread->lbr_stitch)
+		goto err;
+
+err:
+	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
+	thread->lbr_stitch_enable = false;
+	return false;
+}
+
 /*
  * Recolve LBR callstack chain sample
  * Return:
@@ -2308,6 +2323,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 {
 	struct ip_callchain *chain = sample->callchain;
 	int chain_nr = min(max_stack, (int)chain->nr), i;
+	struct lbr_stitch *lbr_stitch;
 	u64 branch_from = 0;
 	int err;
 
@@ -2320,6 +2336,13 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 	if (i == chain_nr)
 		return 0;
 
+	if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
+	    alloc_lbr_stitch(thread)) {
+		lbr_stitch = thread->lbr_stitch;
+
+		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
+	}
+
 	if (callchain_param.order == ORDER_CALLEE) {
 		/* Add kernel ip */
 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1f080db23615..8d0da260c84c 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -111,6 +111,7 @@ void thread__delete(struct thread *thread)
 	exit_rwsem(&thread->namespaces_lock);
 	exit_rwsem(&thread->comm_lock);
 
+	thread__free_stitch_list(thread);
 	free(thread);
 }
 
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 95294050cff2..34eb61cee6a4 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,6 +5,7 @@
 #include <linux/refcount.h>
 #include <linux/rbtree.h>
 #include <linux/list.h>
+#include <linux/zalloc.h>
 #include <stdio.h>
 #include <unistd.h>
 #include <sys/types.h>
@@ -13,6 +14,7 @@
 #include <strlist.h>
 #include <intlist.h>
 #include "rwsem.h"
+#include "event.h"
 
 struct addr_location;
 struct map;
@@ -20,6 +22,10 @@ struct perf_record_namespaces;
 struct thread_stack;
 struct unwind_libunwind_ops;
 
+struct lbr_stitch {
+	struct perf_sample	prev_sample;
+};
+
 struct thread {
 	union {
 		struct rb_node		rb_node;
@@ -49,6 +55,7 @@ struct thread {
 
 	/* LBR call stack stitch */
 	bool			lbr_stitch_enable;
+	struct lbr_stitch	*lbr_stitch;
 };
 
 struct machine;
@@ -145,4 +152,9 @@ static inline bool thread__is_filtered(struct thread *thread)
 	return false;
 }
 
+static inline void thread__free_stitch_list(struct thread *thread)
+{
+	zfree(&thread->lbr_stitch);
+}
+
 #endif /* __PERF_THREAD_H */
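
For illustration only, not part of the commit: a minimal standalone sketch of the
allocate-once pattern the patch uses. The *_stub names, zalloc_stub()/zfree_stub()
and the simplified sample struct below are stand-ins invented for this sketch; in
perf the real helpers live in tools/include/linux/zalloc.h and the per-thread state
is the struct lbr_stitch added to thread.h above.

/*
 * Standalone sketch of the lazy, allocate-once lbr_stitch setup.
 * zalloc_stub()/zfree_stub() are local stand-ins: zalloc() is a
 * zero-initializing allocation, zfree() frees the buffer and NULLs
 * the pointer so a later free is harmless.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct perf_sample_stub {               /* stand-in for struct perf_sample */
        unsigned long long ip;
};

struct lbr_stitch_stub {                /* mirrors struct lbr_stitch in thread.h */
        struct perf_sample_stub prev_sample;
};

struct thread_stub {                    /* only the fields this sketch needs */
        bool lbr_stitch_enable;
        struct lbr_stitch_stub *lbr_stitch;
};

static void *zalloc_stub(size_t size)
{
        return calloc(1, size);
}

#define zfree_stub(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

/* Allocate the per-thread stitch state once; disable the feature on failure. */
static bool alloc_lbr_stitch_stub(struct thread_stub *thread)
{
        if (thread->lbr_stitch)
                return true;

        thread->lbr_stitch = zalloc_stub(sizeof(*thread->lbr_stitch));
        if (thread->lbr_stitch)
                return true;

        fprintf(stderr, "Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
        thread->lbr_stitch_enable = false;
        return false;
}

int main(void)
{
        struct thread_stub thread = { .lbr_stitch_enable = true };
        struct perf_sample_stub sample = { .ip = 0x1234 };

        /* Per sample: save the current sample so the next one can stitch to it. */
        if (thread.lbr_stitch_enable && alloc_lbr_stitch_stub(&thread))
                memcpy(&thread.lbr_stitch->prev_sample, &sample, sizeof(sample));

        /* Teardown, as thread__delete() does via thread__free_stitch_list(). */
        zfree_stub(&thread.lbr_stitch);
        return 0;
}

Allocating the stitch state lazily and per thread keeps the cost at zero for threads
that never take the LBR call-stack path, and saving prev_sample per thread gives a
later sample something to stitch against; on allocation failure the feature is simply
disabled for that thread instead of failing callchain resolution.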