// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <linux/btf.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
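/*
 * Format @len bytes from @data as lowercase hex into @buf. Used below to
 * append the program tag when building the "bpf_prog_<tag>..." ksym names.
 */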
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

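/*
 * Nothing to do for PERF_RECORD_BPF_EVENT yet beyond dumping it when
 * dump_trace is set.
 */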
int machine__process_bpf_event(struct machine *machine __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_bpf_event(event, stdout);
	return 0;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program, and
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
	struct bpf_event *bpf_event = &event->bpf_event;
	u32 sub_prog_cnt, i, func_info_rec_size = 0;
	u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
	struct bpf_prog_info info = { .type = 0, };
	u32 info_len = sizeof(info);
	void *func_infos = NULL;
	u64 *prog_addrs = NULL;
	struct btf *btf = NULL;
	u32 *prog_lens = NULL;
	bool has_btf = false;
	char errbuf[512];
	int err = 0;

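	/*
	 * A BPF program may be JITed into several functions ("sub programs");
	 * each of them gets its own kernel symbol.
	 */
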
	/* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("%s: failed to get BPF program info: %s, aborting\n",
			 __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
		return -1;
	}
	if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		return -2;
	}

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info.nr_jited_ksyms;
	if (sub_prog_cnt != info.nr_prog_tags ||
	    sub_prog_cnt != info.nr_jited_func_lens)
		return -1;

	/* check BTF func info support */
	if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
		/* btf func info number should be the same as sub_prog_cnt */
		if (sub_prog_cnt != info.nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			return -1;
		}
		if (btf__get_from_id(info.btf_id, &btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
			return -1;
		}
		func_info_rec_size = info.func_info_rec_size;
		func_infos = calloc(sub_prog_cnt, func_info_rec_size);
		if (!func_infos) {
			pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
			/* do not leak btf */
			err = -1;
			goto out;
		}
		has_btf = true;
	}

	/*
	 * We need address, length, and tag for each sub program.
	 * Allocate memory and call bpf_obj_get_info_by_fd() again
	 */
	prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
	if (!prog_addrs) {
		pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
		err = -1;
		goto out;
	}
	prog_lens = calloc(sub_prog_cnt, sizeof(u32));
	if (!prog_lens) {
		pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
		err = -1;
		goto out;
	}
	prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
	if (!prog_tags) {
		pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
		err = -1;
		goto out;
	}

	memset(&info, 0, sizeof(info));
	info.nr_jited_ksyms = sub_prog_cnt;
	info.nr_jited_func_lens = sub_prog_cnt;
	info.nr_prog_tags = sub_prog_cnt;
	info.jited_ksyms = ptr_to_u64(prog_addrs);
	info.jited_func_lens = ptr_to_u64(prog_lens);
	info.prog_tags = ptr_to_u64(prog_tags);
	info_len = sizeof(info);
	if (has_btf) {
		info.nr_func_info = sub_prog_cnt;
		info.func_info_rec_size = func_info_rec_size;
		info.func_info = ptr_to_u64(func_infos);
	}

	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
		goto out;
	}

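	/*
	 * One ksymbol record is emitted per sub program so that samples in
	 * JITed BPF code can be symbolized. The name is "bpf_prog_<tag>",
	 * followed by "_<func>" from BTF when available, "_<prog name>" for
	 * a single program without BTF, or "_F" otherwise.
	 */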
	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		const struct bpf_func_info *finfo;
		const char *short_name = NULL;
		const struct btf_type *t;
		int name_len;

		*ksymbol_event = (struct ksymbol_event){
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct ksymbol_event, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};
		name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
				    "bpf_prog_");
		name_len += snprintf_hex(ksymbol_event->name + name_len,
					 KSYM_NAME_LEN - name_len,
					 prog_tags[i], BPF_TAG_SIZE);
		if (has_btf) {
			finfo = func_infos + i * info.func_info_rec_size;
			t = btf__type_by_id(btf, finfo->type_id);
			short_name = btf__name_by_offset(btf, t->name_off);
		} else if (i == 0 && sub_prog_cnt == 1) {
			/* no subprog */
			if (info.name[0])
				short_name = info.name;
		} else
			short_name = "F";
		if (short_name)
			name_len += snprintf(ksymbol_event->name + name_len,
					     KSYM_NAME_LEN - name_len,
					     "_%s", short_name);

		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

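	/*
	 * One PERF_RECORD_BPF_EVENT describes the whole program. Skip it
	 * when bpf events are disabled in record_opts (no_bpf_event).
	 */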
	/* Synthesize PERF_RECORD_BPF_EVENT */
	if (!opts->no_bpf_event) {
		*bpf_event = (struct bpf_event){
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info.id,
		};
		/* use the program's own tag; "i" is past the end of prog_tags here */
		memcpy(bpf_event->tag, info.tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(prog_tags);
	free(prog_lens);
	free(prog_addrs);
	free(func_infos);
	/* btf objects from libbpf must be released with btf__free(), not free() */
	btf__free(btf);
	return err ? -1 : 0;
}

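/*
 * Walk all BPF programs currently loaded in the kernel, using
 * bpf_prog_get_next_id() and bpf_prog_get_fd_by_id(), and synthesize the
 * records for each of them.
 */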
int perf_event__synthesize_bpf_events(struct perf_tool *tool,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

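	/*
	 * The same buffer is reused for both record types, so it must hold
	 * the fixed part of either record plus the ksym name and the
	 * sample id header.
	 */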
	event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			/* ENOENT means we have walked all loaded programs */
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(tool, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}
	free(event);
	return err;
}