author    Alexei Starovoitov <ast@kernel.org>    2019-12-08 16:01:13 -0800
committer Alexei Starovoitov <ast@kernel.org>    2019-12-11 15:18:08 -0800
commit    b91e014f078e2e4f24778680e28dbbdecc7f0eb9
tree      7d92e7ddbdd5cc48543536c6311b6ea292c7f949 /kernel/bpf/trampoline.c
parent    5b79bcdf03628a3a9ee04d9cd5fabcf61a8e20be
bpf: Make BPF trampoline use register_ftrace_direct() API
Make BPF trampoline attach its generated assembly code to kernel functions
via register_ftrace_direct() API. It helps ftrace-based tracers co-exist with
BPF trampoline on the same kernel function. It also switches attaching logic
from arch specific text_poke to generic ftrace that is available on many
architectures.

text_poke is still necessary for bpf-to-bpf attach and for bpf_tail_call
optimization.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20191209000114.1876138-3-ast@kernel.org
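For reference, the ftrace direct-call interfaces this commit builds on look
as follows (declarations as in include/linux/ftrace.h at the time of this
series; reproduced here for context, not part of the diff):

/* Attach/detach/switch a direct trampoline at an ftrace call site.
 * ip is the patched call site inside the traced function; addr is the
 * trampoline to call directly instead of the ftrace list handler.
 */
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr,
			 unsigned long new_addr);
unsigned long ftrace_location(unsigned long ip);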
Diffstat (limited to 'kernel/bpf/trampoline.c')
-rw-r--r--  kernel/bpf/trampoline.c  64
1 file changed, 58 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 7e89f1f49d77..23b0d5cfd47e 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -3,6 +3,7 @@
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/ftrace.h>

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
@@ -59,6 +60,60 @@ out:
return tr;
}

+static int is_ftrace_location(void *ip)
+{
+ long addr;
+
+ addr = ftrace_location((long)ip);
+ if (!addr)
+ return 0;
+ if (WARN_ON_ONCE(addr != (long)ip))
+ return -EFAULT;
+ return 1;
+}
+
+static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
+{
+ void *ip = tr->func.addr;
+ int ret;
+
+ if (tr->func.ftrace_managed)
+ ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+ else
+ ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+ return ret;
+}
+
+static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
+{
+ void *ip = tr->func.addr;
+ int ret;
+
+ if (tr->func.ftrace_managed)
+ ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
+ else
+ ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
+ return ret;
+}
+
+/* first time registering */
+static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
+{
+ void *ip = tr->func.addr;
+ int ret;
+
+ ret = is_ftrace_location(ip);
+ if (ret < 0)
+ return ret;
+ tr->func.ftrace_managed = ret;
+
+ if (tr->func.ftrace_managed)
+ ret = register_ftrace_direct((long)ip, (long)new_addr);
+ else
+ ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+ return ret;
+}
+
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into PAGE_SIZE / 2
*/
@@ -77,8 +132,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
int err;

if (fentry_cnt + fexit_cnt == 0) {
- err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
- old_image, NULL);
+ err = unregister_fentry(tr, old_image);
tr->selector = 0;
goto out;
}
@@ -105,12 +159,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)

if (tr->selector)
/* progs already running at this address */
- err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
- old_image, new_image);
+ err = modify_fentry(tr, old_image, new_image);
else
/* first time registering */
- err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
- new_image);
+ err = register_fentry(tr, new_image);
if (err)
goto out;
tr->selector++;
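
Taken together, the update path now reduces to a small attach state machine.
A condensed sketch is below (paraphrased from the hunks above for
illustration; bpf_trampoline_update_sketch is a hypothetical name, and the
image generation, locking and selector bookkeeping of the real function are
omitted):

/* Condensed, illustrative view of the post-patch attach logic.
 * Not literal kernel code.
 */
static int bpf_trampoline_update_sketch(struct bpf_trampoline *tr,
					void *old_image, void *new_image,
					int fentry_cnt, int fexit_cnt)
{
	if (fentry_cnt + fexit_cnt == 0)
		/* nothing attached anymore: detach from the kernel function */
		return unregister_fentry(tr, old_image);
	if (tr->selector)
		/* progs already running: swap old trampoline image for new */
		return modify_fentry(tr, old_image, new_image);
	/* first time registering */
	return register_fentry(tr, new_image);
}

Each helper picks the ftrace direct API when ftrace_location() confirms the
target is an ftrace-managed call site, and falls back to bpf_arch_text_poke()
otherwise, which is what lets ftrace-based tracers and BPF trampolines
co-exist on the same function.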