author | Alexei Starovoitov <ast@kernel.org> | 2021-02-09 19:36:29 -0800 |
---|---|---|
committer | Daniel Borkmann <daniel@iogearbox.net> | 2021-02-11 16:19:13 +0100 |
commit | ca06f55b90020cd97f4cc6d52db95436162e7dcf | |
tree | 8b26786ccefbcd4c87f9229ddc680f6feb4004a2 /kernel | |
parent | f2dd3b39467411c53703125a111f45b3672c1771 | |
bpf: Add per-program recursion prevention mechanism
Since both sleepable and non-sleepable programs execute under migrate_disable,
add a recursion prevention mechanism to both types of programs when they're
executed via the BPF trampoline.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210210033634.62081-5-alexei.starovoitov@gmail.com
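
The mechanism is simple: every bpf_prog gets a per-CPU `active` counter, `__bpf_prog_enter()` increments it, and if the counter was already non-zero the program is skipped on that CPU. Because the trampoline runs with migration disabled, the per-CPU counter behaves much like thread-local state. Below is a minimal, self-contained userspace sketch of the idea; a C11 `_Thread_local` int stands in for the per-CPU slot, and `prog_enter`, `prog_exit`, and `run_prog` are illustrative names, not kernel symbols.

```c
#include <stdio.h>

/* Stand-in for the per-CPU prog->active counter added by this patch:
 * with migration disabled, per-CPU data is as private to its CPU as
 * thread-local storage is to a single thread.
 */
static _Thread_local int active;

static int prog_enter(void)
{
	/* First entry sees the counter go 0 -> 1; any nested entry
	 * observes a larger value and is told to skip.
	 */
	return ++active == 1;
}

static void prog_exit(void)
{
	--active;	/* always pairs with the increment in prog_enter() */
}

static void run_prog(int depth)
{
	if (prog_enter()) {
		printf("depth %d: executing\n", depth);
		if (depth == 0)
			run_prog(depth + 1);	/* simulate re-entry */
	} else {
		printf("depth %d: recursion detected, skipped\n", depth);
	}
	prog_exit();	/* called even when the body was skipped */
}

int main(void)
{
	run_prog(0);	/* prints "executing", then "recursion detected" */
	return 0;
}
```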
Diffstat (limited to 'kernel')

```
 kernel/bpf/core.c       |  8 ++++++++
 kernel/bpf/trampoline.c | 23 +++++++++++++++++----
 2 files changed, 27 insertions(+), 4 deletions(-)
```
```diff
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2cf71fd39c22..334070c4b8a1 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -91,6 +91,12 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 		vfree(fp);
 		return NULL;
 	}
+	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	if (!fp->active) {
+		vfree(fp);
+		kfree(aux);
+		return NULL;
+	}
 
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
@@ -116,6 +122,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 
 	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 	if (!prog->stats) {
+		free_percpu(prog->active);
 		kfree(prog->aux);
 		vfree(prog);
 		return NULL;
@@ -253,6 +260,7 @@ void __bpf_prog_free(struct bpf_prog *fp)
 		kfree(fp->aux);
 	}
 	free_percpu(fp->stats);
+	free_percpu(fp->active);
 	vfree(fp);
 }
 
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 48eb021e1421..89ef6320d19b 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -381,13 +381,16 @@ out:
 	mutex_unlock(&trampoline_mutex);
 }
 
-#define NO_START_TIME 0
+#define NO_START_TIME 1
 static u64 notrace bpf_prog_start_time(void)
 {
 	u64 start = NO_START_TIME;
 
-	if (static_branch_unlikely(&bpf_stats_enabled_key))
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 		start = sched_clock();
+		if (unlikely(!start))
+			start = NO_START_TIME;
+	}
 	return start;
 }
 
@@ -397,12 +400,20 @@ static u64 notrace bpf_prog_start_time(void)
  * call __bpf_prog_enter
  * call prog->bpf_func
  * call __bpf_prog_exit
+ *
+ * __bpf_prog_enter returns:
+ * 0 - skip execution of the bpf prog
+ * 1 - execute bpf prog
+ * [2..MAX_U64] - execute bpf prog and record execution time.
+ *     This is start time.
  */
-u64 notrace __bpf_prog_enter(void)
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 	__acquires(RCU)
 {
 	rcu_read_lock();
 	migrate_disable();
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+		return 0;
 	return bpf_prog_start_time();
 }
 
@@ -430,21 +441,25 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 	__releases(RCU)
 {
 	update_prog_stats(prog, start);
+	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_sleepable(void)
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+		return 0;
 	return bpf_prog_start_time();
 }
 
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
 {
 	update_prog_stats(prog, start);
+	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
```
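
Two details in the diff are easy to miss. First, `NO_START_TIME` changes from 0 to 1 because a return value of 0 from `__bpf_prog_enter()` now exclusively means "skip this program"; since `sched_clock()` can legitimately return 0, `bpf_prog_start_time()` clamps such a reading to `NO_START_TIME` so it cannot be mistaken for the skip signal. Second, the decrement in `__bpf_prog_exit()` is unconditional, which implies the exit path must run even for a skipped program, so the increment and decrement always pair up. Below is a self-contained userspace model of this convention; all names and the `stats_enabled`/`clock_ns` stand-ins are illustrative assumptions, not kernel APIs.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define NO_START_TIME 1		/* was 0; 0 now exclusively means "skip" */

static _Thread_local int active;	/* stand-in for the per-CPU prog->active */
static int stats_enabled = 1;		/* stand-in for bpf_stats_enabled_key */
static u64 clock_ns;			/* stand-in for sched_clock(); may be 0 */

static u64 prog_start_time(void)	/* models bpf_prog_start_time() */
{
	u64 start = NO_START_TIME;

	if (stats_enabled) {
		start = clock_ns;
		if (start == 0)		/* a real 0 must not look like "skip" */
			start = NO_START_TIME;
	}
	return start;
}

static u64 prog_enter(void)		/* models __bpf_prog_enter(prog) */
{
	if (++active != 1)
		return 0;		/* already running on this CPU: skip */
	return prog_start_time();
}

static void prog_exit(u64 start)	/* models __bpf_prog_exit(prog, start) */
{
	if (start > NO_START_TIME)	/* only [2..MAX_U64] is a timestamp */
		printf("elapsed: %llu ns\n",
		       (unsigned long long)(clock_ns - start));
	--active;			/* unconditional, pairs with enter */
}

int main(void)
{
	u64 start = prog_enter();

	if (start)			/* 1 or a start time: run the prog */
		puts("prog body runs here");
	prog_exit(start);		/* called even when skipped */
	return 0;
}
```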