author     Thomas Gleixner <tglx@linutronix.de>    2020-02-24 15:01:48 +0100
committer  Alexei Starovoitov <ast@kernel.org>     2020-02-24 16:20:10 -0800
commit     085fee1a72a9fba101a4a68a2c02fa8bd2b6f913 (patch)
tree       e215f625f6ca1185cd82d9fd65a2585c109f6b5d
parent     c518cfa0c5ad75ddf3d743f1e35b9cf5fc2c346e (diff)
bpf: Use recursion prevention helpers in hashtab code
The required protection is that the caller cannot be migrated to a different CPU, as these places either take a hash bucket lock or might trigger a kprobe inside the memory allocator. Both scenarios can lead to deadlocks. Deadlock prevention is per CPU: incrementing a per-CPU variable temporarily blocks the invocation of BPF programs from perf and kprobes.

Replace the open coded preempt_disable/enable() and this_cpu_inc/dec() pairs with the new recursion prevention helpers to prepare BPF to work on PREEMPT_RT enabled kernels. On a non-RT kernel the migrate disable/enable in the helpers map to preempt_disable/enable(), i.e. no functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.211208533@linutronix.de
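For reference, the recursion prevention helpers used in the diff below are bpf_disable_instrumentation() and bpf_enable_instrumentation(), introduced by the parent commit. A minimal sketch of what they do, based on the changelog description above rather than the verbatim in-tree definitions in include/linux/bpf.h:

/* Sketch of the recursion prevention helpers as described in the changelog;
 * the exact in-tree definitions (added by the parent commit) may differ in
 * detail.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();		/* maps to preempt_disable() on non-RT kernels */
	this_cpu_inc(bpf_prog_active);	/* block BPF prog invocation from perf/kprobes */
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();		/* maps to preempt_enable() on non-RT kernels */
}

While bpf_prog_active is elevated, BPF programs attached via perf and kprobes are not invoked on that CPU, so they cannot recurse into code that already holds a hash bucket lock.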
-rw-r--r--  kernel/bpf/hashtab.c  12
1 file changed, 4 insertions, 8 deletions
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 431cef22d29d..ef83b012d8d8 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1333,8 +1333,7 @@ alloc:
 	}
 
 again:
-	preempt_disable();
-	this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	rcu_read_lock();
 again_nocopy:
 	dst_key = keys;
@@ -1362,8 +1361,7 @@ again_nocopy:
 		 */
 		raw_spin_unlock_irqrestore(&b->lock, flags);
 		rcu_read_unlock();
-		this_cpu_dec(bpf_prog_active);
-		preempt_enable();
+		bpf_enable_instrumentation();
 		goto after_loop;
 	}
 
@@ -1374,8 +1372,7 @@ again_nocopy:
 		 */
 		raw_spin_unlock_irqrestore(&b->lock, flags);
 		rcu_read_unlock();
-		this_cpu_dec(bpf_prog_active);
-		preempt_enable();
+		bpf_enable_instrumentation();
 		kvfree(keys);
 		kvfree(values);
 		goto alloc;
@@ -1445,8 +1442,7 @@ next_batch:
 	}
 
 	rcu_read_unlock();
-	this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
 	    key_size * bucket_cnt) ||
 	    copy_to_user(uvalues + total * value_size, values,