Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bpf-cgroup.h |  4
-rw-r--r--  include/linux/bpf.h        | 80
-rw-r--r--  include/linux/filter.h     | 41
3 files changed, 103 insertions(+), 22 deletions(-)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 9be71c195d74..a11d5b7dbbf3 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -85,6 +85,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+                       struct bpf_prog *replace_prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type);
@@ -93,7 +94,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
-                     enum bpf_attach_type type, u32 flags);
+                     struct bpf_prog *replace_prog, enum bpf_attach_type type,
+                     u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
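
The extra replace_prog parameter threads the program to be swapped out through the cgroup attach path, so one specific program in a BPF_F_ALLOW_MULTI list can be replaced atomically instead of being detached and re-attached. A minimal sketch of a caller, assuming the BPF_F_REPLACE semantics this series introduces (the attach type and error handling here are illustrative, not part of this diff):

        /* Atomically swap old_prog for new_prog under cgroup_mutex;
         * expected to fail if old_prog is not currently attached.
         */
        err = cgroup_bpf_attach(cgrp, new_prog, old_prog,
                                BPF_CGROUP_INET_INGRESS,
                                BPF_F_ALLOW_MULTI | BPF_F_REPLACE);
        if (err)
                return err;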
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 085a59afba85..b14e51d56a82 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -471,11 +471,69 @@ struct bpf_trampoline {
        void *image;
        u64 selector;
};
+
+#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
+
+struct bpf_dispatcher_prog {
+       struct bpf_prog *prog;
+       refcount_t users;
+};
+
+struct bpf_dispatcher {
+       /* dispatcher mutex */
+       struct mutex mutex;
+       void *func;
+       struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
+       int num_progs;
+       void *image;
+       u32 image_off;
+};
+
+static __always_inline unsigned int bpf_dispatcher_nopfunc(
+       const void *ctx,
+       const struct bpf_insn *insnsi,
+       unsigned int (*bpf_func)(const void *,
+                                const struct bpf_insn *))
+{
+       return bpf_func(ctx, insnsi);
+}
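
bpf_dispatcher_nopfunc() is the default dispatch hook: it takes the same three arguments as a generated dispatcher trampoline but simply performs the ordinary indirect call. For illustration, with ctx and prog in scope, running through the no-op hook is exactly a plain program invocation:

        /* Both forms call prog->bpf_func(ctx, prog->insnsi); the hook
         * only decides how that call site is reached.
         */
        u32 ret = bpf_dispatcher_nopfunc(ctx, prog->insnsi, prog->bpf_func);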
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+void *bpf_jit_alloc_exec_page(void);
+#define BPF_DISPATCHER_INIT(name) {                     \
+       .mutex = __MUTEX_INITIALIZER(name.mutex),        \
+       .func = &name##func,                             \
+       .progs = {},                                     \
+       .num_progs = 0,                                  \
+       .image = NULL,                                   \
+       .image_off = 0                                   \
+}
+
+#define DEFINE_BPF_DISPATCHER(name)                               \
+       noinline unsigned int name##func(                          \
+               const void *ctx,                                   \
+               const struct bpf_insn *insnsi,                     \
+               unsigned int (*bpf_func)(const void *,             \
+                                        const struct bpf_insn *)) \
+       {                                                          \
+               return bpf_func(ctx, insnsi);                      \
+       }                                                          \
+       EXPORT_SYMBOL(name##func);                                 \
+       struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
+#define DECLARE_BPF_DISPATCHER(name)                                \
+       unsigned int name##func(                                    \
+               const void *ctx,                                    \
+               const struct bpf_insn *insnsi,                      \
+               unsigned int (*bpf_func)(const void *,              \
+                                        const struct bpf_insn *)); \
+       extern struct bpf_dispatcher name;
+#define BPF_DISPATCHER_FUNC(name) name##func
+#define BPF_DISPATCHER_PTR(name) (&name)
+void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+                               struct bpf_prog *to);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
@@ -490,6 +548,13 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
        return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+#define DEFINE_BPF_DISPATCHER(name)
+#define DECLARE_BPF_DISPATCHER(name)
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
+#define BPF_DISPATCHER_PTR(name) NULL
+static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
+                                             struct bpf_prog *from,
+                                             struct bpf_prog *to) {}
#endif
struct bpf_func_info_aux {
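
Putting the pieces together: DECLARE_BPF_DISPATCHER() belongs in a header, DEFINE_BPF_DISPATCHER() in exactly one translation unit, and BPF_DISPATCHER_FUNC() names the trampoline that run sites call, with the stubs above falling back to the no-op hook when the JIT is off. A hedged sketch of how a subsystem might use the API together with the __BPF_PROG_RUN() macro from the filter.h changes below (my_dispatcher, run_prog, and prog_changed are illustrative names, not part of this patch):

        /* In a header: */
        DECLARE_BPF_DISPATCHER(my_dispatcher)

        /* In exactly one .c file: */
        DEFINE_BPF_DISPATCHER(my_dispatcher)

        /* Fast path: run via the dispatcher trampoline; until an image
         * is installed this degenerates to the usual indirect call.
         */
        static u32 run_prog(const struct bpf_prog *prog, void *ctx)
        {
                return __BPF_PROG_RUN(prog, ctx,
                                      BPF_DISPATCHER_FUNC(my_dispatcher));
        }

        /* Control path: re-generate the dispatcher image whenever a
         * program is attached, replaced, or detached (either argument
         * may be NULL).
         */
        static void prog_changed(struct bpf_prog *old_prog,
                                 struct bpf_prog *new_prog)
        {
                bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatcher),
                                           old_prog, new_prog);
        }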
@@ -897,14 +962,14 @@ struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(struct bpf_map *map);
+void __dev_map_flush(void);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
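
Both flush helpers drop their struct bpf_map argument: flush state moves out of the per-CPU bpf_redirect_info (note the map_to_flush removal in filter.h below), so a single call flushes everything pending rather than one tracked map. The driver-facing pattern is unchanged; a sketch of a NAPI poll tail using the existing xdp_do_flush_map() wrapper (the driver function names are illustrative):

        static int my_napi_poll(struct napi_struct *napi, int budget)
        {
                /* RX processing may queue frames via XDP_REDIRECT */
                int done = my_clean_rx_irq(napi, budget);

                /* One call now flushes all pending devmap/cpumap work */
                xdp_do_flush_map();
                return done;
        }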
@@ -943,6 +1008,8 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
+struct bpf_prog *bpf_prog_by_id(u32 id);
+
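bpf_prog_by_id() resolves a program ID to a bpf_prog reference, much as the BPF_PROG_GET_FD_BY_ID command does internally. A minimal usage sketch, assuming it returns ERR_PTR() on failure (as the !CONFIG_BPF_SYSCALL stub below suggests) and that the reference it takes must be dropped with bpf_prog_put():

        struct bpf_prog *prog = bpf_prog_by_id(id);

        if (IS_ERR(prog))
                return PTR_ERR(prog);
        /* ... inspect or use prog ... */
        bpf_prog_put(prog);
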
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -1004,7 +1071,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
        return NULL;
}
-static inline void __dev_map_flush(struct bpf_map *map)
+static inline void __dev_map_flush(void)
{
}
@@ -1033,7 +1100,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
        return NULL;
}
-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
{
}
@@ -1074,6 +1141,11 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
static inline void bpf_map_put(struct bpf_map *map)
{
}
+
+static inline struct bpf_prog *bpf_prog_by_id(u32 id)
+{
+       return ERR_PTR(-ENOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 345f3748e0fb..70e6dd960bca 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -559,23 +559,26 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define BPF_PROG_RUN(prog, ctx) ({                                     \
-       u32 ret;                                                        \
-       cant_sleep();                                                   \
-       if (static_branch_unlikely(&bpf_stats_enabled_key)) {           \
-               struct bpf_prog_stats *stats;                           \
-               u64 start = sched_clock();                              \
-               ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi);         \
-               stats = this_cpu_ptr(prog->aux->stats);                 \
-               u64_stats_update_begin(&stats->syncp);                  \
-               stats->cnt++;                                           \
-               stats->nsecs += sched_clock() - start;                  \
-               u64_stats_update_end(&stats->syncp);                    \
-       } else {                                                        \
-               ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi);         \
-       }                                                               \
+#define __BPF_PROG_RUN(prog, ctx, dfunc) ({                            \
+       u32 ret;                                                        \
+       cant_sleep();                                                   \
+       if (static_branch_unlikely(&bpf_stats_enabled_key)) {           \
+               struct bpf_prog_stats *stats;                           \
+               u64 start = sched_clock();                              \
+               ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);     \
+               stats = this_cpu_ptr(prog->aux->stats);                 \
+               u64_stats_update_begin(&stats->syncp);                  \
+               stats->cnt++;                                           \
+               stats->nsecs += sched_clock() - start;                  \
+               u64_stats_update_end(&stats->syncp);                    \
+       } else {                                                        \
+               ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);     \
+       }                                                               \
        ret; })
+#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx,              \
+                                              bpf_dispatcher_nopfunc)
+
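The split keeps every existing BPF_PROG_RUN() site source-compatible: the stats bookkeeping is identical, and only the dfunc argument decides how the final jump to prog->bpf_func is reached. Conceptually, these two lines are equivalent:

        /* BPF_PROG_RUN() just fixes dfunc to the no-op hook. */
        ret = BPF_PROG_RUN(prog, ctx);
        ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nopfunc);
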
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
@@ -589,7 +592,6 @@ struct bpf_redirect_info {
        u32 tgt_index;
        void *tgt_value;
        struct bpf_map *map;
-       struct bpf_map *map_to_flush;
        u32 kern_flags;
};
@@ -699,6 +701,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
        return res;
}
+DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                            struct xdp_buff *xdp)
{
@@ -708,9 +712,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
         * already takes rcu_read_lock() when fetching the program, so
         * it's not necessary here anymore.
         */
-       return BPF_PROG_RUN(prog, xdp);
+       return __BPF_PROG_RUN(prog, xdp,
+                             BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
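bpf_prog_change_xdp() is the notification the XDP attach path is expected to issue whenever the installed program changes, so the bpf_dispatcher_xdp image can be re-emitted with the new program's bpf_func as a direct-call target. A hedged sketch of where such a call might sit (the core/driver plumbing is outside this diff; the function and field names are illustrative):

        static void my_xdp_install(struct net_device *dev,
                                   struct bpf_prog *old_prog,
                                   struct bpf_prog *new_prog)
        {
                /* old_prog is NULL on first attach, new_prog on detach */
                bpf_prog_change_xdp(old_prog, new_prog);
                /* ... publish new_prog to the driver's datapath ... */
        }
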
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);