author     Daniel Borkmann <daniel@iogearbox.net>    2016-08-18 01:00:40 +0200
committer  David S. Miller <davem@davemloft.net>     2016-08-18 23:38:16 -0700
commit     4de16969523c15fb53cf8945dfc6b495d01d1512 (patch)
tree       02ec053515e7262d2be7fa73dee1cbfc54522c06 /net/core
parent     5293efe62df81908f2e90c9820c7edcc8e61f5e9 (diff)
bpf: enable event output helper also for xdp types
Follow-up to 555c8a8623a3 ("bpf: avoid stack copy and use skb ctx for event output") for also adding the event output helper for XDP typed programs. The event output helper has been very useful in particular for debugging or event notification purposes, since it's much faster and more flexible than regular trace printk due to programmatically being able to attach meta data. The same flags structure applies as with tc BPF programs.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
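
Note (not part of the commit message): the flags argument keeps the layout of the tc/skb variant of the helper. The lower 32 bits (BPF_F_INDEX_MASK) select the slot in the perf event array, or BPF_F_CURRENT_CPU, while the bits covered by BPF_F_CTXLEN_MASK hold the number of packet bytes to append after the caller's meta data. A minimal sketch of how a program might build such a value; xdp_event_flags() is a made-up name for illustration:

    #include <linux/types.h>
    #include <linux/bpf.h>    /* BPF_F_CURRENT_CPU, BPF_F_CTXLEN_MASK */

    /* Sketch only: pack "current CPU" as the event array index and a
     * packet-capture length into the upper bits covered by
     * BPF_F_CTXLEN_MASK, as checked by bpf_xdp_event_output() below.
     */
    static inline __u64 xdp_event_flags(__u32 capture_len)
    {
            return BPF_F_CURRENT_CPU | ((__u64)capture_len << 32);
    }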
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/filter.c | 42
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index abf546d96b6b..3b60dfd2ce92 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2408,6 +2408,41 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
};
#endif
+static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
+ unsigned long off, unsigned long len)
+{
+ memcpy(dst_buff, src_buff + off, len);
+ return 0;
+}
+
+static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
+ u64 meta_size)
+{
+ struct xdp_buff *xdp = (struct xdp_buff *)(long) r1;
+ struct bpf_map *map = (struct bpf_map *)(long) r2;
+ u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
+ void *meta = (void *)(long) r4;
+
+ if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
+ return -EINVAL;
+ if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
+ return -EFAULT;
+
+ return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size,
+ bpf_xdp_copy);
+}
+
+static const struct bpf_func_proto bpf_xdp_event_output_proto = {
+ .func = bpf_xdp_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_STACK,
+ .arg5_type = ARG_CONST_STACK_SIZE,
+};
+
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
@@ -2492,7 +2527,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
- return sk_filter_func_proto(func_id);
+ switch (func_id) {
+ case BPF_FUNC_perf_event_output:
+ return &bpf_xdp_event_output_proto;
+ default:
+ return sk_filter_func_proto(func_id);
+ }
}
static bool __is_valid_access(int off, int size, enum bpf_access_type type)
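
For illustration only (not part of the patch): with this change an XDP program can call bpf_perf_event_output() much like a tc program. The following rough sketch uses today's libbpf conventions for brevity; the map name, meta data layout and 64-byte capture length are made up:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Perf event array the samples are pushed into, one slot per CPU. */
    struct {
            __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
            __uint(key_size, sizeof(int));
            __uint(value_size, sizeof(__u32));
            __uint(max_entries, 64);
    } xdp_events SEC(".maps");

    struct event_meta {
            __u32 cookie;           /* hypothetical meta data layout */
    };

    SEC("xdp")
    int xdp_notify(struct xdp_md *ctx)
    {
            struct event_meta meta = {
                    .cookie = 0xcafe,       /* arbitrary tag for the consumer */
            };
            /* Append the first 64 packet bytes after the meta data; the
             * helper returns -EFAULT if the packet is shorter than that.
             */
            __u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);

            bpf_perf_event_output(ctx, &xdp_events, flags, &meta, sizeof(meta));
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";     /* the proto is gpl_only */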