author | Daniel Borkmann <daniel@iogearbox.net> | 2015-12-17 23:51:53 +0100
committer | David S. Miller <davem@davemloft.net> | 2015-12-18 16:04:50 -0500
commit | 05c74e5e53f6cb07502c3e6a820f33e2777b6605
tree | bb21567c453dc262c318bdfed152cb76b00ea7e4 /net/core
parent | 59ce9670ce18d067433883adf213d04ded074cbf
bpf: add bpf_skb_load_bytes helper
When hacking tc programs with eBPF, one of the issues that comes up
from time to time is how to load addresses out of packet headers. In
eBPF, as in classic BPF, we have BPF_LD | BPF_ABS | BPF_{B,H,W}
instructions that extract a byte, half-word or word out of the skb
data, through helpers such as bpf_load_pointer() (in the interpreter
case).
For example, extracting a whole IPv6 address could look like this:
  union v6addr {
          struct {
                  __u32 p1;
                  __u32 p2;
                  __u32 p3;
                  __u32 p4;
          };
          __u8 addr[16];
  };

  [...]

  a.p1 = htonl(load_word(skb, off));
  a.p2 = htonl(load_word(skb, off +  4));
  a.p3 = htonl(load_word(skb, off +  8));
  a.p4 = htonl(load_word(skb, off + 12));

  [...]

  /* access to a.addr[...] */
This work adds a complementary helper, bpf_skb_load_bytes() (we
already have bpf_skb_store_bytes()), as an alternative, where the
same extraction from an eBPF program would simply look like:

  ret = bpf_skb_load_bytes(skb, off, addr, sizeof(addr));
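As a rough, self-contained sketch (not part of the patch itself), the
IPv6 extraction above could look as follows in a tc classifier. The
section name, the untagged-Ethernet/IPv6 framing assumption, the helper
stub declaration and the TC_ACT_* return values (direct-action mode) are
all illustrative; only the bpf_skb_load_bytes() call follows this patch:

  #include <stddef.h>
  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <linux/ipv6.h>
  #include <linux/pkt_cls.h>

  /* helper stub; the loader resolves it to BPF_FUNC_skb_load_bytes,
   * which the uapi part of this series adds to include/uapi/linux/bpf.h
   */
  static int (*bpf_skb_load_bytes)(void *skb, int off, void *to, int len) =
          (void *) BPF_FUNC_skb_load_bytes;

  union v6addr {
          struct {
                  __u32 p1;
                  __u32 p2;
                  __u32 p3;
                  __u32 p4;
          };
          __u8 addr[16];
  };

  __attribute__((section("classifier"), used))
  int cls_drop_v6_linklocal(struct __sk_buff *skb)
  {
          /* assumes untagged Ethernet + IPv6; off points at the
           * IPv6 source address
           */
          int off = ETH_HLEN + offsetof(struct ipv6hdr, saddr);
          union v6addr a;

          /* one call instead of four load_word()s; 'a' is a fixed-size
           * stack buffer and sizeof(a) a constant, as the verifier
           * requires
           */
          if (bpf_skb_load_bytes(skb, off, &a, sizeof(a)) < 0)
                  return TC_ACT_OK;

          /* bytes arrive in network order, so fe80::/10 can be
           * matched on raw address bytes
           */
          if (a.addr[0] == 0xfe && (a.addr[1] & 0xc0) == 0x80)
                  return TC_ACT_SHOT;

          return TC_ACT_OK;
  }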
The same verifier restrictions apply as in the ffeedafbf023 ("bpf:
introduce current->pid, tgid, uid, gid, comm accessors") case: the
stack memory access needs to be statically verified, and is thus
guaranteed to be initialized on first use (otherwise the verifier
could not tell whether a subsequent access to it is valid, since
that would be runtime dependent).
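To make that restriction concrete, a hedged illustration (variable
names invented; 16 mirrors the internal BPF_LDST_LEN cap from the
patch): the destination must be a fixed-size buffer on the program
stack and the length a compile-time constant:

  char buf[16];              /* fixed-size stack buffer            */
  __u32 n = skb->len & 0xf;  /* some runtime-computed length       */

  /* accepted: constant length, so the verifier knows exactly which
   * stack bytes the helper initializes before any later read
   */
  bpf_skb_load_bytes(skb, 0, buf, sizeof(buf));

  /* rejected: length only known at runtime, so the verifier cannot
   * prove that later reads of buf touch initialized memory
   */
  bpf_skb_load_bytes(skb, 0, buf, n);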
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/filter.c | 35
1 file changed, 34 insertions, 1 deletion
diff --git a/net/core/filter.c b/net/core/filter.c
index 672eefbfbe99..34bf6fc77c1d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1245,6 +1245,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
 }
 
 #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
+#define BPF_LDST_LEN 16U
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
@@ -1252,7 +1253,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 	int offset = (int) r2;
 	void *from = (void *) (long) r3;
 	unsigned int len = (unsigned int) r4;
-	char buf[16];
+	char buf[BPF_LDST_LEN];
 	void *ptr;
 
 	/* bpf verifier guarantees that:
@@ -1299,6 +1300,36 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
+	int offset = (int) r2;
+	void *to = (void *)(unsigned long) r3;
+	unsigned int len = (unsigned int) r4;
+	void *ptr;
+
+	if (unlikely((u32) offset > 0xffff || len > BPF_LDST_LEN))
+		return -EFAULT;
+
+	ptr = skb_header_pointer(skb, offset, len, to);
+	if (unlikely(!ptr))
+		return -EFAULT;
+	if (ptr != to)
+		memcpy(to, ptr, len);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_skb_load_bytes_proto = {
+	.func		= bpf_skb_load_bytes,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_PTR_TO_STACK,
+	.arg4_type	= ARG_CONST_STACK_SIZE,
+};
+
 #define BPF_HEADER_FIELD_SIZE(flags)	((flags) & 0x0f)
 #define BPF_IS_PSEUDO_HEADER(flags)	((flags) & 0x10)
 
@@ -1654,6 +1685,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 	switch (func_id) {
 	case BPF_FUNC_skb_store_bytes:
 		return &bpf_skb_store_bytes_proto;
+	case BPF_FUNC_skb_load_bytes:
+		return &bpf_skb_load_bytes_proto;
 	case BPF_FUNC_l3_csum_replace:
 		return &bpf_l3_csum_replace_proto;
 	case BPF_FUNC_l4_csum_replace: