From 105c03614bff2de60adf338e3ee90652b65c2d05 Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Tue, 30 May 2017 13:31:32 -0700
Subject: bpf: fix stack_depth usage by test_bpf.ko

test_bpf.ko doesn't call verifier before selecting interpreter or JITing,
hence the tests need to manually specify the amount of stack they consume.

Signed-off-by: Alexei Starovoitov
Acked-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 lib/test_bpf.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index be88cbaadde3..070bde56474c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -84,6 +84,7 @@ struct bpf_test {
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
 	__u8 frag_data[MAX_DATA];
+	int stack_depth; /* for eBPF only, since tests don't call verifier */
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -455,6 +456,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
+	self->stack_depth = 40;
 
 	return 0;
 }
@@ -2317,7 +2319,8 @@ static struct bpf_test tests[] = {
 		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
 		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
-		{ { 38, 256 } }
+		{ { 38, 256 } },
+		.stack_depth = 64,
 	},
 	/* BPF_ALU | BPF_MOV | BPF_X */
 	{
@@ -4169,6 +4172,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_B: Store/Load byte: max positive",
@@ -4181,6 +4185,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7f } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_B: Store/Load byte: max negative",
@@ -4194,6 +4199,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_H: Store/Load half word: max negative",
@@ -4206,6 +4212,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_H: Store/Load half word: max positive",
@@ -4218,6 +4225,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_H: Store/Load half word: max negative",
@@ -4231,6 +4239,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_W: Store/Load word: max negative",
@@ -4243,6 +4252,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_W: Store/Load word: max positive",
@@ -4255,6 +4265,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_W: Store/Load word: max negative",
@@ -4268,6 +4279,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max negative",
@@ -4280,6 +4292,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max negative 2",
@@ -4297,6 +4310,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x1 } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max positive",
@@ -4309,6 +4323,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_DW: Store/Load double word: max negative",
@@ -4322,6 +4337,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	/* BPF_STX | BPF_XADD | BPF_W/DW */
 	{
@@ -4336,6 +4352,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x22 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4351,6 +4368,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4363,6 +4381,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x12 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: X + 1 + 1 + 1 + ...",
@@ -4384,6 +4403,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x22 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4399,6 +4419,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4411,6 +4432,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x12 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: X + 1 + 1 + 1 + ...",
@@ -5809,6 +5831,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		/* Type doesn't really matter here as long as it's not unspec. */
 		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+		fp->aux->stack_depth = tests[which].stack_depth;
 
 		/* We cannot error here as we don't need type compatibility
		 * checks.
--
cgit v1.2.3
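The new field matters because generate_filter() now copies tests[which].stack_depth
straight into fp->aux->stack_depth; nothing runs the verifier to compute it for
these hand-built programs. As a rough illustration only (the test name, opcodes
and the -40 offset below are made up for this sketch, not lines from the patch),
a test that touches the stack through R10 pairs its deepest access with a
matching .stack_depth:

/*
 * Illustrative sketch, not part of the patch: a hand-written eBPF test
 * that stores at R10 - 40 must declare at least 40 bytes of stack.
 */
{
	"EXAMPLE: store/load one byte on the stack",
	.u.insns_int = {
		BPF_ST_MEM(BPF_B, R10, -40, 0x5a),	/* deepest access: R10 - 40 */
		BPF_LDX_MEM(BPF_B, R0, R10, -40),
		BPF_EXIT_INSN(),
	},
	INTERNAL,
	{ },
	{ { 0, 0x5a } },
	.stack_depth = 40,	/* must cover the -40 offset above */
},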
From aa9f979c41043d9fcf7957c99948e20bbddefc7f Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Tue, 13 Jun 2017 14:28:18 +0200
Subject: networking: use skb_put_zero()

Use the recently introduced helper to replace the pattern of
skb_put() && memset(), this transformation was done with the
following spatch:

@@
identifier p;
expression len;
expression skb;
@@
-p = skb_put(skb, len);
-memset(p, 0, len);
+p = skb_put_zero(skb, len);

Signed-off-by: Johannes Berg
Signed-off-by: David S. Miller
---
 lib/nlattr.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index a7e0b16078df..d09d9746fc5d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -400,8 +400,7 @@ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
 {
 	void *start;
 
-	start = skb_put(skb, NLA_ALIGN(attrlen));
-	memset(start, 0, NLA_ALIGN(attrlen));
+	start = skb_put_zero(skb, NLA_ALIGN(attrlen));
 
 	return start;
 }
--
cgit v1.2.3
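The lib/nlattr.c hunk above is one instance of the general rewrite. As a
minimal sketch (the wrapper function below is hypothetical, not part of the
patch), the conversion collapses the reserve-then-clear pattern into one call:

/* Hypothetical caller, shown only to illustrate the conversion. */
static void *reserve_zeroed(struct sk_buff *skb, unsigned int len)
{
	/*
	 * Before the series:
	 *	void *p = skb_put(skb, len);
	 *	memset(p, 0, len);
	 *	return p;
	 */
	return skb_put_zero(skb, len);	/* same result, one call */
}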
From b7127cfea050a3b371d6da7a3ce9e69942f945f0 Mon Sep 17 00:00:00 2001
From: David Daney
Date: Tue, 13 Jun 2017 16:49:36 -0700
Subject: test_bpf: Add test to make conditional jump cross a large number of insns.

On MIPS, conditional branches can only span 32k instructions. To exceed
this limit in the JIT with the BPF maximum of 4k insns, we need to choose
eBPF insns that expand to more than 8 machine instructions. Use BPF_LD_ABS
as it is quite complex. This forces the JIT to invert the sense of the
branch to branch around a long jump to the end.

This (somewhat) verifies that the branch inversion logic and target
address calculation of the long jumps are done correctly.

Signed-off-by: David Daney
Acked-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 lib/test_bpf.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

(limited to 'lib')

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 070bde56474c..c871e0e76c2a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -435,6 +435,30 @@ loop:
 	return 0;
 }
 
+static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_MOV64_REG(R6, R1);
+	insn[i++] = BPF_LD_ABS(BPF_B, 0);
+	insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
+	i++;
+	while (i < len - 1)
+		insn[i++] = BPF_LD_ABS(BPF_B, 1);
+	insn[i] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
 static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 {
 	unsigned int len = BPF_MAXINSNS;
@@ -5044,6 +5068,14 @@ static struct bpf_test tests[] = {
 		{ { ETH_HLEN, 0xbef } },
 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
 	},
+	{
+		"BPF_MAXINSNS: jump around ld_abs",
+		{ },
+		INTERNAL,
+		{ 10, 11 },
+		{ { 2, 10 } },
+		.fill_helper = bpf_fill_jump_around_ld_abs,
+	},
 	/*
 	 * LD_IND / LD_ABS on fragmented SKBs
 	 */
--
cgit v1.2.3

From 59ae1d127ac0ae404baf414c434ba2651b793f46 Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Fri, 16 Jun 2017 14:29:20 +0200
Subject: networking: introduce and use skb_put_data()

A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for this.

An spatch similar to the one for skb_put_zero() converts many
of the places using it:

@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)

@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)

@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);

(again, manually post-processed to retain some comments)

Reviewed-by: Stephen Hemminger
Signed-off-by: Johannes Berg
Signed-off-by: David S. Miller
---
 lib/nlattr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index d09d9746fc5d..ab15a6c095d3 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -616,7 +616,7 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
 		return -EMSGSIZE;
 
-	memcpy(skb_put(skb, attrlen), data, attrlen);
+	skb_put_data(skb, data, attrlen);
 	return 0;
 }
 EXPORT_SYMBOL(nla_append);
--
cgit v1.2.3
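For comparison with the nla_append() hunk above, a caller-side sketch of the
same conversion (the wrapper function here is hypothetical; the body mirrors
the before/after state of nla_append() in the diff):

/* Hypothetical wrapper illustrating the memcpy(skb_put(...)) rewrite. */
static int append_payload(struct sk_buff *skb, const void *data, int attrlen)
{
	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
		return -EMSGSIZE;

	/* was: memcpy(skb_put(skb, attrlen), data, attrlen); */
	skb_put_data(skb, data, attrlen);
	return 0;
}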
From 4df864c1d9afb46e2461a9f808d9f11a42d31bad Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Fri, 16 Jun 2017 14:29:21 +0200
Subject: networking: make skb_put & friends return void pointers

It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.

Make these functions (skb_put, __skb_put and pskb_put) return void *
and remove all the casts across the tree, adding a (u8 *) cast only
where the unsigned char pointer was used directly, all done with the
following spatch:

@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_put, __skb_put };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)

@@
expression E, SKB, LEN;
identifier fn = { skb_put, __skb_put };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)

which actually doesn't cover pskb_put since there are only three
users overall.

A handful of stragglers were converted manually, notably a macro in
drivers/isdn/i4l/isdn_bsdcomp.c and, oddly enough, one of the many
instances in net/bluetooth/hci_sock.c. In the former file, I also
had to fix one whitespace problem spatch introduced.

Signed-off-by: Johannes Berg
Signed-off-by: David S. Miller
---
 lib/nlattr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index ab15a6c095d3..a0c738aa6a79 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -352,7 +352,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 {
 	struct nlattr *nla;
 
-	nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+	nla = skb_put(skb, nla_total_size(attrlen));
 	nla->nla_type = attrtype;
 	nla->nla_len = nla_attr_size(attrlen);
 
--
cgit v1.2.3

From de77b966ce8adcb4c58d50e2f087320d5479812a Mon Sep 17 00:00:00 2001
From: yuan linyu
Date: Sun, 18 Jun 2017 22:48:17 +0800
Subject: net: introduce __skb_put_[zero, data, u8]

follow Johannes Berg, semantic patch file as below,

@@
identifier p, p2;
expression len;
expression skb;
type t, t2;
@@
(
-p = __skb_put(skb, len);
+p = __skb_put_zero(skb, len);
|
-p = (t)__skb_put(skb, len);
+p = __skb_put_zero(skb, len);
)
... when != p
(
p2 = (t2)p;
-memset(p2, 0, len);
|
-memset(p, 0, len);
)

@@
identifier p;
expression len;
expression skb;
type t;
@@
(
-t p = __skb_put(skb, len);
+t p = __skb_put_zero(skb, len);
)
... when != p
(
-memset(p, 0, len);
)

@@
type t, t2;
identifier p, p2;
expression skb;
@@
t *p;
...
(
-p = __skb_put(skb, sizeof(t));
+p = __skb_put_zero(skb, sizeof(t));
|
-p = (t *)__skb_put(skb, sizeof(t));
+p = __skb_put_zero(skb, sizeof(t));
)
... when != p
(
p2 = (t2)p;
-memset(p2, 0, sizeof(*p));
|
-memset(p, 0, sizeof(*p));
)

@@
expression skb, len;
@@
-memset(__skb_put(skb, len), 0, len);
+__skb_put_zero(skb, len);

@@
expression skb, len, data;
@@
-memcpy(__skb_put(skb, len), data, len);
+__skb_put_data(skb, data, len);

@@
expression SKB, C, S;
typedef u8;
identifier fn = {__skb_put};
fresh identifier fn2 = fn ## "_u8";
@@
- *(u8 *)fn(SKB, S) = C;
+ fn2(SKB, C);

Signed-off-by: yuan linyu
Signed-off-by: David S. Miller
---
 lib/test_bpf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c871e0e76c2a..d9d5a410955c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5717,7 +5717,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	if (!skb)
 		return NULL;
 
-	memcpy(__skb_put(skb, size), buf, size);
+	__skb_put_data(skb, buf, size);
 
 	/* Initialize a fake skb with test pattern. */
 	skb_reset_mac_header(skb);
--
cgit v1.2.3
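Putting the double-underscore helpers introduced here side by side (the frame
builder below is a hypothetical sketch, not from the patch; like __skb_put()
itself, these variants do not check tailroom, so the caller must have sized
the skb already):

/* Hypothetical builder showing the three helpers, with the old patterns. */
static void fill_example_frame(struct sk_buff *skb, const u8 *hdr, int hdr_len)
{
	__skb_put_data(skb, hdr, hdr_len);	/* was: memcpy(__skb_put(skb, hdr_len), hdr, hdr_len); */
	__skb_put_u8(skb, 0x42);		/* was: *(u8 *)__skb_put(skb, 1) = 0x42; */
	__skb_put_zero(skb, 4);			/* was: memset(__skb_put(skb, 4), 0, 4); */
}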
From b952f4dff2751252db073c27c0f8a16a416a2ddc Mon Sep 17 00:00:00 2001
From: yuan linyu
Date: Sun, 18 Jun 2017 22:52:04 +0800
Subject: net: manual clean code which call skb_put_[data:zero]

Signed-off-by: yuan linyu
Signed-off-by: David S. Miller
---
 lib/nlattr.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index a0c738aa6a79..fb52435be42d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -398,11 +398,7 @@ EXPORT_SYMBOL(__nla_reserve_64bit);
  */
 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
 {
-	void *start;
-
-	start = skb_put_zero(skb, NLA_ALIGN(attrlen));
-
-	return start;
+	return skb_put_zero(skb, NLA_ALIGN(attrlen));
 }
 EXPORT_SYMBOL(__nla_reserve_nohdr);
 
--
cgit v1.2.3