author    David S. Miller <davem@davemloft.net>  2018-10-21 21:11:46 -0700
committer David S. Miller <davem@davemloft.net>  2018-10-21 21:11:46 -0700
commit    a19c59cc10a5ebc6b5a542e56bfd9f427ce01d74
tree      cd04c1af4e800eef175cbc51ffb6e78040d7ee27  /drivers/net/ethernet
parent    92303c86b7e9b7d3895ccafb441a0354143e2a18
parent    fe8ecccc10b3adc071de05ca7af728ca1a4ac9aa
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2018-10-21
The following pull-request contains BPF updates for your *net-next* tree.
The main changes are:
1) Implement two new kinds of BPF maps, queue and stack, along
with new peek, push and pop operations, from Mauricio. (A usage
sketch follows after the sign-off below.)
2) Add support for the MSG_PEEK flag when redirecting into an ingress
psock sk_msg queue, and add a new helper bpf_msg_push_data() for
inserting data into the message, from John. (Sketch below.)
3) Allow BPF programs of type BPF_PROG_TYPE_CGROUP_SKB to use
direct packet access on __sk_buff, from Song. (Sketch below.)
4) Use more lightweight barriers for walking the perf ring buffer,
in both libbpf and the perf tool. Also, various fixes and
improvements on the verifier side, from Daniel.
5) Add per-symbol visibility for the DSO in libbpf, hiding global
symbols such as the netlink-related functions by default, from Andrey.
6) Two improvements to nfp's BPF offload: check vNIC capabilities
in case a program is shared by multiple vNICs, and protect against
mis-initializing atomic counters, from Jakub.
7) Fix bpftool to use 4-context mode for the nfp disassembler,
also from Jakub.
8) Fix a return value comparison in test_libbpf.sh, and add several
bpftool improvements: bash completion, documentation of the bpf fs
restrictions, and the batch mode summary print, from Quentin.
9) Fix a file resource leak in BPF selftest's load_kallsyms()
helper, from Peng.
10) Fix an unused variable warning in map_lookup_and_delete_elem(),
from Alexei.
11) Fix bpf_skb_adjust_room() signature in BPF UAPI helper doc,
from Nicolas.
12) Add missing executables to .gitignore in BPF selftests, from Anders.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
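
[Editor's note: as a quick orientation for item 1, here is a minimal,
hypothetical sketch of a queue map from the BPF program side — map and
function names are made up, and the selftests-style bpf_helpers.h is
assumed. Queue maps store values only, so key_size must be 0, and
programs drive them with the new bpf_map_push_elem(),
bpf_map_peek_elem() and bpf_map_pop_elem() helpers.]

  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  /* Queue maps have no keys; key_size must be 0. */
  struct bpf_map_def SEC("maps") sample_queue = {
          .type           = BPF_MAP_TYPE_QUEUE,
          .key_size       = 0,
          .value_size     = sizeof(__u32),
          .max_entries    = 64,
  };

  SEC("xdp")
  int queue_demo(struct xdp_md *ctx)
  {
          __u32 val = 42, out;

          /* Push to the tail; with BPF_EXIST a full queue discards
           * its oldest element to make room.
           */
          bpf_map_push_elem(&sample_queue, &val, BPF_EXIST);

          /* Peek reads the head without consuming it; pop removes it. */
          if (!bpf_map_peek_elem(&sample_queue, &out))
                  bpf_map_pop_elem(&sample_queue, &out);

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";

From userspace, the same maps are driven through bpf_map_update_elem()
for push and the new bpf_map_lookup_and_delete_elem() for pop.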
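[Similarly for item 2, a hypothetical sk_msg sketch of
bpf_msg_push_data(), which grows the message by len bytes at byte
offset start; the flags argument is reserved and must be 0.]

  SEC("sk_msg")
  int msg_push_demo(struct sk_msg_md *msg)
  {
          /* Reserve 4 bytes at the front of the message, e.g. for a
           * custom header the program fills in afterwards.
           */
          if (bpf_msg_push_data(msg, 0, 4, 0))
                  return SK_DROP;

          return SK_PASS;
  }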
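[And for item 3, a hypothetical cgroup_skb sketch; the point of Song's
change is that such programs may now read packet data directly through
skb->data/data_end rather than only via bpf_skb_load_bytes().]

  #include <linux/ip.h>

  SEC("cgroup_skb/egress")
  int cg_skb_demo(struct __sk_buff *skb)
  {
          void *data = (void *)(long)skb->data;
          void *data_end = (void *)(long)skb->data_end;
          struct iphdr *iph = data;

          /* The verifier still requires an explicit bounds check
           * before any direct access.
           */
          if ((void *)(iph + 1) > data_end)
                  return 1;       /* 1 == allow */

          return iph->protocol != IPPROTO_UDP;
  }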
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h     | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c  | 32
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 69
3 files changed, 98 insertions, 13 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 52457ae3b259..7f591d71ab28 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -189,6 +189,11 @@ enum nfp_bpf_map_use {
         NFP_MAP_USE_ATOMIC_CNT,
 };
 
+struct nfp_bpf_map_word {
+        unsigned char type              :4;
+        unsigned char non_zero_update   :1;
+};
+
 /**
  * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
  * @offmap:     pointer to the offloaded BPF map
@@ -202,7 +207,7 @@ struct nfp_bpf_map {
         struct nfp_app_bpf *bpf;
         u32 tid;
         struct list_head l;
-        enum nfp_bpf_map_use use_map[];
+        struct nfp_bpf_map_word use_map[];
 };
 
 struct nfp_bpf_neutral_map {
@@ -436,6 +441,7 @@ struct nfp_bpf_subprog_info {
  * @prog:       machine code
  * @prog_len:   number of valid instructions in @prog array
  * @__prog_alloc_len:   alloc size of @prog array
+ * @stack_size: total amount of stack used
  * @verifier_meta:      temporary storage for verifier's insn meta
  * @type:       BPF program type
  * @last_bpf_off:       address of the last instruction translated from BPF
@@ -460,6 +466,8 @@ struct nfp_prog {
         unsigned int prog_len;
         unsigned int __prog_alloc_len;
 
+        unsigned int stack_size;
+
         struct nfp_insn_meta *verifier_meta;
 
         enum bpf_prog_type type;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 927e038d9f77..ba8ceedcf6a2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -262,10 +262,25 @@ static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
         unsigned int i;
 
         for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
-                if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
+                if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
                         word[i] = (__force u32)cpu_to_be32(word[i]);
 }
 
+/* Mark value as unsafely initialized in case it becomes atomic later
+ * and we didn't byte swap something non-byte swap neutral.
+ */
+static void
+nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
+{
+        u32 *word = value;
+        unsigned int i;
+
+        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
+                if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
+                    word[i] != (__force u32)cpu_to_be32(word[i]))
+                        nfp_map->use_map[i].non_zero_update = 1;
+}
+
 static int
 nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
                          void *key, void *value)
@@ -285,6 +300,7 @@ nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
                          void *key, void *value, u64 flags)
 {
         nfp_map_bpf_byte_swap(offmap->dev_priv, value);
+        nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
         return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
 }
 
@@ -473,7 +489,7 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                  struct netlink_ext_ack *extack)
 {
         struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-        unsigned int max_mtu;
+        unsigned int max_mtu, max_stack, max_prog_len;
         dma_addr_t dma_addr;
         void *img;
         int err;
@@ -484,6 +500,18 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                 return -EOPNOTSUPP;
         }
 
+        max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+        if (nfp_prog->stack_size > max_stack) {
+                NL_SET_ERR_MSG_MOD(extack, "stack too large");
+                return -EOPNOTSUPP;
+        }
+
+        max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+        if (nfp_prog->prog_len > max_prog_len) {
+                NL_SET_ERR_MSG_MOD(extack, "program too long");
+                return -EOPNOTSUPP;
+        }
+
         img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
         if (IS_ERR(img))
                 return PTR_ERR(img);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 193dd685b365..99f977bfd8cc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -80,6 +80,46 @@ exit_set_location:
         nfp_prog->adjust_head_location = location;
 }
 
+static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
+{
+        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
+        struct bpf_offloaded_map *offmap;
+        struct bpf_func_state *state;
+        struct nfp_bpf_map *nfp_map;
+        int off, i;
+
+        state = env->cur_state->frame[reg3->frameno];
+
+        /* We need to record each time update happens with non-zero words,
+         * in case such word is used in atomic operations.
+         * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before.
+         */
+
+        offmap = map_to_offmap(reg1->map_ptr);
+        nfp_map = offmap->dev_priv;
+        off = reg3->off + reg3->var_off.value;
+
+        for (i = 0; i < offmap->map.value_size; i++) {
+                struct bpf_stack_state *stack_entry;
+                unsigned int soff;
+
+                soff = -(off + i) - 1;
+                stack_entry = &state->stack[soff / BPF_REG_SIZE];
+                if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
+                        continue;
+
+                if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
+                        pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
+                                i, soff);
+                        return false;
+                }
+                nfp_map->use_map[i / 4].non_zero_update = 1;
+        }
+
+        return true;
+}
+
 static int
 nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
                      const struct bpf_reg_state *reg,
@@ -171,7 +211,8 @@ nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
                                           bpf->helpers.map_update, reg1) ||
                     !nfp_bpf_stack_arg_ok("map_update", env, reg2,
                                           meta->func_id ? &meta->arg2 : NULL) ||
-                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
+                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
+                    !nfp_bpf_map_update_value_ok(env))
                         return -EOPNOTSUPP;
                 break;
 
@@ -352,15 +393,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
                           struct nfp_bpf_map *nfp_map,
                           unsigned int off, enum nfp_bpf_map_use use)
 {
-        if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
-            nfp_map->use_map[off / 4] != use) {
+        if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
+            nfp_map->use_map[off / 4].type != use) {
                 pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
-                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
+                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
                         nfp_bpf_map_use_name(use), off);
                 return -EOPNOTSUPP;
         }
 
-        nfp_map->use_map[off / 4] = use;
+        if (nfp_map->use_map[off / 4].non_zero_update &&
+            use == NFP_MAP_USE_ATOMIC_CNT) {
+                pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
+                        off);
+                return -EOPNOTSUPP;
+        }
+
+        nfp_map->use_map[off / 4].type = use;
 
         return 0;
 }
@@ -699,9 +747,9 @@ continue_subprog:
 
 static int nfp_bpf_finalize(struct bpf_verifier_env *env)
 {
-        unsigned int stack_size, stack_needed;
         struct bpf_subprog_info *info;
         struct nfp_prog *nfp_prog;
+        unsigned int max_stack;
         struct nfp_net *nn;
         int i;
 
@@ -729,11 +777,12 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)
         }
 
         nn = netdev_priv(env->prog->aux->offload->netdev);
-        stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
-        stack_needed = nfp_bpf_get_stack_usage(nfp_prog, env->prog->len);
-        if (stack_needed > stack_size) {
+        max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+        nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog,
+                                                       env->prog->len);
+        if (nfp_prog->stack_size > max_stack) {
                 pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
-                        stack_needed, stack_size);
+                        nfp_prog->stack_size, max_stack);
                 return -EOPNOTSUPP;
         }
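
[Editor's note: to make the offload.c and verifier.c changes above
concrete, here is a hedged userspace sketch of the rule they enforce;
the two init functions are hypothetical, while bpf_map_update_elem() is
libbpf's real wrapper. A map value word that a program later uses as an
atomic counter (BPF_XADD) must only ever have been written as zero,
since a non-zero host-endian value preloaded into the NIC would be byte
swapped, or not, ambiguously.]

  #include <linux/types.h>
  #include <bpf/bpf.h>

  /* Fine: zero is byte order neutral, cpu_to_be32(0) == 0, so
   * nfp_map_bpf_byte_swap_record() leaves non_zero_update clear.
   */
  static int counter_init_ok(int map_fd, __u32 key)
  {
          __u64 zero = 0;

          return bpf_map_update_elem(map_fd, &key, &zero, BPF_ANY);
  }

  /* Not fine: 5 != cpu_to_be32(5), so non_zero_update is recorded for
   * the word; if a program later uses that word with BPF_XADD, offload
   * verification now fails with "atomic counter in map value may
   * already be initialized to non-zero value".
   */
  static int counter_init_bad(int map_fd, __u32 key)
  {
          __u64 val = 5;

          return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
  }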