author     Daniel Borkmann <daniel@iogearbox.net>    2019-08-17 23:18:54 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>    2019-08-17 23:18:54 +0200
commit     8e46c3534a550782405206c5a1be8d8a721bf45a (patch)
tree       58bd99ef6f3fc2528b6e0d41801238af71e7374e /kernel
parent     fae55527ac1164b66bee983a4d82ade2bfedb332 (diff)
parent     c3bbf176fbad5d7470f8a4f311f7c11126ad36c2 (diff)
Merge branch 'bpf-sk-storage-clone'
Stanislav Fomichev says:

====================
Currently there is no way to propagate sk storage from the listener
socket to a newly accepted one. Consider the following use case:

        fd = socket();
        setsockopt(fd, SOL_IP, IP_TOS, ...);
        /* ^^^ setsockopt BPF program triggers here and saves something
         * into sk storage of the listener.
         */

        listen(fd, ...);
        while (client = accept(fd)) {
                /* At this point all association between listener
                 * socket and newly accepted one is gone. New
                 * socket will not have any sk storage attached.
                 */
        }

Let's add a new BPF_F_CLONE flag that can be specified when creating a
socket storage map. This new flag indicates that the map contents
should be cloned when the socket is cloned.

v4:
* drop 'goto err' in bpf_sk_storage_clone (Yonghong Song)
* add comment about race with bpf_sk_storage_map_free to the
  bpf_sk_storage_clone side as well (Daniel Borkmann)

v3:
* make sure BPF_F_NO_PREALLOC is always present when creating
  a map (Martin KaFai Lau)
* don't call bpf_sk_storage_free explicitly, rely on
  sk_free_unlock_clone to do the cleanup (Martin KaFai Lau)

v2:
* remove spinlocks around selem_link_map/sk (Martin KaFai Lau)
* BPF_F_CLONE on a map, not selem (Martin KaFai Lau)
* hold a map while cloning (Martin KaFai Lau)
* use BTF maps in selftests (Yonghong Song)
* do proper cleanup in selftests; don't call close(-1) (Yonghong Song)
* export bpf_map_inc_not_zero
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
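For context, a socket storage map opting into the new behavior could be
declared in BTF style roughly as follows. This is a sketch assuming
libbpf's __uint/__type map-definition macros (the selftests in this
series use BTF-defined maps); the map name and value type here are
illustrative, not taken from the series:

        /* Hypothetical sk storage map that is cloned into accepted
         * sockets. BPF_F_NO_PREALLOC remains mandatory for sk storage
         * maps; BPF_F_CLONE is the flag added by this series.
         */
        struct {
                __uint(type, BPF_MAP_TYPE_SK_STORAGE);
                __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
                __type(key, int);
                __type(value, __u64);
        } sk_stg_map SEC(".maps");

With BPF_F_CLONE set, storage written on the listener (e.g. via
bpf_sk_storage_get() with BPF_SK_STORAGE_GET_F_CREATE) is copied into
each socket returned by accept().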
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/bpf/syscall.c   16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d141f16f6fa..cf8052b016e7 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -683,8 +683,8 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
}
/* map_idr_lock should have been held */
-static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
- bool uref)
+static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
+ bool uref)
{
int refold;
@@ -704,6 +704,16 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
return map;
}
+struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
+{
+ spin_lock_bh(&map_idr_lock);
+ map = __bpf_map_inc_not_zero(map, uref);
+ spin_unlock_bh(&map_idr_lock);
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
+
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
return -ENOTSUPP;
@@ -2177,7 +2187,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
spin_lock_bh(&map_idr_lock);
map = idr_find(&map_idr, id);
if (map)
- map = bpf_map_inc_not_zero(map, true);
+ map = __bpf_map_inc_not_zero(map, true);
else
map = ERR_PTR(-ENOENT);
spin_unlock_bh(&map_idr_lock);
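The refactor above splits the old function in two: __bpf_map_inc_not_zero()
keeps the lock-free body for callers that already hold map_idr_lock (such as
bpf_map_get_fd_by_id()), while the exported bpf_map_inc_not_zero() takes the
lock itself. A simplified caller sketch, loosely modeled on the
bpf_sk_storage_clone() path this export enables (smap and the surrounding
flow are hypothetical stand-ins, not code from this diff):

        struct bpf_map *map;

        /* Try to take a reference on a map whose refcount may
         * concurrently drop to zero; bpf_map_inc_not_zero() takes
         * map_idr_lock internally and returns ERR_PTR(-ENOENT) if
         * the map is already being freed.
         */
        map = bpf_map_inc_not_zero(&smap->map, false);
        if (IS_ERR(map))
                return PTR_ERR(map); /* map already dying, skip it */

        /* ... clone the storage element into the new socket ... */

        bpf_map_put(&smap->map);

Taking the reference this way closes the race with bpf_sk_storage_map_free()
that the v4 changelog above refers to: the clone path only proceeds if the
map is still live.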