author    David S. Miller <davem@davemloft.net>  2020-02-21 15:22:45 -0800
committer David S. Miller <davem@davemloft.net>  2020-02-21 15:22:45 -0800
commit    b105e8e281ac2dbea4229982ad57fbefab05963d (patch)
tree      58ea551205dd646d809c789fccb2381b60b00d9e /include/net
parent    e65ee2fb54d4745d7b7d9061d7fe33c5c5bf3b06 (diff)
parent    eb1e1478b6f4e70d99fee3f49bb7f7143c8c871d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2020-02-21

The following pull-request contains BPF updates for your *net-next* tree.

We've added 25 non-merge commits during the last 4 day(s) which contain
a total of 33 files changed, 2433 insertions(+), 161 deletions(-).

The main changes are:

1) Allow for adding TCP listen sockets into sock_map/hash so they can be
   used with reuseport BPF programs, from Jakub Sitnicki.

2) Add a new bpf_program__set_attach_target() helper for adding libbpf
   support to specify the tracepoint/function dynamically, from Eelco
   Chaudron.

3) Add bpf_read_branch_records() BPF helper which helps use cases like
   profile guided optimizations, from Daniel Xu.

4) Enable bpf_perf_event_read_value() in all tracing programs, from
   Song Liu.

5) Relax BTF mandatory check if only used for libbpf itself, e.g. to
   process BTF defined maps, from Andrii Nakryiko.

6) Move BPF selftests -mcpu compilation attribute from 'probe' to 'v3'
   as it has been observed that the former fails in envs with low
   memlock, from Yonghong Song.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
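As one illustration of change 2) above, a minimal userspace sketch of the new libbpf setter. The object file name "prog.bpf.o", the program name "trace_enter", and the target function "tcp_sendmsg" are hypothetical placeholders, not taken from this series; error handling is reduced to early returns.

/* Sketch: choose an fentry/tracing program's attach target at runtime
 * instead of relying on the function hard-coded in its SEC() annotation.
 */
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (libbpf_get_error(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "trace_enter");
	if (!prog)
		return 1;

	/* attach_prog_fd is 0 because the target is a kernel function,
	 * not another BPF program.
	 */
	if (bpf_program__set_attach_target(prog, 0, "tcp_sendmsg"))
		return 1;

	return bpf_object__load(obj);
}

The key point is that the target must be set between open and load, since the attach-target BTF ID is resolved at load time.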
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/sock.h            | 37
-rw-r--r--  include/net/sock_reuseport.h  |  2
-rw-r--r--  include/net/tcp.h             |  7
3 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 328564525526..b5cca7bae69b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -527,10 +527,43 @@ enum sk_pacing {
 	SK_PACING_FQ		= 2,
 };
 
+/* Pointer stored in sk_user_data might not be suitable for copying
+ * when cloning the socket. For instance, it can point to a reference
+ * counted object. sk_user_data bottom bit is set if pointer must not
+ * be copied.
+ */
+#define SK_USER_DATA_NOCOPY	1UL
+#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY)
+
+/**
+ * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
+ * @sk: socket
+ */
+static inline bool sk_user_data_is_nocopy(const struct sock *sk)
+{
+	return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
+}
+
 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
 
-#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
-#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)
+#define rcu_dereference_sk_user_data(sk)				\
+({									\
+	void *__tmp = rcu_dereference(__sk_user_data((sk)));		\
+	(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK);		\
+})
+#define rcu_assign_sk_user_data(sk, ptr)				\
+({									\
+	uintptr_t __tmp = (uintptr_t)(ptr);				\
+	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
+	rcu_assign_pointer(__sk_user_data((sk)), __tmp);		\
+})
+#define rcu_assign_sk_user_data_nocopy(sk, ptr)				\
+({									\
+	uintptr_t __tmp = (uintptr_t)(ptr);				\
+	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
+	rcu_assign_pointer(__sk_user_data((sk)),			\
+			   __tmp | SK_USER_DATA_NOCOPY);		\
+})
 
 /*
  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
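The hunk above works because kmalloc'ed objects are at least word-aligned, so bit 0 of a valid sk_user_data pointer is always zero and free to carry a flag. A standalone userspace analogue of the same tagging scheme, with hypothetical names that merely mirror the kernel macros:

/* Sketch of low-bit pointer tagging: stash a "do not copy on clone"
 * flag in bit 0 and mask it off before dereferencing.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NOCOPY	1UL
#define PTRMASK	(~NOCOPY)

static void *tag_nocopy(void *p)
{
	return (void *)((uintptr_t)p | NOCOPY);
}

static int is_nocopy(const void *tagged)
{
	return (uintptr_t)tagged & NOCOPY;
}

static void *untag(void *tagged)
{
	return (void *)((uintptr_t)tagged & PTRMASK);
}

int main(void)
{
	int obj = 42;			/* aligned, so bit 0 is free */
	void *user_data = tag_nocopy(&obj);

	assert(is_nocopy(user_data));	/* flag survives in bit 0 */
	assert(untag(user_data) == &obj); /* real pointer recoverable */
	printf("%d\n", *(int *)untag(user_data));
	return 0;
}

The same masking discipline is what rcu_dereference_sk_user_data() applies before handing the pointer back, so existing callers never see the flag bit.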
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 43f4a818d88f..3ecaa15d1850 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -55,6 +55,4 @@ static inline bool reuseport_has_conns(struct sock *sk, bool set)
 	return ret;
 }
 
-int reuseport_get_id(struct sock_reuseport *reuse);
-
 #endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a5ea27df3c2b..07f947cc80e6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2203,6 +2203,13 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		    int nonblock, int flags, int *addr_len);
 int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
 		      struct msghdr *msg, int len, int flags);
+#ifdef CONFIG_NET_SOCK_MSG
+void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#else
+static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+{
+}
+#endif
 
 /* Call BPF_SOCK_OPS program that returns an int. If the return value
  * is < 0, then the BPF op failed (for example if the loaded BPF
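The tcp_bpf_clone() hunk uses the common kernel idiom of pairing a real declaration with an empty static inline stub, so callers in the socket-clone path need no #ifdef of their own. A generic sketch of that idiom, with a hypothetical CONFIG_FOO feature and foo_clone()/clone_hook() names:

/* Hypothetical CONFIG_FOO feature mirroring the tcp_bpf_clone() pattern. */
#ifdef CONFIG_FOO
void foo_clone(const struct sock *sk, struct sock *newsk);	/* real body in foo.c */
#else
static inline void foo_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

/* Callers stay #ifdef-free; with CONFIG_FOO unset the compiler inlines
 * the empty stub and the call vanishes entirely.
 */
static inline void clone_hook(const struct sock *sk, struct sock *newsk)
{
	foo_clone(sk, newsk);
}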