author     David S. Miller <davem@davemloft.net>    2018-03-31 23:33:04 -0400
committer  David S. Miller <davem@davemloft.net>    2018-03-31 23:33:04 -0400
commit     d4069fe6fc91d496e4d1fe38b1a8b71aeb181c50 (patch)
tree       2ca8148a4b4bc20965e98e03abd417c57bc0777e /kernel
parent     70ae7222c61d4f19c844c8fe75f053f8976b9552 (diff)
parent     7828f20e3779e4e85e55371e0e43f5006a15fb41 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-03-31

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add raw BPF tracepoint API in order to have a BPF program type that can
   access kernel internal arguments of the tracepoints in their raw form
   similar to kprobes based BPF programs. This infrastructure also adds a new
   BPF_RAW_TRACEPOINT_OPEN command to BPF syscall which returns an anon-inode
   backed fd for the tracepoint object that allows for automatic detach of the
   BPF program resp. unregistering of the tracepoint probe on fd release, from
   Alexei.

2) Add new BPF cgroup hooks at bind() and connect() entry in order to allow
   BPF programs to reject, inspect or modify user space passed struct
   sockaddr, and as well a hook at post bind time once the port has been
   allocated. They are used in FB's container management engine for
   implementing policy, replacing fragile LD_PRELOAD wrapper intercepting
   bind() and connect() calls that only works in limited scenarios like glibc
   based apps but not for other runtimes in containerized applications, from
   Andrey.

3) BPF_F_INGRESS flag support has been added to sockmap programs for their
   redirect helper call bringing it in line with cls_bpf based programs.
   Support is added for both variants of sockmap programs, meaning for tx ULP
   hooks as well as recv skb hooks, from John.

4) Various improvements on BPF side for the nfp driver, besides others this
   work adds BPF map update and delete helper call support from the datapath,
   JITing of 32 and 64 bit XADD instructions as well as offload support of
   bpf_get_prandom_u32() call. Initial implementation of nfp packet cache has
   been tackled that optimizes memory access (see merge commit for further
   details), from Jakub and Jiong.

5) Removal of struct bpf_verifier_env argument from the print_bpf_insn() API
   has been done in order to prepare to use print_bpf_insn() soon out of perf
   tool directly. This makes the print_bpf_insn() API more generic and pushes
   the env into private data. bpftool is adjusted as well with the
   print_bpf_insn() argument removal, from Jiri.

6) Couple of cleanups and prep work for the upcoming BTF (BPF Type Format).
   The latter will reuse the current BPF verifier log as well, thus
   bpf_verifier_log() is further generalized, from Martin.

7) For bpf_getsockopt() and bpf_setsockopt() helpers, IPv4 IP_TOS read and
   write support has been added in similar fashion to existing IPv6
   IPV6_TCLASS socket option we already have, from Nikita.

8) Fixes in recent sockmap scatterlist API usage, which did not use
   sg_init_table() for initialization thus triggering a BUG_ON() in
   scatterlist API when CONFIG_DEBUG_SG was enabled. This adds and uses a
   small helper sg_init_marker() to properly handle the affected cases, from
   Prashant.

9) Let the BPF core follow IDR code convention and therefore use the
   idr_preload() and idr_preload_end() helpers, which would also help
   idr_alloc_cyclic() under GFP_ATOMIC to better succeed under memory
   pressure, from Shaohua.

10) Last but not least, a spelling fix in an error message for the BPF cookie
    UID helper under BPF sample code, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
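As a rough illustration of item 1), the sketch below shows how user space might exercise the new BPF_RAW_TRACEPOINT_OPEN command. It assumes a BPF_PROG_TYPE_RAW_TRACEPOINT program has already been loaded as prog_fd and that the matching uapi header is in use; the raw_tp_open() wrapper name is made up for this example.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int raw_tp_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)name;	/* e.g. "sched_switch" */
	attr.raw_tracepoint.prog_fd = prog_fd;

	/* Returns an anon-inode backed fd; closing it detaches the program
	 * and unregisters the tracepoint probe, as described above.
	 */
	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}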
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/cgroup.c       |  39
-rw-r--r--  kernel/bpf/disasm.c       |  52
-rw-r--r--  kernel/bpf/disasm.h       |   5
-rw-r--r--  kernel/bpf/sockmap.c      | 303
-rw-r--r--  kernel/bpf/syscall.c      | 184
-rw-r--r--  kernel/bpf/verifier.c     |  64
-rw-r--r--  kernel/trace/bpf_trace.c  | 206
7 files changed, 764 insertions(+), 89 deletions(-)
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index c1c0b60d3f2f..43171a0bb02b 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -495,6 +495,42 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
/**
+ * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
+ * the sockaddr provided by user
+ * @sk: sock struct that will use sockaddr
+ * @uaddr: sockaddr struct provided by user
+ * @type: The type of program to be executed
+ *
+ * socket is expected to be of type INET or INET6.
+ *
+ * This function will return %-EPERM if an attached program is found and
+ * returned value != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
+ enum bpf_attach_type type)
+{
+ struct bpf_sock_addr_kern ctx = {
+ .sk = sk,
+ .uaddr = uaddr,
+ };
+ struct cgroup *cgrp;
+ int ret;
+
+ /* Check socket family since not all sockets represent network
+ * endpoint (e.g. AF_UNIX).
+ */
+ if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
+ return 0;
+
+ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
+
+ return ret == 1 ? 0 : -EPERM;
+}
+EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
+
+/**
* __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
* @sk: socket to get cgroup from
* @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
@@ -545,7 +581,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
static const struct bpf_func_proto *
-cgroup_dev_func_proto(enum bpf_func_id func_id)
+cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -566,6 +602,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id)
static bool cgroup_dev_is_valid_access(int off, int size,
enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
const int size_default = sizeof(__u32);
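For orientation, a hedged sketch of the kind of call site the new helper above is meant for; the actual bind()/connect() path changes live outside kernel/ and are not in this diff, so the surrounding function here is hypothetical.

/* Hypothetical hook site: run the attached cgroup programs on the
 * user-supplied sockaddr before the kernel acts on it. A program that
 * does not return 1 turns into -EPERM for the caller.
 */
static int example_check_bind(struct sock *sk, struct sockaddr *uaddr)
{
	return __cgroup_bpf_run_filter_sock_addr(sk, uaddr,
						 BPF_CGROUP_INET4_BIND);
}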
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 8740406df2cd..d6b76377cb6e 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -113,16 +113,16 @@ static const char *const bpf_jmp_string[16] = {
};
static void print_bpf_end_insn(bpf_insn_print_t verbose,
- struct bpf_verifier_env *env,
+ void *private_data,
const struct bpf_insn *insn)
{
- verbose(env, "(%02x) r%d = %s%d r%d\n", insn->code, insn->dst_reg,
+ verbose(private_data, "(%02x) r%d = %s%d r%d\n",
+ insn->code, insn->dst_reg,
BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
insn->imm, insn->dst_reg);
}
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
- struct bpf_verifier_env *env,
const struct bpf_insn *insn,
bool allow_ptr_leaks)
{
@@ -132,23 +132,23 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_OP(insn->code) == BPF_END) {
if (class == BPF_ALU64)
- verbose(env, "BUG_alu64_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code);
else
- print_bpf_end_insn(verbose, env, insn);
+ print_bpf_end_insn(verbose, cbs->private_data, insn);
} else if (BPF_OP(insn->code) == BPF_NEG) {
- verbose(env, "(%02x) r%d = %s-r%d\n",
+ verbose(cbs->private_data, "(%02x) r%d = %s-r%d\n",
insn->code, insn->dst_reg,
class == BPF_ALU ? "(u32) " : "",
insn->dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(env, "(%02x) %sr%d %s %sr%d\n",
+ verbose(cbs->private_data, "(%02x) %sr%d %s %sr%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->src_reg);
} else {
- verbose(env, "(%02x) %sr%d %s %s%d\n",
+ verbose(cbs->private_data, "(%02x) %sr%d %s %s%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
@@ -157,46 +157,46 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
}
} else if (class == BPF_STX) {
if (BPF_MODE(insn->code) == BPF_MEM)
- verbose(env, "(%02x) *(%s *)(r%d %+d) = r%d\n",
+ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->src_reg);
else if (BPF_MODE(insn->code) == BPF_XADD)
- verbose(env, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
+ verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg, insn->off,
insn->src_reg);
else
- verbose(env, "BUG_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_%02x\n", insn->code);
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
- verbose(env, "BUG_st_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
return;
}
- verbose(env, "(%02x) *(%s *)(r%d %+d) = %d\n",
+ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
- verbose(env, "BUG_ldx_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
return;
}
- verbose(env, "(%02x) r%d = *(%s *)(r%d %+d)\n",
+ verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
insn->code, insn->dst_reg,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->off);
} else if (class == BPF_LD) {
if (BPF_MODE(insn->code) == BPF_ABS) {
- verbose(env, "(%02x) r0 = *(%s *)skb[%d]\n",
+ verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IND) {
- verbose(env, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
+ verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm);
@@ -212,12 +212,12 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
if (map_ptr && !allow_ptr_leaks)
imm = 0;
- verbose(env, "(%02x) r%d = %s\n",
+ verbose(cbs->private_data, "(%02x) r%d = %s\n",
insn->code, insn->dst_reg,
__func_imm_name(cbs, insn, imm,
tmp, sizeof(tmp)));
} else {
- verbose(env, "BUG_ld_%02x\n", insn->code);
+ verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
return;
}
} else if (class == BPF_JMP) {
@@ -227,35 +227,35 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
char tmp[64];
if (insn->src_reg == BPF_PSEUDO_CALL) {
- verbose(env, "(%02x) call pc%s\n",
+ verbose(cbs->private_data, "(%02x) call pc%s\n",
insn->code,
__func_get_name(cbs, insn,
tmp, sizeof(tmp)));
} else {
strcpy(tmp, "unknown");
- verbose(env, "(%02x) call %s#%d\n", insn->code,
+ verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
__func_get_name(cbs, insn,
tmp, sizeof(tmp)),
insn->imm);
}
} else if (insn->code == (BPF_JMP | BPF_JA)) {
- verbose(env, "(%02x) goto pc%+d\n",
+ verbose(cbs->private_data, "(%02x) goto pc%+d\n",
insn->code, insn->off);
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
- verbose(env, "(%02x) exit\n", insn->code);
+ verbose(cbs->private_data, "(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(env, "(%02x) if r%d %s r%d goto pc%+d\n",
+ verbose(cbs->private_data, "(%02x) if r%d %s r%d goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->src_reg, insn->off);
} else {
- verbose(env, "(%02x) if r%d %s 0x%x goto pc%+d\n",
+ verbose(cbs->private_data, "(%02x) if r%d %s 0x%x goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
} else {
- verbose(env, "(%02x) %s\n",
+ verbose(cbs->private_data, "(%02x) %s\n",
insn->code, bpf_class_string[class]);
}
}
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index 266fe8ee542b..e1324a834a24 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -22,14 +22,12 @@
#include <string.h>
#endif
-struct bpf_verifier_env;
-
extern const char *const bpf_alu_string[16];
extern const char *const bpf_class_string[8];
const char *func_id_name(int id);
-typedef __printf(2, 3) void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+typedef __printf(2, 3) void (*bpf_insn_print_t)(void *private_data,
const char *, ...);
typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
const struct bpf_insn *insn);
@@ -45,7 +43,6 @@ struct bpf_insn_cbs {
};
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
- struct bpf_verifier_env *env,
const struct bpf_insn *insn,
bool allow_ptr_leaks);
#endif
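A hedged sketch of what the decoupled callback allows for a non-verifier user such as perf (item 5 above); print_to_stdout() and dump_insn() are illustrative names, not part of this series.

#include <stdarg.h>
#include <stdio.h>

/* The print callback now only takes an opaque private_data pointer, so a
 * tool outside the verifier can supply its own state (or none at all).
 */
static void print_to_stdout(void *private_data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

static void dump_insn(const struct bpf_insn *insn)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print	= print_to_stdout,
		.private_data	= NULL,
	};

	print_bpf_insn(&cbs, insn, true);
}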
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 69c5bccabd22..d2bda5aa25d7 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -41,6 +41,8 @@
#include <linux/mm.h>
#include <net/strparser.h>
#include <net/tcp.h>
+#include <linux/ptr_ring.h>
+#include <net/inet_common.h>
#define SOCK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -82,6 +84,7 @@ struct smap_psock {
int sg_size;
int eval;
struct sk_msg_buff *cork;
+ struct list_head ingress;
struct strparser strp;
struct bpf_prog *bpf_tx_msg;
@@ -103,6 +106,8 @@ struct smap_psock {
};
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
+static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len);
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
@@ -112,6 +117,21 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+static bool bpf_tcp_stream_read(const struct sock *sk)
+{
+ struct smap_psock *psock;
+ bool empty = true;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto out;
+ empty = list_empty(&psock->ingress);
+out:
+ rcu_read_unlock();
+ return !empty;
+}
+
static struct proto tcp_bpf_proto;
static int bpf_tcp_init(struct sock *sk)
{
@@ -135,6 +155,8 @@ static int bpf_tcp_init(struct sock *sk)
if (psock->bpf_tx_msg) {
tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
+ tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
+ tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
}
sk->sk_prot = &tcp_bpf_proto;
@@ -170,6 +192,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
{
void (*close_fun)(struct sock *sk, long timeout);
struct smap_psock_map_entry *e, *tmp;
+ struct sk_msg_buff *md, *mtmp;
struct smap_psock *psock;
struct sock *osk;
@@ -188,6 +211,12 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
close_fun = psock->save_close;
write_lock_bh(&sk->sk_callback_lock);
+ list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
+ list_del(&md->list);
+ free_start_sg(psock->sock, md);
+ kfree(md);
+ }
+
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
osk = cmpxchg(e->entry, sk, NULL);
if (osk == sk) {
@@ -312,7 +341,7 @@ retry:
md->sg_start++;
if (md->sg_start == MAX_SKB_FRAGS)
md->sg_start = 0;
- memset(sg, 0, sizeof(*sg));
+ sg_init_table(sg, 1);
if (md->sg_start == md->sg_end)
break;
@@ -468,6 +497,72 @@ verdict:
return _rc;
}
+static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
+ struct smap_psock *psock,
+ struct sk_msg_buff *md, int flags)
+{
+ bool apply = apply_bytes;
+ size_t size, copied = 0;
+ struct sk_msg_buff *r;
+ int err = 0, i;
+
+ r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
+ if (unlikely(!r))
+ return -ENOMEM;
+
+ lock_sock(sk);
+ r->sg_start = md->sg_start;
+ i = md->sg_start;
+
+ do {
+ r->sg_data[i] = md->sg_data[i];
+
+ size = (apply && apply_bytes < md->sg_data[i].length) ?
+ apply_bytes : md->sg_data[i].length;
+
+ if (!sk_wmem_schedule(sk, size)) {
+ if (!copied)
+ err = -ENOMEM;
+ break;
+ }
+
+ sk_mem_charge(sk, size);
+ r->sg_data[i].length = size;
+ md->sg_data[i].length -= size;
+ md->sg_data[i].offset += size;
+ copied += size;
+
+ if (md->sg_data[i].length) {
+ get_page(sg_page(&r->sg_data[i]));
+ r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
+ } else {
+ i++;
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ r->sg_end = i;
+ }
+
+ if (apply) {
+ apply_bytes -= size;
+ if (!apply_bytes)
+ break;
+ }
+ } while (i != md->sg_end);
+
+ md->sg_start = i;
+
+ if (!err) {
+ list_add_tail(&r->list, &psock->ingress);
+ sk->sk_data_ready(sk);
+ } else {
+ free_start_sg(sk, r);
+ kfree(r);
+ }
+
+ release_sock(sk);
+ return err;
+}
+
static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
struct sk_msg_buff *md,
int flags)
@@ -475,6 +570,7 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
struct smap_psock *psock;
struct scatterlist *sg;
int i, err, free = 0;
+ bool ingress = !!(md->flags & BPF_F_INGRESS);
sg = md->sg_data;
@@ -487,9 +583,14 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
goto out_rcu;
rcu_read_unlock();
- lock_sock(sk);
- err = bpf_tcp_push(sk, send, md, flags, false);
- release_sock(sk);
+
+ if (ingress) {
+ err = bpf_tcp_ingress(sk, send, psock, md, flags);
+ } else {
+ lock_sock(sk);
+ err = bpf_tcp_push(sk, send, md, flags, false);
+ release_sock(sk);
+ }
smap_release_sock(psock, sk);
if (unlikely(err))
goto out;
@@ -623,6 +724,92 @@ out_err:
return err;
}
+static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len)
+{
+ struct iov_iter *iter = &msg->msg_iter;
+ struct smap_psock *psock;
+ int copied = 0;
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock))
+ goto out;
+
+ if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
+ goto out;
+ rcu_read_unlock();
+
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+
+ lock_sock(sk);
+ while (copied != len) {
+ struct scatterlist *sg;
+ struct sk_msg_buff *md;
+ int i;
+
+ md = list_first_entry_or_null(&psock->ingress,
+ struct sk_msg_buff, list);
+ if (unlikely(!md))
+ break;
+ i = md->sg_start;
+ do {
+ struct page *page;
+ int n, copy;
+
+ sg = &md->sg_data[i];
+ copy = sg->length;
+ page = sg_page(sg);
+
+ if (copied + copy > len)
+ copy = len - copied;
+
+ n = copy_page_to_iter(page, sg->offset, copy, iter);
+ if (n != copy) {
+ md->sg_start = i;
+ release_sock(sk);
+ smap_release_sock(psock, sk);
+ return -EFAULT;
+ }
+
+ copied += copy;
+ sg->offset += copy;
+ sg->length -= copy;
+ sk_mem_uncharge(sk, copy);
+
+ if (!sg->length) {
+ i++;
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ if (!md->skb)
+ put_page(page);
+ }
+ if (copied == len)
+ break;
+ } while (i != md->sg_end);
+ md->sg_start = i;
+
+ if (!sg->length && md->sg_start == md->sg_end) {
+ list_del(&md->list);
+ if (md->skb)
+ consume_skb(md->skb);
+ kfree(md);
+ }
+ }
+
+ release_sock(sk);
+ smap_release_sock(psock, sk);
+ return copied;
+out:
+ rcu_read_unlock();
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+}
+
+
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
@@ -656,7 +843,7 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
}
sg = md.sg_data;
- sg_init_table(sg, MAX_SKB_FRAGS);
+ sg_init_marker(sg, MAX_SKB_FRAGS);
rcu_read_unlock();
lock_sock(sk);
@@ -763,10 +950,14 @@ static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
lock_sock(sk);
- if (psock->cork_bytes)
+ if (psock->cork_bytes) {
m = psock->cork;
- else
+ sg = &m->sg_data[m->sg_end];
+ } else {
m = &md;
+ sg = m->sg_data;
+ sg_init_marker(sg, MAX_SKB_FRAGS);
+ }
/* Catch case where ring is full and sendpage is stalled. */
if (unlikely(m->sg_end == m->sg_start &&
@@ -774,7 +965,6 @@ static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
goto out_err;
psock->sg_size += size;
- sg = &m->sg_data[m->sg_end];
sg_set_page(sg, page, size, offset);
get_page(page);
m->sg_copy[m->sg_end] = true;
@@ -861,27 +1051,72 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
__SK_DROP;
}
+static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
+{
+ struct sock *sk = psock->sock;
+ int copied = 0, num_sg;
+ struct sk_msg_buff *r;
+
+ r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
+ if (unlikely(!r))
+ return -EAGAIN;
+
+ if (!sk_rmem_schedule(sk, skb, skb->len)) {
+ kfree(r);
+ return -EAGAIN;
+ }
+
+ sg_init_table(r->sg_data, MAX_SKB_FRAGS);
+ num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
+ if (unlikely(num_sg < 0)) {
+ kfree(r);
+ return num_sg;
+ }
+ sk_mem_charge(sk, skb->len);
+ copied = skb->len;
+ r->sg_start = 0;
+ r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
+ r->skb = skb;
+ list_add_tail(&r->list, &psock->ingress);
+ sk->sk_data_ready(sk);
+ return copied;
+}
+
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
+ struct smap_psock *peer;
struct sock *sk;
+ __u32 in;
int rc;
rc = smap_verdict_func(psock, skb);
switch (rc) {
case __SK_REDIRECT:
sk = do_sk_redirect_map(skb);
- if (likely(sk)) {
- struct smap_psock *peer = smap_psock_sk(sk);
-
- if (likely(peer &&
- test_bit(SMAP_TX_RUNNING, &peer->state) &&
- !sock_flag(sk, SOCK_DEAD) &&
- sock_writeable(sk))) {
- skb_set_owner_w(skb, sk);
- skb_queue_tail(&peer->rxqueue, skb);
- schedule_work(&peer->tx_work);
- break;
- }
+ if (!sk) {
+ kfree_skb(skb);
+ break;
+ }
+
+ peer = smap_psock_sk(sk);
+ in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
+
+ if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
+ !test_bit(SMAP_TX_RUNNING, &peer->state))) {
+ kfree_skb(skb);
+ break;
+ }
+
+ if (!in && sock_writeable(sk)) {
+ skb_set_owner_w(skb, sk);
+ skb_queue_tail(&peer->rxqueue, skb);
+ schedule_work(&peer->tx_work);
+ break;
+ } else if (in &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
+ skb_queue_tail(&peer->rxqueue, skb);
+ schedule_work(&peer->tx_work);
+ break;
}
/* Fall through and free skb otherwise */
case __SK_DROP:
@@ -943,15 +1178,23 @@ static void smap_tx_work(struct work_struct *w)
}
while ((skb = skb_dequeue(&psock->rxqueue))) {
+ __u32 flags;
+
rem = skb->len;
off = 0;
start:
+ flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
do {
- if (likely(psock->sock->sk_socket))
- n = skb_send_sock_locked(psock->sock,
- skb, off, rem);
- else
+ if (likely(psock->sock->sk_socket)) {
+ if (flags)
+ n = smap_do_ingress(psock, skb);
+ else
+ n = skb_send_sock_locked(psock->sock,
+ skb, off, rem);
+ } else {
n = -EINVAL;
+ }
+
if (n <= 0) {
if (n == -EAGAIN) {
/* Retry when space is available */
@@ -969,7 +1212,9 @@ start:
rem -= n;
off += n;
} while (rem);
- kfree_skb(skb);
+
+ if (!flags)
+ kfree_skb(skb);
}
out:
release_sock(psock->sock);
@@ -1107,6 +1352,7 @@ static void sock_map_remove_complete(struct bpf_stab *stab)
static void smap_gc_work(struct work_struct *w)
{
struct smap_psock_map_entry *e, *tmp;
+ struct sk_msg_buff *md, *mtmp;
struct smap_psock *psock;
psock = container_of(w, struct smap_psock, gc_work);
@@ -1131,6 +1377,12 @@ static void smap_gc_work(struct work_struct *w)
kfree(psock->cork);
}
+ list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
+ list_del(&md->list);
+ free_start_sg(psock->sock, md);
+ kfree(md);
+ }
+
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
list_del(&e->list);
kfree(e);
@@ -1160,6 +1412,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
INIT_WORK(&psock->tx_work, smap_tx_work);
INIT_WORK(&psock->gc_work, smap_gc_work);
INIT_LIST_HEAD(&psock->maps);
+ INIT_LIST_HEAD(&psock->ingress);
refcount_set(&psock->refcnt, 1);
rcu_assign_sk_user_data(sock, psock);
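To show what feeds the ingress queue added above, a hedged BPF-side sketch of the redirect helper with the new BPF_F_INGRESS flag; the section name, struct bpf_map_def layout and helper declaration follow the selftests' bpf_helpers.h conventions and are assumptions here.

/* Hypothetical sk_skb verdict program: redirect traffic to the ingress
 * queue of the socket stored at key 0 of an existing sockmap.
 */
struct bpf_map_def SEC("maps") sock_map = {
	.type		= BPF_MAP_TYPE_SOCKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 2,
};

SEC("sk_skb")
int ingress_redirect(struct __sk_buff *skb)
{
	return bpf_sk_redirect_map(skb, &sock_map, 0, BPF_F_INGRESS);
}

The sendmsg/ULP variant would use bpf_msg_redirect_map() with the same flag.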
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index dd172ee16716..0244973ee544 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -203,11 +203,13 @@ static int bpf_map_alloc_id(struct bpf_map *map)
{
int id;
+ idr_preload(GFP_KERNEL);
spin_lock_bh(&map_idr_lock);
id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
if (id > 0)
map->id = id;
spin_unlock_bh(&map_idr_lock);
+ idr_preload_end();
if (WARN_ON_ONCE(!id))
return -ENOSPC;
@@ -940,11 +942,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
int id;
+ idr_preload(GFP_KERNEL);
spin_lock_bh(&prog_idr_lock);
id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
if (id > 0)
prog->aux->id = id;
spin_unlock_bh(&prog_idr_lock);
+ idr_preload_end();
/* id is in [1, INT_MAX) */
if (WARN_ON_ONCE(!id))
@@ -1167,8 +1171,75 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
+/* Initially all BPF programs could be loaded w/o specifying
+ * expected_attach_type. Later for some of them specifying expected_attach_type
+ * at load time became required so that program could be validated properly.
+ * Programs of types that are allowed to be loaded both w/ and w/o (for
+ * backward compatibility) expected_attach_type, should have the default attach
+ * type assigned to expected_attach_type for the latter case, so that it can be
+ * validated later at attach time.
+ *
+ * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
+ * prog type requires it but has some attach types that have to be backward
+ * compatible.
+ */
+static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
+{
+ switch (attr->prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
+ * exist so checking for non-zero is the way to go here.
+ */
+ if (!attr->expected_attach_type)
+ attr->expected_attach_type =
+ BPF_CGROUP_INET_SOCK_CREATE;
+ break;
+ }
+}
+
+static int
+bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
+ enum bpf_attach_type expected_attach_type)
+{
+ switch (prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ switch (expected_attach_type) {
+ case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ switch (expected_attach_type) {
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ enum bpf_attach_type attach_type)
+{
+ switch (prog->type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+ default:
+ return 0;
+ }
+}
+
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
+#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type
static int bpf_prog_load(union bpf_attr *attr)
{
@@ -1205,11 +1276,17 @@ static int bpf_prog_load(union bpf_attr *attr)
!capable(CAP_SYS_ADMIN))
return -EPERM;
+ bpf_prog_load_fixup_attach_type(attr);
+ if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
+ return -EINVAL;
+
/* plain bpf_prog allocation */
prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
if (!prog)
return -ENOMEM;
+ prog->expected_attach_type = attr->expected_attach_type;
+
prog->aux->offload_requested = !!attr->prog_ifindex;
err = security_bpf_prog_alloc(prog->aux);
@@ -1311,6 +1388,81 @@ static int bpf_obj_get(const union bpf_attr *attr)
attr->file_flags);
}
+struct bpf_raw_tracepoint {
+ struct bpf_raw_event_map *btp;
+ struct bpf_prog *prog;
+};
+
+static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
+{
+ struct bpf_raw_tracepoint *raw_tp = filp->private_data;
+
+ if (raw_tp->prog) {
+ bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
+ bpf_prog_put(raw_tp->prog);
+ }
+ kfree(raw_tp);
+ return 0;
+}
+
+static const struct file_operations bpf_raw_tp_fops = {
+ .release = bpf_raw_tracepoint_release,
+ .read = bpf_dummy_read,
+ .write = bpf_dummy_write,
+};
+
+#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
+
+static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
+{
+ struct bpf_raw_tracepoint *raw_tp;
+ struct bpf_raw_event_map *btp;
+ struct bpf_prog *prog;
+ char tp_name[128];
+ int tp_fd, err;
+
+ if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
+ sizeof(tp_name) - 1) < 0)
+ return -EFAULT;
+ tp_name[sizeof(tp_name) - 1] = 0;
+
+ btp = bpf_find_raw_tracepoint(tp_name);
+ if (!btp)
+ return -ENOENT;
+
+ raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
+ if (!raw_tp)
+ return -ENOMEM;
+ raw_tp->btp = btp;
+
+ prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
+ BPF_PROG_TYPE_RAW_TRACEPOINT);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto out_free_tp;
+ }
+
+ err = bpf_probe_register(raw_tp->btp, prog);
+ if (err)
+ goto out_put_prog;
+
+ raw_tp->prog = prog;
+ tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
+ O_CLOEXEC);
+ if (tp_fd < 0) {
+ bpf_probe_unregister(raw_tp->btp, prog);
+ err = tp_fd;
+ goto out_put_prog;
+ }
+ return tp_fd;
+
+out_put_prog:
+ bpf_prog_put(prog);
+out_free_tp:
+ kfree(raw_tp);
+ return err;
+}
+
#ifdef CONFIG_CGROUP_BPF
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
@@ -1374,8 +1526,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_SKB;
break;
case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
ptype = BPF_PROG_TYPE_CGROUP_SOCK;
break;
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
+ break;
case BPF_CGROUP_SOCK_OPS:
ptype = BPF_PROG_TYPE_SOCK_OPS;
break;
@@ -1395,6 +1555,11 @@ static int bpf_prog_attach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
+ if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
+
cgrp = cgroup_get_from_fd(attr->target_fd);
if (IS_ERR(cgrp)) {
bpf_prog_put(prog);
@@ -1431,8 +1596,16 @@ static int bpf_prog_detach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_SKB;
break;
case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
ptype = BPF_PROG_TYPE_CGROUP_SOCK;
break;
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
+ break;
case BPF_CGROUP_SOCK_OPS:
ptype = BPF_PROG_TYPE_SOCK_OPS;
break;
@@ -1482,6 +1655,12 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_INET_INGRESS:
case BPF_CGROUP_INET_EGRESS:
case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_SOCK_OPS:
case BPF_CGROUP_DEVICE:
break;
@@ -1921,6 +2100,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_OBJ_GET_INFO_BY_FD:
err = bpf_obj_get_info_by_fd(&attr, uattr);
break;
+ case BPF_RAW_TRACEPOINT_OPEN:
+ err = bpf_raw_tracepoint_open(&attr);
+ break;
default:
err = -EINVAL;
break;
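A hedged user-space sketch of the load/attach flow these checks enforce for the new sock_addr hooks; the function name is made up, error handling is minimal, and the includes are the same as in the raw tracepoint sketch earlier.

/* expected_attach_type must be set at load time for
 * BPF_PROG_TYPE_CGROUP_SOCK_ADDR and must match attach_type at attach
 * time, otherwise bpf_prog_attach_check_attach_type() returns -EINVAL.
 */
static int load_and_attach_connect4(const struct bpf_insn *insns,
				    unsigned int insn_cnt, int cgroup_fd)
{
	union bpf_attr attr;
	int prog_fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type		  = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	attr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
	attr.insns		  = (__u64)(unsigned long)insns;
	attr.insn_cnt		  = insn_cnt;
	attr.license		  = (__u64)(unsigned long)"GPL";
	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (prog_fd < 0)
		return prog_fd;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd	   = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_CGROUP_INET4_CONNECT;
	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}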
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e9f7c20691c1..5dd1dcb902bf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -168,23 +168,12 @@ struct bpf_call_arg_meta {
static DEFINE_MUTEX(bpf_verifier_lock);
-/* log_level controls verbosity level of eBPF verifier.
- * bpf_verifier_log_write() is used to dump the verification trace to the log,
- * so the user can figure out what's wrong with the program
- */
-__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
- const char *fmt, ...)
+void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
+ va_list args)
{
- struct bpf_verifer_log *log = &env->log;
unsigned int n;
- va_list args;
-
- if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
- return;
- va_start(args, fmt);
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
- va_end(args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
@@ -197,14 +186,37 @@ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
else
log->ubuf = NULL;
}
-EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
-/* Historically bpf_verifier_log_write was called verbose, but the name was too
- * generic for symbol export. The function was renamed, but not the calls in
- * the verifier to avoid complicating backports. Hence the alias below.
+
+/* log_level controls verbosity level of eBPF verifier.
+ * bpf_verifier_log_write() is used to dump the verification trace to the log,
+ * so the user can figure out what's wrong with the program
*/
-static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
- const char *fmt, ...)
- __attribute__((alias("bpf_verifier_log_write")));
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ if (!bpf_verifier_log_needed(&env->log))
+ return;
+
+ va_start(args, fmt);
+ bpf_verifier_vlog(&env->log, fmt, args);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+
+__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
+{
+ struct bpf_verifier_env *env = private_data;
+ va_list args;
+
+ if (!bpf_verifier_log_needed(&env->log))
+ return;
+
+ va_start(args, fmt);
+ bpf_verifier_vlog(&env->log, fmt, args);
+ va_end(args);
+}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
@@ -1311,7 +1323,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
};
if (env->ops->is_valid_access &&
- env->ops->is_valid_access(off, size, t, &info)) {
+ env->ops->is_valid_access(off, size, t, env->prog, &info)) {
/* A non zero info.ctx_field_size indicates that this field is a
* candidate for later verifier transformation to load the whole
* field and then apply a mask when accessed with a narrower
@@ -2337,7 +2349,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
}
if (env->ops->get_func_proto)
- fn = env->ops->get_func_proto(func_id);
+ fn = env->ops->get_func_proto(func_id, env->prog);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
@@ -3875,6 +3887,7 @@ static int check_return_code(struct bpf_verifier_env *env)
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
break;
@@ -4600,10 +4613,11 @@ static int do_check(struct bpf_verifier_env *env)
if (env->log.level) {
const struct bpf_insn_cbs cbs = {
.cb_print = verbose,
+ .private_data = env,
};
verbose(env, "%d: ", insn_idx);
- print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
+ print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
if (bpf_prog_is_dev_bound(env->prog->aux)) {
@@ -5559,7 +5573,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn = new_prog->insnsi + i + delta;
}
patch_call_imm:
- fn = env->ops->get_func_proto(insn->imm);
+ fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
@@ -5601,7 +5615,7 @@ static void free_states(struct bpf_verifier_env *env)
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
struct bpf_verifier_env *env;
- struct bpf_verifer_log *log;
+ struct bpf_verifier_log *log;
int ret = -EINVAL;
/* no program is valid */
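A hedged sketch of what the bpf_verifier_vlog() split enables for item 6); btf_verifier_log() here is an illustrative future consumer, not code from this series.

/* Any subsystem that only holds a struct bpf_verifier_log can now print
 * through the shared helper without a full struct bpf_verifier_env.
 */
__printf(2, 3)
static void btf_verifier_log(struct bpf_verifier_log *log, const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}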
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7f9691c86b6e..d88e96d4e12c 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -524,7 +524,8 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
.arg3_type = ARG_ANYTHING,
};
-static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *
+tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -568,7 +569,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
}
}
-static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *
+kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -582,12 +584,13 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
return &bpf_override_return_proto;
#endif
default:
- return tracing_func_proto(func_id);
+ return tracing_func_proto(func_id, prog);
}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(struct pt_regs))
@@ -661,7 +664,8 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
.arg3_type = ARG_ANYTHING,
};
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *
+tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -669,11 +673,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_get_stackid:
return &bpf_get_stackid_proto_tp;
default:
- return tracing_func_proto(func_id);
+ return tracing_func_proto(func_id, prog);
}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
@@ -721,7 +726,8 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
.arg3_type = ARG_CONST_SIZE,
};
-static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *
+pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -731,11 +737,94 @@ static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_perf_prog_read_value:
return &bpf_perf_prog_read_value_proto;
default:
- return tracing_func_proto(func_id);
+ return tracing_func_proto(func_id, prog);
}
}
+/*
+ * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
+ * to avoid potential recursive reuse issue when/if tracepoints are added
+ * inside bpf_*_event_output and/or bpf_get_stack_id
+ */
+static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
+ struct bpf_map *, map, u64, flags, void *, data, u64, size)
+{
+ struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+
+ perf_fetch_caller_regs(regs);
+ return ____bpf_perf_event_output(regs, map, flags, data, size);
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
+ .func = bpf_perf_event_output_raw_tp,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
+BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
+ struct bpf_map *, map, u64, flags)
+{
+ struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+
+ perf_fetch_caller_regs(regs);
+ /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
+ return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+ flags, 0, 0);
+}
+
+static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
+ .func = bpf_get_stackid_raw_tp,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_perf_event_output:
+ return &bpf_perf_event_output_proto_raw_tp;
+ case BPF_FUNC_get_stackid:
+ return &bpf_get_stackid_proto_raw_tp;
+ default:
+ return tracing_func_proto(func_id, prog);
+ }
+}
+
+static bool raw_tp_prog_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ /* largest tracepoint in the kernel has 12 args */
+ if (off < 0 || off >= sizeof(__u64) * 12)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
+ .get_func_proto = raw_tp_prog_func_proto,
+ .is_valid_access = raw_tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops raw_tracepoint_prog_ops = {
+};
+
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
const int size_u64 = sizeof(u64);
@@ -908,3 +997,106 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
return ret;
}
+
+extern struct bpf_raw_event_map __start__bpf_raw_tp[];
+extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
+
+struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+{
+ struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
+
+ for (; btp < __stop__bpf_raw_tp; btp++) {
+ if (!strcmp(btp->tp->name, name))
+ return btp;
+ }
+ return NULL;
+}
+
+static __always_inline
+void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
+{
+ rcu_read_lock();
+ preempt_disable();
+ (void) BPF_PROG_RUN(prog, args);
+ preempt_enable();
+ rcu_read_unlock();
+}
+
+#define UNPACK(...) __VA_ARGS__
+#define REPEAT_1(FN, DL, X, ...) FN(X)
+#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
+#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
+#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
+#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
+#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
+#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
+#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
+#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
+#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
+#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
+#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
+#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
+
+#define SARG(X) u64 arg##X
+#define COPY(X) args[X] = arg##X
+
+#define __DL_COM (,)
+#define __DL_SEM (;)
+
+#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
+
+#define BPF_TRACE_DEFN_x(x) \
+ void bpf_trace_run##x(struct bpf_prog *prog, \
+ REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
+ { \
+ u64 args[x]; \
+ REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
+ __bpf_trace_run(prog, args); \
+ } \
+ EXPORT_SYMBOL_GPL(bpf_trace_run##x)
+BPF_TRACE_DEFN_x(1);
+BPF_TRACE_DEFN_x(2);
+BPF_TRACE_DEFN_x(3);
+BPF_TRACE_DEFN_x(4);
+BPF_TRACE_DEFN_x(5);
+BPF_TRACE_DEFN_x(6);
+BPF_TRACE_DEFN_x(7);
+BPF_TRACE_DEFN_x(8);
+BPF_TRACE_DEFN_x(9);
+BPF_TRACE_DEFN_x(10);
+BPF_TRACE_DEFN_x(11);
+BPF_TRACE_DEFN_x(12);
+
+static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+{
+ struct tracepoint *tp = btp->tp;
+
+ /*
+ * check that program doesn't access arguments beyond what's
+ * available in this tracepoint
+ */
+ if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
+ return -EINVAL;
+
+ return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
+}
+
+int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+{
+ int err;
+
+ mutex_lock(&bpf_event_mutex);
+ err = __bpf_probe_register(btp, prog);
+ mutex_unlock(&bpf_event_mutex);
+ return err;
+}
+
+int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+{
+ int err;
+
+ mutex_lock(&bpf_event_mutex);
+ err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
+ mutex_unlock(&bpf_event_mutex);
+ return err;
+}