author     David S. Miller <davem@davemloft.net>    2020-03-13 20:52:03 -0700
committer  David S. Miller <davem@davemloft.net>    2020-03-13 20:52:03 -0700
commit     44ef976ab3c4ccd6c886714e5349caa53c477010
tree       fad7059aad1e1ac040e59a2f4870400bc8e9e30a /arch/x86
parent     48f5d5cb80b4e414cb97dd7dba43b0370bdee130
parent     832165d225f71040a2c1fc2407752e462d00de1f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2020-03-13

The following pull-request contains BPF updates for your *net-next* tree.

We've added 86 non-merge commits during the last 12 day(s) which contain
a total of 107 files changed, 5771 insertions(+), 1700 deletions(-).

The main changes are:

1) Add modify_return attach type which allows attaching to a function via
   BPF trampoline; such programs run after the fentry and before the fexit
   programs and can pass a return code to the original caller, from KP Singh.

2) Generalize BPF's kallsyms handling and add BPF trampoline and dispatcher
   objects to be visible in /proc/kallsyms so they can be annotated in
   stack traces, from Jiri Olsa.

3) Extend BPF sockmap to allow for UDP next to existing TCP support in order
   to enable this for BPF based socket dispatch, from Lorenz Bauer.

4) Introduce a new bpftool 'prog profile' command which attaches to existing
   BPF programs via fentry and fexit hooks and reads out hardware counters
   during that period, from Song Liu. Example usage:

   bpftool prog profile id 337 duration 3 cycles instructions llc_misses

        4228 run_cnt
     3403698 cycles                                              (84.08%)
     3525294 instructions   #  1.04 insn per cycle               (84.05%)
          13 llc_misses     #  3.69 LLC misses per million insns (83.50%)

5) Batch of improvements to libbpf, bpftool and BPF selftests. Also addition
   of a new bpf_link abstraction to keep, in particular, BPF tracing programs
   attached even when the application owning them exits, from Andrii Nakryiko.

6) New bpf_get_ns_current_pid_tgid() helper for tracing to perform PID
   filtering, returning the PID as seen by the PID namespace supplied by the
   caller, from Carlos Neira.

7) Refactor of RISC-V JIT code to move out common pieces and addition of a
   new RV32G BPF JIT compiler, from Luke Nelson.

8) Add gso_size context member to __sk_buff in order to be able to know
   whether a given skb is GSO or not, from Willem de Bruijn.

9) Add a new bpf_xdp_output() helper which reuses XDP's existing perf RB
   output implementation but can be called from tracepoint programs, from
   Eelco Chaudron.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
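Item 1 above is what the arch/x86/net/bpf_jit_comp.c changes in this merge wire up on the JIT side. Below is a minimal sketch of a program using the new modify_return (fmod_ret) attach type, modeled on the selftests added in this series; the traced bpf_modify_return_test() function and its (int a, int *b) signature, the BPF_PROG() macro and the header paths are assumptions about the selftest environment, not content taken from this diff.

/* Sketch: a BPF program for the new modify_return (fmod_ret) attach type.
 * Modeled on the selftests added in this series; the traced function, its
 * signature, BPF_PROG() and the header paths are assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

int fmod_ret_cnt = 0;

/* Runs after fentry and before fexit programs. 'ret' holds the return
 * value of the previous fmod_ret program (0 for the first one). Returning
 * non-zero makes the trampoline skip the traced function and hand this
 * value back to the original caller.
 */
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
	fmod_ret_cnt++;
	return ret ? ret : -22;	/* e.g. force -EINVAL on the caller */
}

When such a program is attached, the trampoline generated by arch_prepare_bpf_trampoline() below runs it between the fentry and fexit programs and, on a non-zero return, branches straight to do_fexit, skipping the traced function.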
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/mm/init_32.c          14
-rw-r--r--   arch/x86/net/bpf_jit_comp.c   260
2 files changed, 194 insertions(+), 80 deletions(-)
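Before the per-file diffs, here is a rough illustration of the control flow that the reworked trampoline in arch/x86/net/bpf_jit_comp.c emits when fentry, fmod_ret and fexit programs are all attached. This is ordinary userspace C written for illustration only; run_prog(), orig_func() and the loop structure are placeholders and do not appear in the kernel sources.

/* Illustration only: userspace C mimicking the control flow of the x86 BPF
 * trampoline generated below. run_prog() and orig_func() stand in for the
 * attached BPF programs and the traced kernel function.
 */
#include <stdio.h>

typedef unsigned long long u64;

static u64 run_prog(const char *kind, int idx, u64 prev_ret)
{
	printf("%8s prog %d sees retval %llu\n", kind, idx, prev_ret);
	return prev_ret;		/* a real fmod_ret prog may return non-zero */
}

static u64 orig_func(void)
{
	return 42;			/* the traced function's own return value */
}

static u64 trampoline(int nr_fentry, int nr_fmod_ret, int nr_fexit)
{
	u64 retval = 0;			/* kept at [rbp - 8] by the real trampoline */
	int i;

	for (i = 0; i < nr_fentry; i++)
		run_prog("fentry", i, 0);

	for (i = 0; i < nr_fmod_ret; i++) {
		retval = run_prog("fmod_ret", i, retval);
		if (retval)
			goto do_fexit;	/* non-zero: skip orig_func() entirely */
	}

	retval = orig_func();		/* the BPF_TRAMP_F_CALL_ORIG case */

do_fexit:				/* 16-byte aligned branch target in the JIT */
	for (i = 0; i < nr_fexit; i++)
		run_prog("fexit", i, retval);

	return retval;
}

int main(void)
{
	printf("caller sees %llu\n", trampoline(1, 1, 1));
	return 0;
}

The retval variable plays the role of the [rbp - 8] stack slot in the generated code, and the early jump to do_fexit corresponds to the conditional near jump that is patched over the 6-byte nop pads once the 16-byte aligned address of the fexit block is known.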
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 23df4885bbed..eb6ede2c3d43 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -238,7 +238,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
}
}
-static inline int is_kernel_text(unsigned long addr)
+/*
+ * <linux/kallsyms.h> already defines is_kernel_text; use the
+ * '__' prefix here to avoid the conflict.
+ */
+static inline int __is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
return 1;
@@ -328,8 +332,8 @@ repeat:
addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
PAGE_OFFSET + PAGE_SIZE-1;
- if (is_kernel_text(addr) ||
- is_kernel_text(addr2))
+ if (__is_kernel_text(addr) ||
+ __is_kernel_text(addr2))
prot = PAGE_KERNEL_LARGE_EXEC;
pages_2m++;
@@ -354,7 +358,7 @@ repeat:
*/
pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
- if (is_kernel_text(addr))
+ if (__is_kernel_text(addr))
prot = PAGE_KERNEL_EXEC;
pages_4k++;
@@ -881,7 +885,7 @@ static void mark_nxdata_nx(void)
*/
unsigned long start = PFN_ALIGN(_etext);
/*
- * This comes from is_kernel_text upper limit. Also HPAGE where used:
+ * This comes from __is_kernel_text upper limit. Also HPAGE where used:
*/
unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9ba08e9abc09..5ea7c2cf7ab4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1361,37 +1361,140 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
-(stack_size - i * 8));
}
+static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ struct bpf_prog *p, int stack_size, bool mod_ret)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (emit_call(&prog, __bpf_prog_enter, prog))
+ return -EINVAL;
+ /* remember prog start time returned by __bpf_prog_enter */
+ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+
+ /* arg1: lea rdi, [rbp - stack_size] */
+ EMIT4(0x48, 0x8D, 0x7D, -stack_size);
+ /* arg2: progs[i]->insnsi for interpreter */
+ if (!p->jited)
+ emit_mov_imm64(&prog, BPF_REG_2,
+ (long) p->insnsi >> 32,
+ (u32) (long) p->insnsi);
+ /* call JITed bpf program or interpreter */
+ if (emit_call(&prog, p->bpf_func, prog))
+ return -EINVAL;
+
+ /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+ * of the previous call which is then passed on the stack to
+ * the next BPF program.
+ */
+ if (mod_ret)
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+
+ /* arg1: mov rdi, progs[i] */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
+ (u32) (long) p);
+ /* arg2: mov rsi, rbx <- start time in nsec */
+ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ if (emit_call(&prog, __bpf_prog_exit, prog))
+ return -EINVAL;
+
+ *pprog = prog;
+ return 0;
+}
+
+static void emit_nops(u8 **pprog, unsigned int len)
+{
+ unsigned int i, noplen;
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ while (len > 0) {
+ noplen = len;
+
+ if (noplen > ASM_NOP_MAX)
+ noplen = ASM_NOP_MAX;
+
+ for (i = 0; i < noplen; i++)
+ EMIT1(ideal_nops[noplen][i]);
+ len -= noplen;
+ }
+
+ *pprog = prog;
+}
+
+static void emit_align(u8 **pprog, u32 align)
+{
+ u8 *target, *prog = *pprog;
+
+ target = PTR_ALIGN(prog, align);
+ if (target != prog)
+ emit_nops(&prog, target - prog);
+
+ *pprog = prog;
+}
+
+static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+ s64 offset;
+
+ offset = func - (ip + 2 + 4);
+ if (!is_simm32(offset)) {
+ pr_err("Target %p is out of range\n", func);
+ return -EINVAL;
+ }
+ EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
+ *pprog = prog;
+ return 0;
+}
+
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
- struct bpf_prog **progs, int prog_cnt, int stack_size)
+ struct bpf_tramp_progs *tp, int stack_size)
{
+ int i;
u8 *prog = *pprog;
- int cnt = 0, i;
- for (i = 0; i < prog_cnt; i++) {
- if (emit_call(&prog, __bpf_prog_enter, prog))
- return -EINVAL;
- /* remember prog start time returned by __bpf_prog_enter */
- emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
-
- /* arg1: lea rdi, [rbp - stack_size] */
- EMIT4(0x48, 0x8D, 0x7D, -stack_size);
- /* arg2: progs[i]->insnsi for interpreter */
- if (!progs[i]->jited)
- emit_mov_imm64(&prog, BPF_REG_2,
- (long) progs[i]->insnsi >> 32,
- (u32) (long) progs[i]->insnsi);
- /* call JITed bpf program or interpreter */
- if (emit_call(&prog, progs[i]->bpf_func, prog))
+ for (i = 0; i < tp->nr_progs; i++) {
+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
return -EINVAL;
+ }
+ *pprog = prog;
+ return 0;
+}
+
+static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
+ struct bpf_tramp_progs *tp, int stack_size,
+ u8 **branches)
+{
+ u8 *prog = *pprog;
+ int i, cnt = 0;
- /* arg1: mov rdi, progs[i] */
- emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
- (u32) (long) progs[i]);
- /* arg2: mov rsi, rbx <- start time in nsec */
- emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
- if (emit_call(&prog, __bpf_prog_exit, prog))
+ /* The first fmod_ret program will receive a garbage return value.
+ * Set this to 0 to avoid confusing the program.
+ */
+ emit_mov_imm32(&prog, false, BPF_REG_0, 0);
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+ for (i = 0; i < tp->nr_progs; i++) {
+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
return -EINVAL;
+
+ /* mod_ret prog stored return value into [rbp - 8]. Emit:
+ * if (*(u64 *)(rbp - 8) != 0)
+ * goto do_fexit;
+ */
+ /* cmp QWORD PTR [rbp - 0x8], 0x0 */
+ EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
+
+ /* Save the location of the branch and generate 6 nops
+ * (4 bytes for an offset and 2 bytes for the jump). These nops
+ * are replaced with a conditional jump once do_fexit (i.e. the
+ * start of the fexit invocation) is finalized.
+ */
+ branches[i] = prog;
+ emit_nops(&prog, 4 + 2);
}
+
*pprog = prog;
return 0;
}
@@ -1458,12 +1561,15 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
*/
int arch_prepare_bpf_trampoline(void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_prog **fentry_progs, int fentry_cnt,
- struct bpf_prog **fexit_progs, int fexit_cnt,
+ struct bpf_tramp_progs *tprogs,
void *orig_call)
{
- int cnt = 0, nr_args = m->nr_args;
+ int ret, i, cnt = 0, nr_args = m->nr_args;
int stack_size = nr_args * 8;
+ struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
+ struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
+ u8 **branches = NULL;
u8 *prog;
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
@@ -1492,28 +1598,64 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
save_regs(m, &prog, nr_args, stack_size);
- if (fentry_cnt)
- if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
+ if (fentry->nr_progs)
+ if (invoke_bpf(m, &prog, fentry, stack_size))
return -EINVAL;
+ if (fmod_ret->nr_progs) {
+ branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
+ GFP_KERNEL);
+ if (!branches)
+ return -ENOMEM;
+
+ if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
+ branches)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- if (fentry_cnt)
+ if (fentry->nr_progs || fmod_ret->nr_progs)
restore_regs(m, &prog, nr_args, stack_size);
/* call original function */
- if (emit_call(&prog, orig_call, prog))
- return -EINVAL;
+ if (emit_call(&prog, orig_call, prog)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
/* remember return value in a stack for bpf prog to access */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
}
- if (fexit_cnt)
- if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
- return -EINVAL;
+ if (fmod_ret->nr_progs) {
+ /* From Intel 64 and IA-32 Architectures Optimization
+ * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
+ * Coding Rule 11: All branch targets should be 16-byte
+ * aligned.
+ */
+ emit_align(&prog, 16);
+ /* Update the branches saved in invoke_bpf_mod_ret with the
+ * aligned address of do_fexit.
+ */
+ for (i = 0; i < fmod_ret->nr_progs; i++)
+ emit_cond_near_jump(&branches[i], prog, branches[i],
+ X86_JNE);
+ }
+
+ if (fexit->nr_progs)
+ if (invoke_bpf(m, &prog, fexit, stack_size)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_regs(m, &prog, nr_args, stack_size);
+ /* This needs to be done regardless. If there were fmod_ret programs,
+ * the return value is only updated on the stack and still needs to be
+ * restored to R0.
+ */
if (flags & BPF_TRAMP_F_CALL_ORIG)
/* restore original return value back into RAX */
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
@@ -1525,45 +1667,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
EMIT1(0xC3); /* ret */
/* Make sure the trampoline generation logic doesn't overflow */
- if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY))
- return -EFAULT;
- return prog - (u8 *)image;
-}
-
-static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
-{
- u8 *prog = *pprog;
- int cnt = 0;
- s64 offset;
-
- offset = func - (ip + 2 + 4);
- if (!is_simm32(offset)) {
- pr_err("Target %p is out of range\n", func);
- return -EINVAL;
- }
- EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
- *pprog = prog;
- return 0;
-}
-
-static void emit_nops(u8 **pprog, unsigned int len)
-{
- unsigned int i, noplen;
- u8 *prog = *pprog;
- int cnt = 0;
-
- while (len > 0) {
- noplen = len;
-
- if (noplen > ASM_NOP_MAX)
- noplen = ASM_NOP_MAX;
-
- for (i = 0; i < noplen; i++)
- EMIT1(ideal_nops[noplen][i]);
- len -= noplen;
+ if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
+ ret = -EFAULT;
+ goto cleanup;
}
+ ret = prog - (u8 *)image;
- *pprog = prog;
+cleanup:
+ kfree(branches);
+ return ret;
}
static int emit_fallback_jump(u8 **pprog)
@@ -1588,7 +1700,7 @@ static int emit_fallback_jump(u8 **pprog)
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
- u8 *jg_reloc, *jg_target, *prog = *pprog;
+ u8 *jg_reloc, *prog = *pprog;
int pivot, err, jg_bytes = 1, cnt = 0;
s64 jg_offset;
@@ -1643,9 +1755,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
* Coding Rule 11: All branch targets should be 16-byte
* aligned.
*/
- jg_target = PTR_ALIGN(prog, 16);
- if (jg_target != prog)
- emit_nops(&prog, jg_target - prog);
+ emit_align(&prog, 16);
jg_offset = prog - jg_reloc;
emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);