author    David S. Miller <davem@davemloft.net>  2018-11-28 22:10:54 -0800
committer David S. Miller <davem@davemloft.net>  2018-11-28 22:10:54 -0800
commit    e561bb29b650d2817d10a4858f1817836ed08399 (patch)
tree      0bc92b5bb8a287a8e4a88732f3c64b56d126da58 /kernel/bpf/core.c
parent    62e3a931788223048120357ab3f29dcb55c5ef79 (diff)
parent    60b548237fed4b4164bab13c994dd9615f6c4323 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Trivial conflict in net/core/filter.c: a locally computed 'sdif' is now an argument to the function.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--  kernel/bpf/core.c   34
1 file changed, 34 insertions, 0 deletions
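The 34 lines added to kernel/bpf/core.c (shown in the diff below) introduce bpf_jit_get_func_addr(), which resolves the call target of a BPF call instruction for an architecture JIT: either the fixed address of a BPF helper (__bpf_call_base + imm), or, for BPF_PSEUDO_CALL, the address of a JITed subprogram taken from prog->aux->func[] once the extra pass has run. The snippet below is a minimal, hypothetical sketch of how an arch JIT's instruction loop might use the helper; emit_call() and struct jit_ctx are illustrative assumptions, not symbols from this commit.

/* Hypothetical caller, for illustration only (not part of this commit).
 * An arch JIT handling a BPF_JMP | BPF_CALL instruction could resolve
 * the call target through the new helper.  emit_call() and struct
 * jit_ctx are made-up names standing in for arch-specific machinery.
 */
static int jit_emit_bpf_call(struct jit_ctx *ctx,
			     const struct bpf_prog *prog,
			     const struct bpf_insn *insn, bool extra_pass)
{
	bool func_addr_fixed;
	u64 func_addr;
	int err;

	err = bpf_jit_get_func_addr(prog, insn, extra_pass,
				    &func_addr, &func_addr_fixed);
	if (err)
		return err;

	/* For BPF_PSEUDO_CALL on the first pass, func_addr is 0 (the
	 * place-holder); the JIT emits a dummy call and patches in the
	 * real subprogram address once extra_pass is true and
	 * prog->aux->func[] has been populated.
	 */
	return emit_call(ctx, func_addr, func_addr_fixed);
}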
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 16d77012ad3e..539e8575689f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -685,6 +685,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed)
+{
+	s16 off = insn->off;
+	s32 imm = insn->imm;
+	u8 *addr;
+
+	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+	if (!*func_addr_fixed) {
+		/* Place-holder address till the last pass has collected
+		 * all addresses for JITed subprograms in which case we
+		 * can pick them up from prog->aux.
+		 */
+		if (!extra_pass)
+			addr = NULL;
+		else if (prog->aux->func &&
+			 off >= 0 && off < prog->aux->func_cnt)
+			addr = (u8 *)prog->aux->func[off]->bpf_func;
+		else
+			return -EINVAL;
+	} else {
+		/* Address of a BPF helper call. Since part of the core
+		 * kernel, it's always at a fixed location. __bpf_call_base
+		 * and the helper with imm relative to it are both in core
+		 * kernel.
+		 */
+		addr = (u8 *)__bpf_call_base + imm;
+	}
+
+	*func_addr = (unsigned long)addr;
+	return 0;
+}
+
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
 			      const struct bpf_insn *aux,
 			      struct bpf_insn *to_buff)