author	David S. Miller <davem@davemloft.net>	2018-04-26 21:19:50 -0400
committer	David S. Miller <davem@davemloft.net>	2018-04-26 21:19:50 -0400
commit	79741a38b4a2538a68342c45b813ecb9dd648ee8 (patch)
tree	bd744350673c8e3a912525b4733ab8e0ae24cdfd /drivers/net/ethernet/netronome
parent	cb586c63e3fc5b227c51fd8c4cb40b34d3750645 (diff)
parent	c0885f61bbb6a89c35397d3a8fe49c35822cde81 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-04-27

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add extensive BPF helper description into include/uapi/linux/bpf.h and a new script bpf_helpers_doc.py which allows for generating a man page out of it. Thus, every helper in BPF now comes with proper function signature, detailed description and return code explanation, from Quentin.

2) Migrate the BPF collect metadata tunnel tests from BPF samples over to the BPF selftests and further extend them with v6 vxlan, geneve and ipip tests, simplify the ipip tests, improve documentation and convert to bpf_ntoh*() / bpf_hton*() api, from William.

3) Currently, helpers that expect ARG_PTR_TO_MAP_{KEY,VALUE} can only access stack and packet memory. Extend this to allow such helpers to also use map values, which enabled use cases where value from a first lookup can be directly used as a key for a second lookup, from Paul.

4) Add a new helper bpf_skb_get_xfrm_state() for tc BPF programs in order to retrieve XFRM state information containing SPI, peer address and reqid values, from Eyal.

5) Various optimizations in nfp driver's BPF JIT in order to turn ADD and SUB instructions with negative immediate into the opposite operation with a positive immediate such that nfp can better fit small immediates into instructions. Savings in instruction count up to 4% have been observed, from Jakub.

6) Add the BPF prog's gpl_compatible flag to struct bpf_prog_info and add support for dumping this through bpftool, from Jiri.

7) Move the BPF sockmap samples over into BPF selftests instead since sockmap was rather a series of tests than sample anyway and this way this can be run from automated bots, from John.

8) Follow-up fix for bpf_adjust_tail() helper in order to make it work with generic XDP, from Nikita.

9) Some follow-up cleanups to BTF, namely, removing unused defines from BTF uapi header and renaming 'name' struct btf_* members into name_off to make it more clear they are offsets into string section, from Martin.

10) Remove test_sock_addr from TEST_GEN_PROGS in BPF selftests since not run directly but invoked from test_sock_addr.sh, from Yonghong.

11) Remove redundant ret assignment in sample BPF loader, from Wang.

12) Add couple of missing files to BPF selftest's gitignore, from Anders.

There are two trivial merge conflicts while pulling:

1) Remove samples/sockmap/Makefile since all sockmap tests have been moved to selftests.

2) Add both hunks from tools/testing/selftests/bpf/.gitignore to the file since git should ignore all of them.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
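[Editor's note] A minimal, self-contained C sketch of the idea behind point 5 (hypothetical names, not the driver's API; the real pass is nfp_bpf_opt_neg_add_sub() in the jit.c hunks below): an ALU add or sub with a negative immediate is flipped to the opposite operation with the negated, now positive, immediate, which fits better into the hardware's small immediate fields.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's BPF opcode macros; the values
 * match include/uapi/linux/bpf.h for the opcodes used here.
 */
#define BPF_CLASS(code)	((code) & 0x07)
#define BPF_OP(code)	((code) & 0xf0)
#define BPF_ALU64	0x07
#define BPF_ADD		0x00
#define BPF_SUB		0x10
#define BPF_K		0x00

struct insn {
	uint8_t code;
	int32_t imm;
};

/* Hypothetical helper mirroring the optimization: flip ADD <-> SUB when
 * the immediate is negative, so the (now positive) immediate is small
 * enough to encode directly in the instruction.
 */
static bool neg_imm_to_opposite_op(struct insn *insn)
{
	if (BPF_OP(insn->code) != BPF_ADD && BPF_OP(insn->code) != BPF_SUB)
		return false;
	if (insn->imm >= 0)
		return false;

	insn->code = BPF_CLASS(insn->code) |
		     (BPF_OP(insn->code) == BPF_ADD ? BPF_SUB : BPF_ADD) |
		     BPF_K;
	insn->imm = -insn->imm;	/* e.g. "r0 += -2" becomes "r0 -= 2" */
	return true;
}

int main(void)
{
	struct insn insn = { .code = BPF_ALU64 | BPF_ADD | BPF_K, .imm = -2 };

	neg_imm_to_opposite_op(&insn);
	printf("op=%#x imm=%d\n", BPF_OP(insn.code), insn.imm); /* op=0x10 imm=2 */
	return 0;
}

For jump instructions the trick is the same but the opcode cannot change (BPF_JGE stays BPF_JGE), so the driver instead negates the immediate and records meta->jump_neg_op, telling the comparison emitter to use ADD/ADD_C in place of SUB/SUB_C.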
Diffstat (limited to 'drivers/net/ethernet/netronome')
-rw-r--r--	drivers/net/ethernet/netronome/nfp/bpf/jit.c	231
-rw-r--r--	drivers/net/ethernet/netronome/nfp/bpf/main.h	6
2 files changed, 124 insertions, 113 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 29b4e5f8c102..65f0791cae0c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1214,45 +1214,83 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
-static int
-wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum br_mask br_mask, bool swap)
+static const struct jmp_code_map {
+ enum br_mask br_mask;
+ bool swap;
+} jmp_code_map[] = {
+ [BPF_JGT >> 4] = { BR_BLO, true },
+ [BPF_JGE >> 4] = { BR_BHS, false },
+ [BPF_JLT >> 4] = { BR_BLO, false },
+ [BPF_JLE >> 4] = { BR_BHS, true },
+ [BPF_JSGT >> 4] = { BR_BLT, true },
+ [BPF_JSGE >> 4] = { BR_BGE, false },
+ [BPF_JSLT >> 4] = { BR_BLT, false },
+ [BPF_JSLE >> 4] = { BR_BGE, true },
+};
+
+static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
+{
+ unsigned int op;
+
+ op = BPF_OP(meta->insn.code) >> 4;
+ /* br_mask of 0 is BR_BEQ which we don't use in jump code table */
+ if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
+ !jmp_code_map[op].br_mask,
+ "no code found for jump instruction"))
+ return NULL;
+
+ return &jmp_code_map[op];
+}
+
+static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ const struct jmp_code_map *code;
+ enum alu_op alu_op, carry_op;
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
+ code = nfp_jmp_code_get(meta);
+ if (!code)
+ return -EINVAL;
+
+ alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
+ carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
+
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
- if (!swap)
- emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ if (!code->swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
else
- emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+ emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
- if (!swap)
+ if (!code->swap)
emit_alu(nfp_prog, reg_none(),
- reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ reg_a(reg + 1), carry_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(),
- tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+ tmp_reg, carry_op, reg_a(reg + 1));
- emit_br(nfp_prog, br_mask, insn->off, 0);
+ emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
-static int
-wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum br_mask br_mask, bool swap)
+static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
+ const struct jmp_code_map *code;
u8 areg, breg;
+ code = nfp_jmp_code_get(meta);
+ if (!code)
+ return -EINVAL;
+
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
- if (swap) {
+ if (code->swap) {
areg ^= breg;
breg ^= areg;
areg ^= breg;
@@ -1261,7 +1299,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
emit_alu(nfp_prog, reg_none(),
reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
- emit_br(nfp_prog, br_mask, insn->off, 0);
+ emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
@@ -1400,7 +1438,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (!load_lm_ptr)
return 0;
- emit_csr_wr(nfp_prog, stack_reg(nfp_prog),  NFP_CSR_ACT_LM_ADDR0);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
wrp_nops(nfp_prog, 3);
return 0;
@@ -2283,46 +2321,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
-}
-
-static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
-}
-
-static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
-}
-
-static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
-}
-
-static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
-}
-
-static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
-}
-
-static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
-}
-
-static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
-}
-
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -2392,46 +2390,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
-}
-
-static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
-}
-
-static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
-}
-
-static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
-}
-
-static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
-}
-
-static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
-}
-
-static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
-}
-
-static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
-}
-
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -2520,25 +2478,25 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
- [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
- [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
- [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
- [BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
- [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
- [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
- [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
- [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
- [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
- [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
- [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
- [BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
- [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
- [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
- [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
- [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
@@ -2777,6 +2735,54 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
+/* abs(insn.imm) will fit better into unrestricted reg immediate -
+ * convert add/sub of a negative number into a sub/add of a positive one.
+ */
+static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ if (meta->skip)
+ continue;
+
+ if (BPF_CLASS(insn.code) != BPF_ALU &&
+ BPF_CLASS(insn.code) != BPF_ALU64 &&
+ BPF_CLASS(insn.code) != BPF_JMP)
+ continue;
+ if (BPF_SRC(insn.code) != BPF_K)
+ continue;
+ if (insn.imm >= 0)
+ continue;
+
+ if (BPF_CLASS(insn.code) == BPF_JMP) {
+ switch (BPF_OP(insn.code)) {
+ case BPF_JGE:
+ case BPF_JSGE:
+ case BPF_JLT:
+ case BPF_JSLT:
+ meta->jump_neg_op = true;
+ break;
+ default:
+ continue;
+ }
+ } else {
+ if (BPF_OP(insn.code) == BPF_ADD)
+ insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+ else if (BPF_OP(insn.code) == BPF_SUB)
+ insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+ else
+ continue;
+
+ meta->insn.code = insn.code | BPF_K;
+ }
+
+ meta->insn.imm = -insn.imm;
+ }
+}
+
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -3212,6 +3218,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
+ nfp_bpf_opt_neg_add_sub(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
nfp_bpf_opt_ldst_gather(nfp_prog);
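[Editor's note] The jmp_code_map introduced above indexes branch parameters by BPF_OP(insn.code) >> 4, which works because BPF jump opcodes are multiples of 0x10. A standalone sketch of that dense-table dispatch pattern, assuming placeholder br_mask values (only the two opcode constants are the real UAPI values):

#include <stddef.h>
#include <stdio.h>

#define BPF_OP(code)	((code) & 0xf0)
#define BPF_JGT		0x20
#define BPF_JSLE	0xd0

struct jmp_code {
	int br_mask;	/* 0 marks an empty slot (BR_BEQ is never stored) */
	int swap;
};

/* Opcodes are multiples of 0x10, so ">> 4" turns them into small array
 * indices (BPF_JGT -> 2, BPF_JSLE -> 13); unlisted slots stay zeroed.
 */
static const struct jmp_code jmp_code_map[] = {
	[BPF_JGT >> 4]  = { .br_mask = 1, .swap = 1 },
	[BPF_JSLE >> 4] = { .br_mask = 4, .swap = 1 },
};

static const struct jmp_code *jmp_code_get(unsigned char code)
{
	unsigned int op = BPF_OP(code) >> 4;

	/* Same guard as the driver: an out-of-range index or a
	 * zero-filled slot means the opcode has no table entry.
	 */
	if (op >= sizeof(jmp_code_map) / sizeof(jmp_code_map[0]) ||
	    !jmp_code_map[op].br_mask)
		return NULL;
	return &jmp_code_map[op];
}

int main(void)
{
	printf("JGT  -> %p\n", (void *)jmp_code_get(BPF_JGT)); /* non-NULL */
	printf("0x10 -> %p\n", (void *)jmp_code_get(0x10));    /* NULL */
	return 0;
}

This is what lets the sixteen jgt_imm()/jge_imm()/.../jsle_reg() wrappers removed above collapse into the two shared callbacks cmp_imm() and cmp_reg().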
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4981c8944ca3..68b5d326483d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -236,6 +236,7 @@ struct nfp_bpf_reg_state {
* @xadd_over_16bit: 16bit immediate is not guaranteed
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
@@ -264,7 +265,10 @@ struct nfp_insn_meta {
bool xadd_maybe_16bit;
};
/* jump */
- struct nfp_insn_meta *jmp_dst;
+ struct {
+ struct nfp_insn_meta *jmp_dst;
+ bool jump_neg_op;
+ };
/* function calls */
struct {
u32 func_id;
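[Editor's note] For context on the main.h hunk above: grouping jmp_dst with the new jump_neg_op flag keeps all jump metadata in one anonymous struct overlaid with the other per-class variants, so adding a flag does not grow unrelated cases. An illustrative, heavily reduced sketch of that union-of-anonymous-structs pattern (field names mirror the kernel struct, everything else is simplified; requires C11 anonymous members):

#include <stdbool.h>
#include <stdint.h>

struct insn_meta {
	union {
		/* memory ops */
		struct {
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jumps */
		struct {
			struct insn_meta *jmp_dst;
			bool jump_neg_op;
		};
		/* function calls */
		struct {
			uint32_t func_id;
		};
	};
};

int main(void)
{
	struct insn_meta meta = { 0 };

	/* Anonymous members are accessed as if declared directly. */
	meta.jump_neg_op = true;
	return meta.jump_neg_op ? 0 : 1;
}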