Diffstat (limited to 'arch/riscv/net/bpf_jit_comp64.c')
-rw-r--r-- | arch/riscv/net/bpf_jit_comp64.c | 64
1 file changed, 44 insertions, 20 deletions
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index d208a9fd6c52..6cfd164cbe88 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -515,7 +515,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_X:
@@ -542,13 +542,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 
 	/* dst = BSWAP##imm(dst) */
 	case BPF_ALU | BPF_END | BPF_FROM_LE:
-	{
-		int shift = 64 - imm;
-
-		emit(rv_slli(rd, rd, shift), ctx);
-		emit(rv_srli(rd, rd, shift), ctx);
+		switch (imm) {
+		case 16:
+			emit(rv_slli(rd, rd, 48), ctx);
+			emit(rv_srli(rd, rd, 48), ctx);
+			break;
+		case 32:
+			if (!aux->verifier_zext)
+				emit_zext_32(rd, ctx);
+			break;
+		case 64:
+			/* Do nothing */
+			break;
+		}
 		break;
-	}
+
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
 		emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
 
@@ -692,19 +700,19 @@ out_be:
 	case BPF_ALU | BPF_LSH | BPF_K:
 	case BPF_ALU64 | BPF_LSH | BPF_K:
 		emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_K:
 	case BPF_ALU64 | BPF_RSH | BPF_K:
 		emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 	case BPF_ALU | BPF_ARSH | BPF_K:
 	case BPF_ALU64 | BPF_ARSH | BPF_K:
 		emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
-		if (!is64)
+		if (!is64 && !aux->verifier_zext)
 			emit_zext_32(rd, ctx);
 		break;
 
@@ -784,11 +792,15 @@ out_be:
 	case BPF_JMP32 | BPF_JSGE | BPF_K:
 	case BPF_JMP | BPF_JSLE | BPF_K:
 	case BPF_JMP32 | BPF_JSLE | BPF_K:
-	case BPF_JMP | BPF_JSET | BPF_K:
-	case BPF_JMP32 | BPF_JSET | BPF_K:
 		rvoff = rv_offset(i, off, ctx);
 		s = ctx->ninsns;
-		emit_imm(RV_REG_T1, imm, ctx);
+		if (imm) {
+			emit_imm(RV_REG_T1, imm, ctx);
+			rs = RV_REG_T1;
+		} else {
+			/* If imm is 0, simply use zero register. */
+			rs = RV_REG_ZERO;
+		}
 		if (!is64) {
 			if (is_signed_bpf_cond(BPF_OP(code)))
 				emit_sext_32_rd(&rd, ctx);
@@ -799,16 +811,28 @@ out_be:
 
 		/* Adjust for extra insns */
 		rvoff -= (e - s) << 2;
+		emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
+		break;
 
-		if (BPF_OP(code) == BPF_JSET) {
-			/* Adjust for and */
-			rvoff -= 4;
-			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
-			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
-				    ctx);
+	case BPF_JMP | BPF_JSET | BPF_K:
+	case BPF_JMP32 | BPF_JSET | BPF_K:
+		rvoff = rv_offset(i, off, ctx);
+		s = ctx->ninsns;
+		if (is_12b_int(imm)) {
+			emit(rv_andi(RV_REG_T1, rd, imm), ctx);
 		} else {
-			emit_branch(BPF_OP(code), rd, RV_REG_T1, rvoff, ctx);
+			emit_imm(RV_REG_T1, imm, ctx);
+			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
 		}
+		/* For jset32, we should clear the upper 32 bits of t1, but
+		 * sign-extension is sufficient here and saves one instruction,
+		 * as t1 is used only in comparison against zero.
+		 */
+		if (!is64 && imm < 0)
+			emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
+		e = ctx->ninsns;
+		rvoff -= (e - s) << 2;
+		emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
 		break;
 
 	/* function call */
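Two of the changes above rely on non-obvious bit tricks: the le16 case zero-extends the low 16 bits with a slli/srli pair by 48, and the jset32 case sign-extends t1 instead of zero-extending it, as the new comment explains. The following is an illustrative userspace sketch, not kernel code or part of this patch; it only models the value the JIT leaves in t1 (the sext32() helper here stands in for the RV64 "addiw t1, t1, 0" instruction) and checks both claims on sample inputs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Models RV64 "addiw t1, t1, 0": sign-extend the low 32 bits of v. */
static uint64_t sext32(uint64_t v)
{
	return (uint64_t)(int64_t)(int32_t)v;
}

int main(void)
{
	/* le16: shifting left then right by 48 keeps only the low 16 bits,
	 * i.e. the slli/srli pair zero-extends the 16-bit value in place.
	 */
	uint64_t dst = 0x1122334455667788ULL;
	assert(((dst << 48) >> 48) == 0x7788ULL);

	/* jset32 with a negative imm: the AND uses the sign-extended 64-bit
	 * immediate, so the upper half of t1 can be nonzero even though the
	 * 32-bit test is false.
	 */
	uint64_t rd = 0xdeadbeef00000000ULL;	/* low 32 bits all zero */
	int32_t imm = -16;
	uint64_t t1 = rd & (uint64_t)(int64_t)imm;
	int taken = ((uint32_t)rd & (uint32_t)imm) != 0; /* jset32 semantics */

	assert(t1 != 0 && !taken);		/* a naive 64-bit compare would misfire */
	assert((sext32(t1) != 0) == taken);	/* sign-extension preserves the zero test */

	printf("le16 and jset32 checks hold for these examples\n");
	return 0;
}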