author     Raghuram Subramani <raghus2247@gmail.com>    2024-10-17 17:33:46 +0530
committer  Raghuram Subramani <raghus2247@gmail.com>    2024-10-17 17:33:46 +0530
commit     321337c9e82f016a0cd64f81573c18b5731ffa8d (patch)
tree       e9874bb042e851fec1e19bb8dfca694ef885456a /arch/arm64/net/bpf_jit_comp.c
parent     cc57cb4ee3b7918b74d30604735d353b9a5fa23b (diff)
Merge remote-tracking branch 'msm8998/lineage-20' into lineage-20
Change-Id: I126075a330f305c85f8fe1b8c9d408f368be95d1
Diffstat (limited to 'arch/arm64/net/bpf_jit_comp.c')
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c  63
1 file changed, 48 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 6da00264d1fb..4cb127b2f10e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -33,6 +33,7 @@
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -54,6 +55,7 @@ static const int bpf2a64[] = {
 	/* temporary registers for internal BPF JIT */
 	[TMP_REG_1] = A64_R(10),
 	[TMP_REG_2] = A64_R(11),
+	[TMP_REG_3] = A64_R(12),
 	/* tail_call_cnt */
 	[TCALL_CNT] = A64_R(26),
 	/* temporary register for blinding constants */
@@ -65,7 +67,7 @@ struct jit_ctx {
 	int idx;
 	int epilogue_offset;
 	int *offset;
-	u32 *image;
+	__le32 *image;
 };
 
 static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -125,7 +127,7 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
 
 static void jit_fill_hole(void *area, unsigned int size)
 {
-	u32 *ptr;
+	__le32 *ptr;
 	/* We are guaranteed to have aligned memory. */
 	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
 		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
@@ -317,11 +319,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
 	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
-	u8 jmp_cond;
+	const bool isdw = BPF_SIZE(code) == BPF_DW;
+	u8 jmp_cond, reg;
 	s32 jmp_offset;
 
 #define check_imm(bits, imm) do { \
@@ -521,10 +525,14 @@ emit_bswap_uxt:
 	/* IF (dst COND src) JUMP off */
 	case BPF_JMP | BPF_JEQ | BPF_X:
 	case BPF_JMP | BPF_JGT | BPF_X:
+	case BPF_JMP | BPF_JLT | BPF_X:
 	case BPF_JMP | BPF_JGE | BPF_X:
+	case BPF_JMP | BPF_JLE | BPF_X:
 	case BPF_JMP | BPF_JNE | BPF_X:
 	case BPF_JMP | BPF_JSGT | BPF_X:
+	case BPF_JMP | BPF_JSLT | BPF_X:
 	case BPF_JMP | BPF_JSGE | BPF_X:
+	case BPF_JMP | BPF_JSLE | BPF_X:
 		emit(A64_CMP(1, dst, src), ctx);
 emit_cond_jmp:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
@@ -536,9 +544,15 @@ emit_cond_jmp:
 		case BPF_JGT:
 			jmp_cond = A64_COND_HI;
 			break;
+		case BPF_JLT:
+			jmp_cond = A64_COND_CC;
+			break;
 		case BPF_JGE:
 			jmp_cond = A64_COND_CS;
 			break;
+		case BPF_JLE:
+			jmp_cond = A64_COND_LS;
+			break;
 		case BPF_JSET:
 		case BPF_JNE:
 			jmp_cond = A64_COND_NE;
@@ -546,9 +560,15 @@ emit_cond_jmp:
 		case BPF_JSGT:
 			jmp_cond = A64_COND_GT;
 			break;
+		case BPF_JSLT:
+			jmp_cond = A64_COND_LT;
+			break;
 		case BPF_JSGE:
 			jmp_cond = A64_COND_GE;
 			break;
+		case BPF_JSLE:
+			jmp_cond = A64_COND_LE;
+			break;
 		default:
 			return -EFAULT;
 		}
@@ -560,10 +580,14 @@ emit_cond_jmp:
 	/* IF (dst COND imm) JUMP off */
 	case BPF_JMP | BPF_JEQ | BPF_K:
 	case BPF_JMP | BPF_JGT | BPF_K:
+	case BPF_JMP | BPF_JLT | BPF_K:
 	case BPF_JMP | BPF_JGE | BPF_K:
+	case BPF_JMP | BPF_JLE | BPF_K:
 	case BPF_JMP | BPF_JNE | BPF_K:
 	case BPF_JMP | BPF_JSGT | BPF_K:
+	case BPF_JMP | BPF_JSLT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
+	case BPF_JMP | BPF_JSLE | BPF_K:
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		emit(A64_CMP(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
@@ -583,7 +607,7 @@ emit_cond_jmp:
 		break;
 	}
 	/* tail call */
-	case BPF_JMP | BPF_CALL | BPF_X:
+	case BPF_JMP | BPF_TAIL_CALL:
 		if (emit_bpf_tail_call(ctx))
 			return -EFAULT;
 		break;
@@ -686,11 +710,29 @@ emit_cond_jmp:
 		break;
 	}
 		break;
 
+	/* STX XADD: lock *(u32 *)(dst + off) += src */
 	case BPF_STX | BPF_XADD | BPF_W:
 	/* STX XADD: lock *(u64 *)(dst + off) += src */
 	case BPF_STX | BPF_XADD | BPF_DW:
-		goto notyet;
+		if (!off) {
+			reg = dst;
+		} else {
+			emit_a64_mov_i(1, tmp, off, ctx);
+			emit(A64_ADD(1, tmp, tmp, dst), ctx);
+			reg = tmp;
+		}
+		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+			emit(A64_STADD(isdw, reg, src), ctx);
+		} else {
+			emit(A64_LDXR(isdw, tmp2, reg), ctx);
+			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+			jmp_offset = -3;
+			check_imm19(jmp_offset);
+			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+		}
+		break;
 
 	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
 	case BPF_LD | BPF_ABS | BPF_W:
@@ -757,10 +799,6 @@ emit_cond_jmp:
 	}
 	break;
 	}
-notyet:
-	pr_info_once("*** NOT YET: opcode %02x ***\n", code);
-	return -EFAULT;
-
 	default:
 		pr_err_once("unknown opcode %02x\n", code);
 		return -EINVAL;
@@ -813,11 +851,6 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
}
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-	/* Nothing to do here. We support Internal BPF. */
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
@@ -877,7 +910,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	/* 2. Now, the actual pass. */
 
-	ctx.image = (u32 *)image_ptr;
+	ctx.image = (__le32 *)image_ptr;
 	ctx.idx = 0;
 	build_prologue(&ctx);
 
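Note on the BPF_XADD hunk above: when the CPU advertises ARM64_HAS_LSE_ATOMICS
(ARMv8.1 LSE), the JIT emits a single STADD; otherwise it falls back to an
exclusive-load/store retry loop (LDXR/ADD/STXR, with CBNZ on the store-exclusive
status register branching back -3 instructions to the LDXR). A minimal
user-space C sketch of the equivalent semantics follows; the xadd64() helper
name is ours for illustration, not part of the patch.

/*
 * BPF_STX | BPF_XADD | BPF_DW means "lock *(u64 *)(dst + off) += src" with
 * no ordering guarantee, i.e. a relaxed atomic add. Built with
 * -march=armv8.1-a a compiler can use the single-instruction LSE form;
 * on ARMv8.0 it produces the same LL/SC shape the JIT generates:
 *
 *   LSE path:                  LL/SC fallback:
 *     stadd  xsrc, [xreg]        1: ldxr  xtmp2, [xreg]
 *                                   add   xtmp2, xtmp2, xsrc
 *                                   stxr  wtmp3, xtmp2, [xreg]
 *                                   cbnz  wtmp3, 1b
 */
#include <stdint.h>

static inline void xadd64(uint64_t *addr, uint64_t val)
{
	/* Relaxed atomic add, matching BPF_XADD's lack of ordering. */
	__atomic_fetch_add(addr, val, __ATOMIC_RELAXED);
}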