author      Pu Lehui                    2023-02-15 21:52:04 +0800
committer   Daniel Borkmann             2023-02-17 21:45:30 +0100
commit      596f2e6f9cf41436a5512a3f278c86da5c5598fb
tree        379ec18fcd887274967b6b0d31c1ab3941b412c5 /arch/riscv
parent      0fd1fd0104954380477353aea29c347e85dff16d
riscv, bpf: Add bpf_arch_text_poke support for RV64
Implement bpf_arch_text_poke for RV64. For the call scenario, to keep
the BPF trampoline compatible with both the kernel and the BPF calling
contexts, we follow the RV64 ftrace framework and reserve 4 nops at the
entry of each BPF program, then use an auipc+jalr instruction pair for
the function call. However, since patching an auipc+jalr pair is not an
atomic operation, we use stop-machine to make sure the instructions are
patched atomically. The jump scenario likewise uses an auipc+jalr pair
and is patched in stop-machine context.
Signed-off-by: Pu Lehui <pulehui@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Acked-by: Björn Töpel <bjorn@rivosinc.com>
Link: https://lore.kernel.org/bpf/20230215135205.1411105-4-pulehui@huaweicloud.com
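
A note on the atomicity constraint mentioned above: an auipc+jalr call
splits the PC-relative offset across two separate 32-bit instructions,
so a call target can never be retargeted with a single aligned store.
The following userspace sketch of the offset split (modeled on the
kernel's emit_jump_and_link(); split_rvoff() is a hypothetical helper,
not kernel code) shows why both words must change together:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: auipc materializes pc + (hi20 << 12), and jalr
 * adds the sign-extended lo12.  The +0x800 rounds hi20 so that the
 * signed lo12 composes back to the original offset.  Since the offset
 * spans two instructions, retargeting a call is inherently a
 * two-store update, which is why the commit routes the patching
 * through stop-machine.
 */
static void split_rvoff(int64_t rvoff, uint32_t *hi20, uint32_t *lo12)
{
	*hi20 = (uint32_t)(((uint64_t)rvoff + 0x800) >> 12) & 0xfffff;
	*lo12 = (uint32_t)rvoff & 0xfff;
}

int main(void)
{
	uint32_t hi, lo;

	split_rvoff(0x12345678, &hi, &lo);
	printf("auipc imm20=0x%05x, jalr imm12=0x%03x\n", hi, lo);
	return 0;
}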
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/net/bpf_jit.h        |  5
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 88
2 files changed, 91 insertions(+), 2 deletions(-)
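
The bpf_jit.h hunk below adds an rv_nop() helper. For reference, this
standalone sketch (userspace code mirroring the kernel's I-type
encoder, not part of the commit) shows that it produces the canonical
RISC-V NOP, addi x0, x0, 0, which encodes to the word 0x00000013:

#include <stdint.h>
#include <stdio.h>

/* Mirror of rv_i_insn()/rv_nop() from the diff, reproduced here so
 * the encoding can be checked in isolation.
 */
static uint32_t rv_i_insn(uint16_t imm11_0, uint8_t rs1, uint8_t funct3,
			  uint8_t rd, uint8_t opcode)
{
	return ((uint32_t)imm11_0 << 20) | ((uint32_t)rs1 << 15) |
	       ((uint32_t)funct3 << 12) | ((uint32_t)rd << 7) | opcode;
}

static uint32_t rv_nop(void)
{
	/* opcode 0x13 = OP-IMM, funct3 0 = ADDI, rd = rs1 = imm = 0 */
	return rv_i_insn(0, 0, 0, 0, 0x13);
}

int main(void)
{
	printf("rv_nop() = 0x%08x\n", rv_nop()); /* prints 0x00000013 */
	return 0;
}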
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index d926e0f7ef57..bf9802a63061 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -573,6 +573,11 @@ static inline u32 rv_fence(u8 pred, u8 succ)
 	return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
 }
 
+static inline u32 rv_nop(void)
+{
+	return rv_i_insn(0, 0, 0, 0, 0x13);
+}
+
 /* RVC instrutions. */
 
 static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 69ebab81d935..b6b9bbcc977a 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -8,6 +8,8 @@
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/memory.h>
+#include <linux/stop_machine.h>
 #include "bpf_jit.h"
 
 #define RV_REG_TCC RV_REG_A6
@@ -238,7 +240,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
 	if (!is_tail_call)
 		emit_mv(RV_REG_A0, RV_REG_A5, ctx);
 	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
-		  is_tail_call ? 4 : 0, /* skip TCC init */
+		  is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
 		  ctx);
 }
@@ -615,6 +617,84 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	return 0;
 }
 
+static int gen_call_or_nops(void *target, void *ip, u32 *insns)
+{
+	s64 rvoff;
+	int i, ret;
+	struct rv_jit_context ctx;
+
+	ctx.ninsns = 0;
+	ctx.insns = (u16 *)insns;
+
+	if (!target) {
+		for (i = 0; i < 4; i++)
+			emit(rv_nop(), &ctx);
+		return 0;
+	}
+
+	rvoff = (s64)(target - (ip + 4));
+	emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx);
+	ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx);
+	if (ret)
+		return ret;
+	emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx);
+
+	return 0;
+}
+
+static int gen_jump_or_nops(void *target, void *ip, u32 *insns)
+{
+	s64 rvoff;
+	struct rv_jit_context ctx;
+
+	ctx.ninsns = 0;
+	ctx.insns = (u16 *)insns;
+
+	if (!target) {
+		emit(rv_nop(), &ctx);
+		emit(rv_nop(), &ctx);
+		return 0;
+	}
+
+	rvoff = (s64)(target - ip);
+	return emit_jump_and_link(RV_REG_ZERO, rvoff, false, &ctx);
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+		       void *old_addr, void *new_addr)
+{
+	u32 old_insns[4], new_insns[4];
+	bool is_call = poke_type == BPF_MOD_CALL;
+	int (*gen_insns)(void *target, void *ip, u32 *insns);
+	int ninsns = is_call ? 4 : 2;
+	int ret;
+
+	if (!is_bpf_text_address((unsigned long)ip))
+		return -ENOTSUPP;
+
+	gen_insns = is_call ? gen_call_or_nops : gen_jump_or_nops;
+
+	ret = gen_insns(old_addr, ip, old_insns);
+	if (ret)
+		return ret;
+
+	if (memcmp(ip, old_insns, ninsns * 4))
+		return -EFAULT;
+
+	ret = gen_insns(new_addr, ip, new_insns);
+	if (ret)
+		return ret;
+
+	cpus_read_lock();
+	mutex_lock(&text_mutex);
+	if (memcmp(ip, new_insns, ninsns * 4))
+		ret = patch_text(ip, new_insns, ninsns);
+	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
+
+	return ret;
+}
+
 int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		      bool extra_pass)
 {
@@ -1266,7 +1346,7 @@ out_be:
 
 void bpf_jit_build_prologue(struct rv_jit_context *ctx)
 {
-	int stack_adjust = 0, store_offset, bpf_stack_adjust;
+	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
 
 	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
 	if (bpf_stack_adjust)
@@ -1293,6 +1373,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
 
 	store_offset = stack_adjust - 8;
 
+	/* reserve 4 nop insns */
+	for (i = 0; i < 4; i++)
+		emit(rv_nop(), ctx);
+
 	/* First instruction is always setting the tail-call-counter
 	 * (TCC) register. This instruction is skipped for tail calls.
 	 * Force using a 4-byte (non-compressed) instruction.
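
For context on how this hook gets exercised: the generic BPF core (for
example the trampoline code in kernel/bpf/trampoline.c) calls
bpf_arch_text_poke() with NULL standing in for "nops here". The sketch
below is a hypothetical caller, not code from this commit; it only
illustrates the contract visible in the diff:

/* Hypothetical poke life cycle on RV64.  With old_addr == NULL the
 * arch code expects the 4 reserved nops at ip (anything else fails
 * with -EFAULT); with new_addr == NULL it writes the nops back.
 */
static int poke_cycle(void *ip, void *tramp)
{
	int ret;

	/* nops -> sd/auipc/jalr/ld calling into the trampoline */
	ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tramp);
	if (ret)
		return ret;

	/* call sequence -> back to the 4 reserved nops */
	return bpf_arch_text_poke(ip, BPF_MOD_CALL, tramp, NULL);
}

Note that the hook only accepts addresses inside BPF JIT images
(is_bpf_text_address()), returning -ENOTSUPP for anything else.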