[PATCH 03/13] powerpc/bpf: Update ldimm64 instructions during extra pass
Christophe Leroy
christophe.leroy at csgroup.eu
Mon Jan 10 20:27:33 AEDT 2022
On 06/01/2022 at 12:45, Naveen N. Rao wrote:
> These instructions are updated after the initial JIT, so redo codegen
> during the extra pass. Rename bpf_jit_fixup_subprog_calls() to clarify
> that this is more than just subprog calls.
>
> Fixes: 69c087ba6225b5 ("bpf: Add bpf_for_each_map_elem() helper")
> Cc: stable at vger.kernel.org # v5.15
> Signed-off-by: Naveen N. Rao <naveen.n.rao at linux.vnet.ibm.com>
> ---
> arch/powerpc/net/bpf_jit_comp.c | 29 +++++++++++++++++++++++------
> arch/powerpc/net/bpf_jit_comp32.c | 6 ++++++
> arch/powerpc/net/bpf_jit_comp64.c | 7 ++++++-
> 3 files changed, 35 insertions(+), 7 deletions(-)
>
> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index d6ffdd0f2309d0..56dd1f4e3e4447 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -23,15 +23,15 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
> memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
> }
>
> -/* Fix the branch target addresses for subprog calls */
> -static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
> - struct codegen_context *ctx, u32 *addrs)
> +/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
> +static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
> + struct codegen_context *ctx, u32 *addrs)
> {
> const struct bpf_insn *insn = fp->insnsi;
> bool func_addr_fixed;
> u64 func_addr;
> u32 tmp_idx;
> - int i, ret;
> + int i, j, ret;
>
> for (i = 0; i < fp->len; i++) {
> /*
> @@ -66,6 +66,23 @@ static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
> * of the JITed sequence remains unchanged.
> */
> ctx->idx = tmp_idx;
> + } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
> + tmp_idx = ctx->idx;
> + ctx->idx = addrs[i] / 4;
> +#ifdef CONFIG_PPC32
> + PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
> + PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
> + for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
> + EMIT(PPC_RAW_NOP());
> +#else
> + func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
> + PPC_LI64(b2p[insn[i].dst_reg], func_addr);
> + /* overwrite rest with nops */
> + for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
> + EMIT(PPC_RAW_NOP());
> +#endif
#ifdefs should be avoided as much as possible. Here it seems we could
easily do:

	if (IS_ENABLED(CONFIG_PPC32)) {
	} else {
	}

And it looks like the CONFIG_PPC64 alternative would in fact also work
on PPC32, wouldn't it? See the sketch below.
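
Something like this untested sketch, keeping exactly the code from the
patch and only replacing the preprocessor conditional (note that with
IS_ENABLED() both branches must now compile on both platforms, so the
b2p vs ctx->b2p difference would also need reconciling):

	} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
		tmp_idx = ctx->idx;
		ctx->idx = addrs[i] / 4;
		if (IS_ENABLED(CONFIG_PPC32)) {
			PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
			PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
				EMIT(PPC_RAW_NOP());
		} else {
			func_addr = ((u64)(u32)insn[i].imm) |
				    (((u64)(u32)insn[i + 1].imm) << 32);
			PPC_LI64(b2p[insn[i].dst_reg], func_addr);
			/* overwrite rest with nops */
			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
				EMIT(PPC_RAW_NOP());
		}
		ctx->idx = tmp_idx;
		i++;
	}
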
> + ctx->idx = tmp_idx;
> + i++;
> }
> }
>
> @@ -200,13 +217,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
> /*
> * Do not touch the prologue and epilogue as they will remain
> * unchanged. Only fix the branch target address for subprog
> - * calls in the body.
> + * calls in the body, and ldimm64 instructions.
> *
> * This does not change the offsets and lengths of the subprog
> * call instruction sequences and hence, the size of the JITed
> * image as well.
> */
> - bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
> + bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
>
> /* There is no need to perform the usual passes. */
> goto skip_codegen_passes;
> diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
> index 997a47fa615b30..2258d3886d02ec 100644
> --- a/arch/powerpc/net/bpf_jit_comp32.c
> +++ b/arch/powerpc/net/bpf_jit_comp32.c
> @@ -293,6 +293,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
> bool func_addr_fixed;
> u64 func_addr;
> u32 true_cond;
> + u32 tmp_idx;
> + int j;
>
> /*
> * addrs[] maps a BPF bytecode address into a real offset from
> @@ -908,8 +910,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
> * 16 byte instruction that uses two 'struct bpf_insn'
> */
> case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
> + tmp_idx = ctx->idx;
> PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
> PPC_LI32(dst_reg, (u32)insn[i].imm);
> + /* padding to allow full 4 instructions for later patching */
> + for (j = ctx->idx - tmp_idx; j < 4; j++)
> + EMIT(PPC_RAW_NOP());
> /* Adjust for two bpf instructions */
> addrs[++i] = ctx->idx * 4;
> break;
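
For reference, the fixed-size padding works because PPC_LI32() expands
to one or two instructions depending on the immediate, so the pair
above occupies anywhere from 2 to 4 instruction slots, e.g.:

	imm = 0x00000001  ->  li   rX, 1                            (1 insn)
	imm = 0x12345678  ->  lis  rX, 0x1234; ori rX, rX, 0x5678   (2 insns)

Padding with nops up to 4 keeps the sequence size constant, so the
extra pass can rewrite the immediates in place without shifting any
offsets.
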
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index 472d4a551945dd..3d018ecc475b2b 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -319,6 +319,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
> u64 imm64;
> u32 true_cond;
> u32 tmp_idx;
> + int j;
>
> /*
> * addrs[] maps a BPF bytecode address into a real offset from
> @@ -848,9 +849,13 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
> case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
> imm64 = ((u64)(u32) insn[i].imm) |
> (((u64)(u32) insn[i+1].imm) << 32);
> + tmp_idx = ctx->idx;
> + PPC_LI64(dst_reg, imm64);
> + /* padding to allow full 5 instructions for later patching */
> + for (j = ctx->idx - tmp_idx; j < 5; j++)
> + EMIT(PPC_RAW_NOP());
> /* Adjust for two bpf instructions */
> addrs[++i] = ctx->idx * 4;
> - PPC_LI64(dst_reg, imm64);
> break;
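
Same idea on the 64-bit side: PPC_LI64() can emit anywhere from one
instruction (a small immediate falls back to PPC_LI32()) up to five
(lis/ori/sldi/oris/ori for a full 64-bit value), hence the padding to
a fixed 5 slots, e.g.:

	imm64 = 0x1                 ->  li rX, 1               (1 insn + 4 nops)
	imm64 = 0x123456789abcdef0  ->  lis/ori/sldi/oris/ori  (5 insns, no nops)
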
>
> /*