diff options
| author | Jiri Olsa <jolsa@kernel.org> | 2022-01-20 15:17:43 +0100 |
|---|---|---|
| committer | Jiri Olsa <jolsa@kernel.org> | 2022-02-12 21:35:06 +0100 |
| commit | 6944d13cf0af8628c0b80eb90359d90cbba0d69c (patch) | |
| tree | 5c77acfe3557727630588f2dd8b47435387b3401 | |
| parent | 32adbf1760d4cccb9702863371d50b625ae7ee1d (diff) | |
| download | perf-6944d13cf0af8628c0b80eb90359d90cbba0d69c.tar.gz | |
bpf: Add bpf_get_func_ip kprobe helper for fprobe link
Adding support to call the get_func_ip_fprobe helper from kprobe
programs attached by an fprobe link.
Also adding support to inline it, because it's a single load
instruction.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
| -rw-r--r-- | kernel/bpf/verifier.c | 21 | ||||
| -rw-r--r-- | kernel/trace/bpf_trace.c | 16 |
2 files changed, 35 insertions, 2 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bbef86cb4e722..16022cd53fb44 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13626,7 +13626,7 @@ patch_map_ops_generic: continue; } - /* Implement bpf_get_func_ip inline. */ + /* Implement tracing bpf_get_func_ip inline. */ if (prog_type == BPF_PROG_TYPE_TRACING && insn->imm == BPF_FUNC_get_func_ip) { /* Load IP address from ctx - 16 */ @@ -13641,6 +13641,25 @@ patch_map_ops_generic: continue; } +#ifdef CONFIG_X86 + /* Implement kprobe/fprobe bpf_get_func_ip inline. */ + if (prog_type == BPF_PROG_TYPE_KPROBE && + eatype == BPF_TRACE_FPROBE && + insn->imm == BPF_FUNC_get_func_ip) { + /* Load IP address from ctx (struct pt_regs) ip */ + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct pt_regs, ip)); + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); + if (!new_prog) + return -ENOMEM; + + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } +#endif + patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0c68306350d68..e5cee68eec176 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1037,6 +1037,19 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs) +{ + /* This helper call is inlined by verifier. 
*/ + return regs->ip; +} + +static const struct bpf_func_proto bpf_get_func_ip_proto_fprobe = { + .func = bpf_get_func_ip_fprobe, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; @@ -1280,7 +1293,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_override_return_proto; #endif case BPF_FUNC_get_func_ip: - return &bpf_get_func_ip_proto_kprobe; + return prog->expected_attach_type == BPF_TRACE_FPROBE ? + &bpf_get_func_ip_proto_fprobe : &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: return &bpf_get_attach_cookie_proto_trace; default: |
