aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJiri Olsa <jolsa@kernel.org>2020-10-15 14:54:04 +0200
committerJiri Olsa <jolsa@kernel.org>2020-10-18 18:34:10 +0200
commit74674c0d67be89d274546c46f8715315099c2b9d (patch)
tree3f05b86102cb70ce86619ba0cd3dcf3efeb55748
parente8b5c28a605cb34f63d756f59f964b0c5accc057 (diff)
downloadperf-74674c0d67be89d274546c46f8715315099c2b9d.tar.gz
bpf: Add BPF_TRAMPOLINE_BATCH_ATTACH support
-rw-r--r--include/linux/bpf.h15
-rw-r--r--include/uapi/linux/bpf.h7
-rw-r--r--kernel/bpf/syscall.c87
-rw-r--r--kernel/bpf/trampoline.c69
4 files changed, 163 insertions, 15 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 2b16bf48aab61..d28c7ac3af3f4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -583,6 +583,13 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_REPLACE, /* more than MAX */
};
+struct bpf_trampoline_batch {
+ int count;
+ int idx;
+ unsigned long *ips;
+ unsigned long *addrs;
+};
+
struct bpf_trampoline {
/* hlist for trampoline_table */
struct hlist_node hlist;
@@ -644,11 +651,14 @@ static __always_inline unsigned int bpf_dispatcher_nop_func(
return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr,
+ struct bpf_trampoline_batch *batch);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+struct bpf_trampoline_batch *bpf_trampoline_batch_alloc(int count);
+void bpf_trampoline_batch_free(struct bpf_trampoline_batch *batch);
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -693,7 +703,8 @@ void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
- struct bpf_trampoline *tr)
+ struct bpf_trampoline *tr,
+ struct bpf_trampoline_batch *batch)
{
return -ENOTSUPP;
}
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index bf5a99d803e48..04df4d576fd43 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -125,6 +125,7 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
+ BPF_TRAMPOLINE_BATCH_ATTACH,
};
enum bpf_map_type {
@@ -631,6 +632,12 @@ union bpf_attr {
__u32 prog_fd;
} raw_tracepoint;
+ struct { /* anonymous struct used by BPF_TRAMPOLINE_BATCH_ATTACH */
+ __aligned_u64 in;
+ __aligned_u64 out;
+ __u32 count;
+ } trampoline_batch;
+
struct { /* anonymous struct for BPF_BTF_LOAD */
__aligned_u64 btf;
__aligned_u64 btf_log_buf;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 61ef29f9177d2..758d211a05672 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2553,7 +2553,8 @@ static const struct bpf_link_ops bpf_tracing_link_lops = {
static int bpf_tracing_prog_attach(struct bpf_prog *prog,
int tgt_prog_fd,
- u32 btf_id)
+ u32 btf_id,
+ struct bpf_trampoline_batch *batch)
{
struct bpf_link_primer link_primer;
struct bpf_prog *tgt_prog = NULL;
@@ -2678,7 +2679,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
if (err)
goto out_unlock;
- err = bpf_trampoline_link_prog(prog, tr);
+ err = bpf_trampoline_link_prog(prog, tr, batch);
if (err) {
bpf_link_cleanup(&link_primer);
link = NULL;
@@ -2826,7 +2827,7 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
tp_name = prog->aux->attach_func_name;
break;
}
- return bpf_tracing_prog_attach(prog, 0, 0);
+ return bpf_tracing_prog_attach(prog, 0, 0, NULL);
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
if (strncpy_from_user(buf,
@@ -2879,6 +2880,80 @@ out_put_prog:
return err;
}
+#define BPF_RAW_TRACEPOINT_OPEN_BATCH_LAST_FIELD trampoline_batch.count
+
+static int bpf_trampoline_batch(const union bpf_attr *attr, int cmd)
+{
+ void __user *uout = u64_to_user_ptr(attr->trampoline_batch.out);
+ void __user *uin = u64_to_user_ptr(attr->trampoline_batch.in);
+ struct bpf_trampoline_batch *batch = NULL;
+ struct bpf_prog *prog;
+ int count, ret, i, fd;
+ u32 *in, *out;
+
+ if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN_BATCH))
+ return -EINVAL;
+
+ if (!uin || !uout)
+ return -EINVAL;
+
+ count = attr->trampoline_batch.count;
+
+ in = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ out = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!in || !out) {
+ kfree(in);
+ kfree(out);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(in, uin, count * sizeof(u32))) {
+ ret = -EFAULT;
+ goto out_clean;
+ }
+
+ /* test read out array */
+ if (copy_to_user(uout, out, count * sizeof(u32))) {
+ ret = -EFAULT;
+ goto out_clean;
+ }
+
+ batch = bpf_trampoline_batch_alloc(count);
+ if (!batch) {
+ ret = -ENOMEM;
+ goto out_clean;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (cmd == BPF_TRAMPOLINE_BATCH_ATTACH) {
+ prog = bpf_prog_get(in[i]);
+ if (IS_ERR(prog)) {
+ ret = PTR_ERR(prog);
+ goto out_clean;
+ }
+
+ ret = -EINVAL;
+ if (prog->type != BPF_PROG_TYPE_TRACING ||
+ prog->expected_attach_type == BPF_TRACE_RAW_TP) {
+ bpf_prog_put(prog);
+ goto out_clean;
+ }
+
+ fd = bpf_tracing_prog_attach(prog, 0, 0, batch);
+ if (fd < 0) {
+ ret = fd;
+ goto out_clean;
+ }
+
+ out[i] = fd;
+ }
+ }
+
+ ret = register_ftrace_direct_ips(batch->ips, batch->addrs, batch->idx);
+ if (!ret)
+ WARN_ON_ONCE(copy_to_user(uout, out, count * sizeof(u32)));
+
+out_clean:
+ bpf_trampoline_batch_free(batch);
+ kfree(in);
+ kfree(out);
+ return ret;
+}
+
static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
enum bpf_attach_type attach_type)
{
@@ -4018,7 +4093,8 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *
else if (prog->type == BPF_PROG_TYPE_EXT)
return bpf_tracing_prog_attach(prog,
attr->link_create.target_fd,
- attr->link_create.target_btf_id);
+ attr->link_create.target_btf_id,
+ NULL);
return -EINVAL;
}
@@ -4437,6 +4513,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_RAW_TRACEPOINT_OPEN:
err = bpf_raw_tracepoint_open(&attr);
break;
+ case BPF_TRAMPOLINE_BATCH_ATTACH:
+ err = bpf_trampoline_batch(&attr, cmd);
+ break;
case BPF_BTF_LOAD:
err = bpf_btf_load(&attr);
break;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 35c5887d82ffe..ba2ba28a56c96 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -107,6 +107,51 @@ out:
return tr;
}
+static int bpf_trampoline_batch_add(struct bpf_trampoline_batch *batch,
+ unsigned long ip, unsigned long addr)
+{
+ int idx = batch->idx;
+
+ if (idx >= batch->count)
+ return -EINVAL;
+
+ batch->ips[idx] = ip;
+ batch->addrs[idx] = addr;
+ batch->idx++;
+ return 0;
+}
+
+struct bpf_trampoline_batch *bpf_trampoline_batch_alloc(int count)
+{
+ struct bpf_trampoline_batch *batch;
+
+ batch = kmalloc(sizeof(*batch), GFP_KERNEL);
+ if (!batch)
+ return NULL;
+
+ batch->ips = kcalloc(count, sizeof(batch->ips[0]), GFP_KERNEL);
+ batch->addrs = kcalloc(count, sizeof(batch->addrs[0]), GFP_KERNEL);
+ if (!batch->ips || !batch->addrs) {
+ kfree(batch->ips);
+ kfree(batch->addrs);
+ kfree(batch);
+ return NULL;
+ }
+
+ batch->count = count;
+ batch->idx = 0;
+ return batch;
+}
+
+void bpf_trampoline_batch_free(struct bpf_trampoline_batch *batch)
+{
+ if (!batch)
+ return;
+ kfree(batch->ips);
+ kfree(batch->addrs);
+ kfree(batch);
+}
+
static int is_ftrace_location(void *ip)
{
long addr;
@@ -144,7 +189,8 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
}
/* first time registering */
-static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
+static int register_fentry(struct bpf_trampoline *tr, void *new_addr,
+ struct bpf_trampoline_batch *batch)
{
void *ip = tr->func.addr;
int ret;
@@ -154,9 +200,12 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
return ret;
tr->func.ftrace_managed = ret;
- if (tr->func.ftrace_managed)
- ret = register_ftrace_direct((long)ip, (long)new_addr);
- else
+ if (tr->func.ftrace_managed) {
+ if (batch)
+ ret = bpf_trampoline_batch_add(batch, (long)ip, (long)new_addr);
+ else
+ ret = register_ftrace_direct((long)ip, (long)new_addr);
+ } else
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
return ret;
}
@@ -185,7 +234,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
return tprogs;
}
-static int bpf_trampoline_update(struct bpf_trampoline *tr)
+static int bpf_trampoline_update(struct bpf_trampoline *tr,
+ struct bpf_trampoline_batch *batch)
{
void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
@@ -230,7 +280,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
err = modify_fentry(tr, old_image, new_image);
else
/* first time registering */
- err = register_fentry(tr, new_image);
+ err = register_fentry(tr, new_image, batch);
if (err)
goto out;
tr->selector++;
@@ -261,7 +311,8 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
}
}
-int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr,
+ struct bpf_trampoline_batch *batch)
{
enum bpf_tramp_prog_type kind;
int err = 0;
@@ -299,7 +350,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
}
hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
tr->progs_cnt[kind]++;
- err = bpf_trampoline_update(tr);
+ err = bpf_trampoline_update(tr, batch);
if (err) {
hlist_del(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
@@ -326,7 +377,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
}
hlist_del(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
- err = bpf_trampoline_update(tr);
+ err = bpf_trampoline_update(tr, NULL);
out:
mutex_unlock(&tr->mutex);
return err;