libbpf: Split bpf object load into prepare/load
Introduce the bpf_object__prepare API: an additional intermediate preparation
step that performs ELF processing, relocations, prepares the final state of
BPF program instructions (accessible with bpf_program__insns()), creates and
(potentially) pins maps, and stops short of loading BPF programs.

We anticipate a few use cases for this API, such as:

* Use prepare to initialize bpf_token without loading freplace programs,
  unlocking the possibility to look up BTF of other programs.
* Execute prepare to obtain finalized BPF program instructions without
  loading programs, enabling tools like veristat to process one program at
  a time, without incurring the cost of ELF parsing and processing.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20250303135752.158343-4-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 1315c28ed8
parent 9a9e347835
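To make the intended call sequence concrete, here is a minimal usage sketch
(not part of this patch): it assumes a hypothetical object file "prog.bpf.o"
and trims error handling, but every libbpf call shown is real, with
bpf_object__prepare() being the API introduced by this commit.

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	/* "prog.bpf.o" is a placeholder object file for this sketch */
	obj = bpf_object__open("prog.bpf.o");
	if (!obj)
		return 1;

	/* ELF processing, relocations, map creation; no programs loaded yet */
	err = bpf_object__prepare(obj);
	if (err)
		goto out;

	/* instructions are finalized after prepare, so veristat-like tools
	 * can inspect them without ever loading the programs
	 */
	bpf_object__for_each_program(prog, obj) {
		printf("%s: %zu insns\n", bpf_program__name(prog),
		       bpf_program__insn_cnt(prog));
	}

	/* optionally proceed to actually load programs into the kernel */
	err = bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err ? 1 : 0;
}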
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -7901,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	size_t i;
 	int err;
 
-	for (i = 0; i < obj->nr_programs; i++) {
-		prog = &obj->programs[i];
-		err = bpf_object__sanitize_prog(obj, prog);
-		if (err)
-			return err;
-	}
-
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
 		if (prog_is_subprog(obj, prog))
@@ -7933,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	return 0;
 }
 
+static int bpf_object_prepare_progs(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	size_t i;
+	int err;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+		err = bpf_object__sanitize_prog(obj, prog);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 
 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
@@ -8549,9 +8557,72 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
 	return 0;
 }
 
+static void bpf_object_unpin(struct bpf_object *obj)
+{
+	int i;
+
+	/* unpin any maps that were auto-pinned during load */
+	for (i = 0; i < obj->nr_maps; i++)
+		if (obj->maps[i].pinned && !obj->maps[i].reused)
+			bpf_map__unpin(&obj->maps[i], NULL);
+}
+
+static void bpf_object_post_load_cleanup(struct bpf_object *obj)
+{
+	int i;
+
+	/* clean up fd_array */
+	zfree(&obj->fd_array);
+
+	/* clean up module BTFs */
+	for (i = 0; i < obj->btf_module_cnt; i++) {
+		close(obj->btf_modules[i].fd);
+		btf__free(obj->btf_modules[i].btf);
+		free(obj->btf_modules[i].name);
+	}
+	obj->btf_module_cnt = 0;
+	zfree(&obj->btf_modules);
+
+	/* clean up vmlinux BTF */
+	btf__free(obj->btf_vmlinux);
+	obj->btf_vmlinux = NULL;
+}
+
+static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
+{
+	int err;
+
+	if (obj->state >= OBJ_PREPARED) {
+		pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
+		return -EINVAL;
+	}
+
+	err = bpf_object_prepare_token(obj);
+	err = err ? : bpf_object__probe_loading(obj);
+	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
+	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
+	err = err ? : bpf_object__sanitize_maps(obj);
+	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
+	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+	err = err ? : bpf_object__sanitize_and_load_btf(obj);
+	err = err ? : bpf_object__create_maps(obj);
+	err = err ? : bpf_object_prepare_progs(obj);
+
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		obj->state = OBJ_LOADED;
+		return err;
+	}
+
+	obj->state = OBJ_PREPARED;
+	return 0;
+}
+
 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
-	int err, i;
+	int err;
 
 	if (!obj)
 		return libbpf_err(-EINVAL);
@@ -8571,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
 	}
 
-	err = bpf_object_prepare_token(obj);
-	err = err ? : bpf_object__probe_loading(obj);
-	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
-	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
-	err = err ? : bpf_object__sanitize_maps(obj);
-	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
-	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
-	err = err ? : bpf_object__sanitize_and_load_btf(obj);
-	err = err ? : bpf_object__create_maps(obj);
-	err = err ? : bpf_object__load_progs(obj, extra_log_level);
+	if (obj->state < OBJ_PREPARED) {
+		err = bpf_object_prepare(obj, target_btf_path);
+		if (err)
+			return libbpf_err(err);
+	}
+	err = bpf_object__load_progs(obj, extra_log_level);
 	err = err ? : bpf_object_init_prog_arrays(obj);
 	err = err ? : bpf_object_prepare_struct_ops(obj);
 
@@ -8593,35 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
 	}
 
-	/* clean up fd_array */
-	zfree(&obj->fd_array);
-
-	/* clean up module BTFs */
-	for (i = 0; i < obj->btf_module_cnt; i++) {
-		close(obj->btf_modules[i].fd);
-		btf__free(obj->btf_modules[i].btf);
-		free(obj->btf_modules[i].name);
-	}
-	free(obj->btf_modules);
-
-	/* clean up vmlinux BTF */
-	btf__free(obj->btf_vmlinux);
-	obj->btf_vmlinux = NULL;
-
+	bpf_object_post_load_cleanup(obj);
 	obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
-	if (err)
-		goto out;
 
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		pr_warn("failed to load object '%s'\n", obj->path);
+		return libbpf_err(err);
+	}
+
 	return 0;
-out:
-	/* unpin any maps that were auto-pinned during load */
-	for (i = 0; i < obj->nr_maps; i++)
-		if (obj->maps[i].pinned && !obj->maps[i].reused)
-			bpf_map__unpin(&obj->maps[i], NULL);
-
-	bpf_object_unload(obj);
-	pr_warn("failed to load object '%s'\n", obj->path);
-	return libbpf_err(err);
 }
 
+int bpf_object__prepare(struct bpf_object *obj)
+{
+	return libbpf_err(bpf_object_prepare(obj, NULL));
+}
+
 int bpf_object__load(struct bpf_object *obj)
@@ -9069,6 +9122,13 @@ void bpf_object__close(struct bpf_object *obj)
 	if (IS_ERR_OR_NULL(obj))
 		return;
 
+	/*
+	 * if user called bpf_object__prepare() without ever getting to
+	 * bpf_object__load(), we need to clean up stuff that is normally
+	 * cleaned up at the end of loading step
+	 */
+	bpf_object_post_load_cleanup(obj);
+
 	usdt_manager_free(obj->usdt_man);
 	obj->usdt_man = NULL;
 
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -241,6 +241,19 @@ LIBBPF_API struct bpf_object *
 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
 		     const struct bpf_object_open_opts *opts);
 
+/**
+ * @brief **bpf_object__prepare()** prepares BPF object for loading:
+ * performs ELF processing, relocations, prepares final state of BPF program
+ * instructions (accessible with bpf_program__insns()), creates and
+ * (potentially) pins maps. Leaves BPF object in the state ready for program
+ * loading.
+ * @param obj Pointer to a valid BPF object instance returned by
+ * **bpf_object__open*()** API
+ * @return 0, on success; negative error code, otherwise, error code is
+ * stored in errno
+ */
+int bpf_object__prepare(struct bpf_object *obj);
+
 /**
  * @brief **bpf_object__load()** loads BPF object into kernel.
  * @param obj Pointer to a valid BPF object instance returned by
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -436,6 +436,7 @@ LIBBPF_1.6.0 {
 		bpf_linker__add_buf;
 		bpf_linker__add_fd;
 		bpf_linker__new_fd;
+		bpf_object__prepare;
 		btf__add_decl_attr;
 		btf__add_type_attr;
 } LIBBPF_1.5.0;