libbpf: Add BTF permutation support for type reordering

Introduce btf__permute() API to allow in-place rearrangement of BTF types.
This function reorganizes BTF type order according to a provided array of
type IDs, updating all type references to maintain consistency.

Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20260109130003.3313716-2-dolinux.peng@gmail.com
This commit is contained in:
Donglin Peng
2026-01-09 20:59:53 +08:00
committed by Andrii Nakryiko
parent 2c5038dcf4
commit b7c6c02b5f
3 changed files with 176 additions and 0 deletions

src/btf.c: 133 lines changed

@@ -5887,3 +5887,136 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
btf->owns_base = false;
return libbpf_err(err);
}
/* Context passed to btf_permute_remap_type_id() while btf__permute()
 * rewrites type ID references.
 */
struct btf_permute {
/* BTF object whose types are being reordered in place */
struct btf *btf;
/* caller-provided mapping from old type ID to new type ID */
__u32 *id_map;
/* 1 when id_map includes the reserved VOID slot at index 0 (non-split
 * BTF), 0 for split BTF where id_map covers only this BTF's own types */
__u32 start_offs;
};
/* Visitor callback used with btf_field_iter/btf_ext_visit_type_ids():
 * rewrite a single type ID reference through the permutation's id_map.
 * IDs below start_id (base BTF types and VOID) are left untouched;
 * references past the last known type are rejected with -EINVAL.
 */
static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
{
	struct btf_permute *perm = ctx;
	struct btf *btf = perm->btf;
	__u32 old_id = *type_id;
	__u32 idx;

	/* base BTF types and VOID keep their IDs as-is */
	if (old_id < btf->start_id)
		return 0;
	/* reference to a non-existent type */
	if (old_id >= btf__type_cnt(btf))
		return -EINVAL;

	idx = old_id - btf->start_id + perm->start_offs;
	*type_id = perm->id_map[idx];
	return 0;
}
/* btf__permute() - rearrange BTF type order in place.
 *
 * @btf: BTF object to permute (may be split BTF on top of a base BTF)
 * @id_map: maps each old type ID to the new type ID it should occupy;
 *          all type ID references inside types (and, optionally, inside
 *          opts->btf_ext) are rewritten to stay consistent
 * @id_map_cnt: number of entries in @id_map; must equal btf->nr_types for
 *              split BTF, or btf__type_cnt(btf) for non-split BTF (in which
 *              case id_map[0] is the VOID slot and must be 0)
 * @opts: optional; if opts->btf_ext is set, its type ID references are
 *        remapped as well
 *
 * Returns 0 on success, a negative error code (routed through libbpf_err())
 * on failure. On failure @btf's type data is left unchanged — the reordered
 * staging buffer is simply discarded. NOTE(review): if remapping
 * opts->btf_ext fails partway, btf_ext may already be partially updated.
 */
int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
const struct btf_permute_opts *opts)
{
struct btf_permute p;
struct btf_ext *btf_ext;
void *nt, *new_types = NULL;
__u32 *order_map = NULL;
int err = 0, i;
__u32 n, id, start_offs = 0;
if (!OPTS_VALID(opts, btf_permute_opts))
return libbpf_err(-EINVAL);
if (btf__base_btf(btf)) {
/* split BTF: id_map covers only the types owned by this BTF */
n = btf->nr_types;
} else {
/* non-split BTF: id_map also covers VOID (ID 0), which must map to 0 */
if (id_map[0] != 0)
return libbpf_err(-EINVAL);
n = btf__type_cnt(btf);
start_offs = 1;
}
if (id_map_cnt != n)
return libbpf_err(-EINVAL);
/* order_map records the inverse permutation: for each new type ID
 * (normalized to a 0-based index) the old type ID whose data should
 * land in that slot */
order_map = calloc(id_map_cnt, sizeof(*id_map));
if (!order_map) {
err = -ENOMEM;
goto done;
}
/* staging buffer for the reordered raw type data; total size is
 * unchanged by a permutation */
new_types = calloc(btf->hdr->type_len, 1);
if (!new_types) {
err = -ENOMEM;
goto done;
}
if (btf_ensure_modifiable(btf)) {
err = -ENOMEM;
goto done;
}
/* validate id_map (in-range, no duplicate targets) and build the
 * inverse mapping */
for (i = start_offs; i < id_map_cnt; i++) {
id = id_map[i];
if (id < btf->start_id || id >= btf__type_cnt(btf)) {
err = -EINVAL;
goto done;
}
/* normalize the new ID to a 0-based order_map index */
id -= btf->start_id - start_offs;
/* two old IDs cannot be mapped to the same new ID */
if (order_map[id]) {
err = -EINVAL;
goto done;
}
/* store the (non-zero) old type ID for this new slot */
order_map[id] = i + btf->start_id - start_offs;
}
p.btf = btf;
p.id_map = id_map;
p.start_offs = start_offs;
/* copy each type into new_types in its new position, rewriting all
 * type ID references through id_map as we go */
nt = new_types;
for (i = start_offs; i < id_map_cnt; i++) {
struct btf_field_iter it;
const struct btf_type *t;
__u32 *type_id;
int type_size;
id = order_map[i];
t = btf__type_by_id(btf, id);
type_size = btf_type_size(t);
memcpy(nt, t, type_size);
/* fix up referenced IDs for BTF */
err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
if (err)
goto done;
while ((type_id = btf_field_iter_next(&it))) {
err = btf_permute_remap_type_id(type_id, &p);
if (err)
goto done;
}
nt += type_size;
}
/* fix up referenced IDs for btf_ext */
btf_ext = OPTS_GET(opts, btf_ext, NULL);
if (btf_ext) {
err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
if (err)
goto done;
}
/* rebuild the type ID -> byte offset index for the new layout */
for (nt = new_types, i = 0; i < id_map_cnt - start_offs; i++) {
btf->type_offs[i] = nt - new_types;
nt += btf_type_size(nt);
}
/* success: commit by swapping in the reordered type data */
free(order_map);
free(btf->types_data);
btf->types_data = new_types;
return 0;
done:
free(order_map);
free(new_types);
return libbpf_err(err);
}