Compare commits


48 Commits

Author SHA1 Message Date
thiagoftsm
057f85d000 Merge branch 'libbpf:master' into master
2024-09-04 01:21:11 +00:00
Andrii Nakryiko
caa17bdcbf ci: regenerate vmlinux.h
Regenerated latest vmlinux.h for old kernels.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-08-30 16:29:01 -07:00
Andrii Nakryiko
76c9f50f3e sync: latest libbpf changes from kernel
Syncing latest libbpf commits from kernel repository.
Baseline bpf-next commit:   ec5b8c76ab1c6d163762d60cfbedcd27e7527144
Checkpoint bpf-next commit: 2ad6d23f465a4f851e3bcf6d74c315ce7b2c205b
Baseline bpf commit:        e1533b6319ab9c3a97dad314dd88b3783bc41b69
Checkpoint bpf commit:      b408473ea01b2e499d23503e2bf898416da9d7ac

Alan Maguire (1):
  libbpf: Fix license for btf_relocate.c

Andrii Nakryiko (2):
  libbpf: Fix no-args func prototype BTF dumping syntax
  libbpf: Fix bpf_object__open_skeleton()'s mishandling of options

David Vernet (1):
  libbpf: Don't take direct pointers into BTF data from st_ops

Jordan Rome (1):
  bpf: Add bpf_copy_from_user_str kfunc

Kan Liang (1):
  perf/x86/intel: Support new data source for Lunar Lake

Sam James (1):
  libbpf: Workaround -Wmaybe-uninitialized false positive

Stanislav Fomichev (1):
  selftests/bpf: Add XDP_UMEM_TX_METADATA_LEN to XSK TX metadata test

Tony Ambardar (1):
  libbpf: Ensure new BTF objects inherit input endianness

 include/uapi/linux/bpf.h        |  9 ++++
 include/uapi/linux/if_xdp.h     |  4 ++
 include/uapi/linux/perf_event.h |  6 ++-
 src/btf.c                       |  4 ++
 src/btf_dump.c                  |  8 ++--
 src/btf_relocate.c              |  2 +-
 src/elf.c                       |  3 ++
 src/libbpf.c                    | 75 ++++++++++++++-------------------
 8 files changed, 62 insertions(+), 49 deletions(-)

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-08-30 16:29:01 -07:00
Tony Ambardar
fe28fae57a libbpf: Ensure new BTF objects inherit input endianness
New split BTF needs to preserve base's endianness. Similarly, when
creating a distilled BTF, we need to preserve original endianness.

Fix by updating libbpf's btf__distill_base() and btf_new_empty() to retain
the byte order of any source BTF objects when creating new ones.
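
For illustration, a minimal sketch of the guarantee this fix provides (not part
of the original patch), assuming a libbpf build that carries it: after
btf__distill_base(), both resulting objects report the same byte order as the
source BTF.

```
#include <assert.h>
#include <bpf/btf.h>

/* Sketch: distilled base and new split BTF keep the source's byte order. */
static void check_distill_endianness(const struct btf *src)
{
	struct btf *base = NULL, *split = NULL;

	if (btf__distill_base(src, &base, &split))
		return;

	assert(btf__endianness(base) == btf__endianness(src));
	assert(btf__endianness(split) == btf__endianness(src));

	btf__free(split);
	btf__free(base);
}
```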

Fixes: ba451366bf44 ("libbpf: Implement basic split BTF support")
Fixes: 58e185a0dc35 ("libbpf: Add btf__distill_base() creating split BTF with distilled base BTF")
Reported-by: Song Liu <song@kernel.org>
Reported-by: Eduard Zingerman <eddyz87@gmail.com>
Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Alan Maguire <alan.maguire@oracle.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/6358db36c5f68b07873a0a5be2d062b1af5ea5f8.camel@gmail.com/
Link: https://lore.kernel.org/bpf/20240830095150.278881-1-tony.ambardar@gmail.com
2024-08-30 16:29:01 -07:00
Andrii Nakryiko
f6f24022d3 libbpf: Fix bpf_object__open_skeleton()'s mishandling of options
We do an ugly copying of options in bpf_object__open_skeleton() just to
be able to set object name from skeleton's recorded name (while still
allowing user to override it through opts->object_name).

This is not just ugly, but it also is broken due to memcpy() that
doesn't take into account potential skel_opts' and user-provided opts'
sizes differences due to backward and forward compatibility. This leads
to copying over extra bytes and then failing to validate options
properly. It could, technically, lead also to SIGSEGV, if we are unlucky.

So just get rid of that memory copy completely and instead pass
default object name into bpf_object_open() directly, simplifying all
this significantly. The rule now is that obj_name should be non-NULL for
bpf_object_open() when called with in-memory buffer, so validate that
explicitly as well.

We adapt bpf_object__open_mem() to this as well and generate a default
name (based on buffer memory address and size) outside of bpf_object_open().
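
As background (not from the patch), libbpf option structs are made forward and
backward compatible by a leading sz field, which is exactly what a raw memcpy()
between two such structs of potentially different sizes breaks. A hedged
user-side sketch of the intended pattern, using the LIBBPF_OPTS() initializer:

```
#include <bpf/libbpf.h>

/* Sketch: LIBBPF_OPTS() records the caller's struct size in .sz, so libbpf
 * can validate and interpret only the option fields this caller knows about. */
static struct bpf_object *open_buf_with_name(const void *buf, size_t buf_sz)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_obj",	/* user-provided override */
	);

	return bpf_object__open_mem(buf, buf_sz, &opts);
}
```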

Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Reported-by: Daniel Müller <deso@posteo.net>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Daniel Müller <deso@posteo.net>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240827203721.1145494-1-andrii@kernel.org
2024-08-30 16:29:01 -07:00
Jordan Rome
4bd31a1044 bpf: Add bpf_copy_from_user_str kfunc
This adds a kfunc wrapper around strncpy_from_user,
which can be called from sleepable BPF programs.

This matches the non-sleepable 'bpf_probe_read_user_str'
helper except it includes an additional 'flags'
param, which allows consumers to clear the entire
destination buffer on success or failure.
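
A hedged usage sketch (not part of the patch) of how a sleepable BPF program
might declare and call the new kfunc; the extern declaration is assumed to
mirror the kernel-side prototype, and BPF_F_PAD_ZEROS comes from the
bpf_kfunc_flags enum added alongside it.

```
/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* assumed to mirror the kernel-side kfunc prototype */
extern int bpf_copy_from_user_str(void *dst, u32 dst__sz,
				  const void *unsafe_ptr__ign, u64 flags) __ksym;

char path[256];

SEC("fentry.s/do_sys_openat2")		/* sleepable fentry program */
int BPF_PROG(trace_open, int dfd, const char *uname)
{
	/* copy the user-space string; zero-fill the rest of the buffer */
	bpf_copy_from_user_str(path, sizeof(path), uname, BPF_F_PAD_ZEROS);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```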

Signed-off-by: Jordan Rome <linux@jordanrome.com>
Link: https://lore.kernel.org/r/20240823195101.3621028-1-linux@jordanrome.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2024-08-30 16:29:01 -07:00
Sam James
33b22671c2 libbpf: Workaround -Wmaybe-uninitialized false positive
In `elf_close`, we get this with GCC 15 -O3 (at least):
```
In function ‘elf_close’,
    inlined from ‘elf_close’ at elf.c:53:6,
    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
elf.c:57:9: warning: ‘elf_fd.elf’ may be used uninitialized [-Wmaybe-uninitialized]
   57 |         elf_end(elf_fd->elf);
      |         ^~~~~~~~~~~~~~~~~~~~
elf.c: In function ‘elf_find_func_offset_from_file’:
elf.c:377:23: note: ‘elf_fd.elf’ was declared here
  377 |         struct elf_fd elf_fd;
      |                       ^~~~~~
In function ‘elf_close’,
    inlined from ‘elf_close’ at elf.c:53:6,
    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
elf.c:58:9: warning: ‘elf_fd.fd’ may be used uninitialized [-Wmaybe-uninitialized]
   58 |         close(elf_fd->fd);
      |         ^~~~~~~~~~~~~~~~~
elf.c: In function ‘elf_find_func_offset_from_file’:
elf.c:377:23: note: ‘elf_fd.fd’ was declared here
  377 |         struct elf_fd elf_fd;
      |                       ^~~~~~
```

In reality, our use is fine, it's just that GCC doesn't model errno
here (see linked GCC bug). Suppress -Wmaybe-uninitialized accordingly
by initializing elf_fd.fd to -1 and elf_fd.elf to NULL.

I've done this in two other functions as well given it could easily
occur there too (same access/use pattern).
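
A self-contained toy showing the shape of the workaround (not libbpf code):
when the compiler cannot prove an out-parameter is written on every path that
later reads it, initializing the struct at its declaration, as the commit does
for elf_fd, avoids the warning.

```
#include <stdio.h>

struct handle { int fd; void *obj; };

static int open_handle(const char *path, struct handle *out)
{
	if (!path)
		return -1;	/* error path: *out deliberately left untouched */
	out->fd = 3;
	out->obj = (void *)0x1;
	return 0;
}

int main(void)
{
	/* explicit initialization is the workaround; without it, GCC may
	 * report that h.fd/h.obj "may be used uninitialized" below */
	struct handle h = { .fd = -1, .obj = NULL };

	if (open_handle("example", &h) == 0)
		printf("closing fd %d\n", h.fd);
	return 0;
}
```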

Signed-off-by: Sam James <sam@gentoo.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://gcc.gnu.org/PR114952
Link: https://lore.kernel.org/bpf/14ec488a1cac02794c2fa2b83ae0cef1bce2cb36.1723578546.git.sam@gentoo.org
2024-08-30 16:29:01 -07:00
Alan Maguire
8b29484790 libbpf: Fix license for btf_relocate.c
License should be

// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

...as with other libbpf files.

Fixes: 19e00c897d50 ("libbpf: Split BTF relocation")
Reported-by: Neill Kapron <nkapron@google.com>
Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/bpf/20240810093504.2111134-1-alan.maguire@oracle.com
2024-08-30 16:29:01 -07:00
David Vernet
7b5237996a libbpf: Don't take direct pointers into BTF data from st_ops
In struct bpf_struct_ops, we take a pointer to a BTF type name and
a struct btf_type. This was presumably done for convenience, but can
actually result in subtle and confusing bugs given that BTF data can be
invalidated before a program is loaded. For example, in sched_ext, we
may sometimes resize a data section after a skeleton has been opened,
but before the struct_ops scheduler map has been loaded. This may cause
the BTF data to be realloc'd, which can then cause a UAF when loading
the program because the struct_ops map has pointers directly into the
BTF data.

We're already storing the BTF type_id in struct bpf_struct_ops. Because
type_id is stable, we can therefore just update the places where we were
looking at those pointers to instead do the lookups we need from the
type_id.
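
A hedged sketch of the lookup-by-ID pattern described above (the helper name is
invented): resolve the type and its name from the stored type_id at the point
of use, instead of caching pointers into BTF data that may later be realloc'd.

```
#include <bpf/btf.h>

/* hypothetical helper: the name is resolved on demand from a stable type_id */
static const char *st_ops_type_name(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t = btf__type_by_id(btf, type_id);

	return t ? btf__name_by_offset(btf, t->name_off) : NULL;
}
```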

Fixes: 590a00888250 ("bpf: libbpf: Add STRUCT_OPS support")
Signed-off-by: David Vernet <void@manifault.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240724171459.281234-1-void@manifault.com
2024-08-30 16:29:01 -07:00
Stanislav Fomichev
a89e519b40 selftests/bpf: Add XDP_UMEM_TX_METADATA_LEN to XSK TX metadata test
This flag is now required to use tx_metadata_len.
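
A hedged sketch at the UAPI level (not the selftest change itself): a UMEM
registration that reserves per-chunk TX metadata now has to set the new flag
as well.

```
#include <linux/if_xdp.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283	/* may be missing from older libc headers */
#endif

/* Sketch: register a UMEM reserving room for struct xsk_tx_metadata per chunk */
static int register_umem(int xsk_fd, void *area, __u64 size)
{
	struct xdp_umem_reg reg = {
		.addr = (__u64)(unsigned long)area,
		.len = size,
		.chunk_size = 4096,
		.headroom = 0,
		.flags = XDP_UMEM_TX_METADATA_LEN,	/* now required */
		.tx_metadata_len = sizeof(struct xsk_tx_metadata),
	};

	return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg));
}
```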

Fixes: 40808a237d9c ("selftests/bpf: Add TX side to xdp_metadata")
Reported-by: Julian Schindel <mail@arctic-alpaca.de>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/bpf/20240713015253.121248-3-sdf@fomichev.me
2024-08-30 16:29:01 -07:00
Andrii Nakryiko
205e86de8b libbpf: Fix no-args func prototype BTF dumping syntax
For all these years, libbpf's BTF dumper has been emitting syntax that is not
strictly valid for function prototypes that have no input arguments.

Instead of `int (*blah)()` we should emit `int (*blah)(void)`.

This is not normally a problem, but it manifests when we get kfuncs in
vmlinux.h that have no input arguments. Due to compiler internal
specifics, we get no BTF information for such kfuncs, if they are not
declared with proper `(void)`.

The fix is trivial. We also need to adjust a few ancient tests that
happily assumed `()` is correct.
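
For reference (not from the patch), the C-level difference: empty parentheses
declare an unprototyped function whose arguments are unspecified, so compilers
record no parameter information for it; only the (void) form is a true
zero-argument prototype.

```
/* what btf_dump used to emit vs. what it emits after this fix */
struct ops {
	int (*old_style)();	/* unprototyped: arguments unspecified, not zero */
	int (*fixed)(void);	/* genuine zero-argument prototype */
};
```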

Fixes: 351131b51c7a ("libbpf: add btf_dump API for BTF-to-C conversion")
Reported-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://lore.kernel.org/bpf/20240712224442.282823-1-andrii@kernel.org
2024-08-30 16:29:01 -07:00
Kan Liang
86fc78bd2b perf/x86/intel: Support new data source for Lunar Lake
A new PEBS data source format is introduced for the p-core of Lunar
Lake. The data source field is extended to 8 bits with new encodings.

A new layout is introduced into the union intel_x86_pebs_dse.
Introduce the lnl_latency_data() to parse the new format.
Enlarge the pebs_data_source[] accordingly to include new encodings.

Only the mem load and the mem store events can generate the data source.
Introduce INTEL_HYBRID_LDLAT_CONSTRAINT and
INTEL_HYBRID_STLAT_CONSTRAINT to mark them.

Add two new bits for the new cache-related data src, L2_MHB and MSC.
The L2_MHB is short for L2 Miss Handling Buffer, which is similar to the
LFB (Line Fill Buffer) but tracks L2 cache misses.
The MSC stands for the memory-side cache.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-6-kan.liang@linux.intel.com
2024-08-30 16:29:01 -07:00
Andrii Nakryiko
20ccbb303a ci: take into account common local DENYLIST/ALLOWLIST
Similar to naming convention in BPF selftests.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-08-30 09:14:00 -07:00
chantra
26443a6d43 ci: fix test job names
* use the architecture name in job name instead of `runs_on` labels

Signed-off-by: Manu Bretelle <chantr4@gmail.com>
2024-08-29 10:58:52 +01:00
Andrii Nakryiko
22ec3eb15d ci: deny verify_pkcs7_sig as it keeps failing
This has nothing to do with libbpf and is probably failing due to
environment setup.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-08-27 12:51:55 -07:00
Manu Bretelle
bc24cd126a ci: run test on Ubuntu 24.04
Signed-off-by: Manu Bretelle <chantr4@gmail.com>
2024-08-22 12:59:18 -07:00
Manu Bretelle
92316f5072 ci: Pass llvm-version as an input and enforce passing it to build-selftests action
Signed-off-by: Manu Bretelle <chantr4@gmail.com>
2024-08-21 16:04:36 -07:00
Manu Bretelle
a73c6f7f80 ci: Use llvm repositories matching the host we are running on
As this will change to an Ubuntu 24.04 runner, we want this to automatically detect
which Ubuntu version it is running on.

Signed-off-by: Manu Bretelle <chantr4@gmail.com>
2024-08-21 16:04:36 -07:00
Manu Bretelle
8e47e755cd ci: bump default llvm version to 17
Ubuntu 24.04's minimum llvm version is 17. Bumping this now to limit changes later.

Signed-off-by: Manu Bretelle <chantr4@gmail.com>
2024-08-21 16:04:36 -07:00
Manu Bretelle
ec0d0fda8b ci: lock down s390x CI to Ubuntu 20.04 runners
I am working on upgrading to 24.04 runners. In order to make sure that current jobs are scheduled
on Ubuntu 20.04, we need to ask for runners with tag `docker-main`, which is currently
set by those old runners.
Later, we will be able to switch this tag to `docker-noble-main`, which is set by the Ubuntu 24.04 runners.

Signed-off-by: Manu Bretelle <chantr4@gmail.com>
2024-08-21 16:04:36 -07:00
Ivan Shapovalov
b07dfe3b2a Makefile: ensure $(OBJDIR) is created before writing to it
Signed-off-by: Ivan Shapovalov <intelfx@intelfx.name>
2024-07-29 14:05:05 -07:00
thiagoftsm
6923eb970e Merge branch 'libbpf:master' into master 2024-07-12 00:47:44 +00:00
Andrii Nakryiko
686f600bca sync: latest libbpf changes from kernel
Syncing latest libbpf commits from kernel repository.
Baseline bpf-next commit:   a12978712d9001b060bcc10eaae42ad5102abe2b
Checkpoint bpf-next commit: ec5b8c76ab1c6d163762d60cfbedcd27e7527144
Baseline bpf commit:        b1c4b4d45263241ec6c2405a8df8265d4b58e707
Checkpoint bpf commit:      e1533b6319ab9c3a97dad314dd88b3783bc41b69

Alan Maguire (1):
  libbpf: Fix error handling in btf__distill_base()

Andreas Ziegler (1):
  libbpf: Add NULL checks to bpf_object__{prev_map,next_map}

Andrii Nakryiko (2):
  libbpf: fix BPF skeleton forward/backward compat handling
  libbpf: improve old BPF skeleton handling for map auto-attach

 src/btf.c    |  2 +-
 src/libbpf.c | 75 +++++++++++++++++++++++++++++-----------------------
 2 files changed, 43 insertions(+), 34 deletions(-)

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-07-10 14:22:00 -07:00
Andrii Nakryiko
726d7f3722 sync: update .mailmap
Update .mailmap based on libbpf's list of contributors and on the latest
.mailmap version in the upstream repository.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-07-10 14:22:00 -07:00
Andrii Nakryiko
e6f1ae2557 libbpf: improve old BPF skeleton handling for map auto-attach
Improve how we handle old BPF skeletons when it comes to BPF map
auto-attachment. Emit one warn-level message for each struct_ops map
that could have been auto-attached, if the user provided a recent enough BPF
skeleton version. Don't spam the log if there are no relevant struct_ops
maps, though.

This should help users realize that they probably need to regenerate BPF
skeleton header with more recent bpftool/libbpf-cargo (or whatever other
means of BPF skeleton generation).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240708204540.4188946-4-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2024-07-10 14:22:00 -07:00
Andrii Nakryiko
bf7ddbef99 libbpf: fix BPF skeleton forward/backward compat handling
BPF skeleton was designed from day one to be extensible. Generated BPF
skeleton code specifies actual sizes of map/prog/variable skeletons for
that reason and libbpf is supposed to work with newer/older versions
correctly.

Unfortunately, it was missed that we implicitly embed hard-coded, most
up-to-date sizes of those structs (according to the libbpf.h header used to
compile the BPF skeleton header), which can differ from the actual sizes at
runtime when libbpf is used as a shared library.

We have a few places where we just index the array of maps/progs/vars, which
implicitly uses these potentially invalid struct sizes.

This patch aims to fix this problem going forward. Once this lands,
we'll backport these changes in the GitHub repo to create patched releases
for older libbpfs.
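
A hedged sketch of the size-aware indexing this implies (the helper name is
invented): step through skeleton entries using the per-entry size recorded by
the generated skeleton (map_skel_sz) rather than the running libbpf's sizeof().

```
#include <bpf/libbpf.h>

/* hypothetical accessor: index by the skeleton-recorded entry size */
static struct bpf_map_skeleton *skel_map(struct bpf_object_skeleton *s, int i)
{
	return (struct bpf_map_skeleton *)
		((char *)s->maps + (size_t)i * s->map_skel_sz);
}
```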

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Reviewed-by: Alan Maguire <alan.maguire@oracle.com>
Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Fixes: 430025e5dca5 ("libbpf: Add subskeleton scaffolding")
Fixes: 08ac454e258e ("libbpf: Auto-attach struct_ops BPF maps in BPF skeleton")
Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20240708204540.4188946-3-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2024-07-10 14:22:00 -07:00
Andreas Ziegler
1867490d8f libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
In the current state, an erroneous call to
bpf_object__find_map_by_name(NULL, ...) leads to a segmentation
fault through the following call chain:

  bpf_object__find_map_by_name(obj = NULL, ...)
  -> bpf_object__for_each_map(pos, obj = NULL)
  -> bpf_object__next_map((obj = NULL), NULL)
  -> return (obj = NULL)->maps

While calling bpf_object__find_map_by_name with obj = NULL is
obviously incorrect, this should not lead to a segmentation
fault but rather be handled gracefully.

As __bpf_map__iter already handles this situation correctly, we
can delegate the check for the regular case there and only add
a check in case the prev or next parameter is NULL.
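
A minimal user-side illustration (not the patch itself): with the added checks,
an obviously-wrong call fails gracefully instead of crashing.

```
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* deliberately wrong: previously a segfault, now simply returns NULL */
	struct bpf_map *map = bpf_object__find_map_by_name(NULL, "my_map");

	printf("map lookup result: %p\n", (void *)map);
	return 0;
}
```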

Signed-off-by: Andreas Ziegler <ziegler.andreas@siemens.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240703083436.505124-1-ziegler.andreas@siemens.com
2024-07-10 14:22:00 -07:00
Alan Maguire
24aca0740b libbpf: Fix error handling in btf__distill_base()
Coverity points out that after calling btf__new_empty_split() the wrong
value is checked for error.

Fixes: 58e185a0dc35 ("libbpf: Add btf__distill_base() creating split BTF with distilled base BTF")
Reported-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240629100058.2866763-1-alan.maguire@oracle.com
2024-07-10 14:22:00 -07:00
Andrii Nakryiko
c1a6c770c4 libbpf: add btf_iter.o and btf_relocate.o to Makefile
Upstream libbpf got two new .c files; make sure they are built with the
GitHub Makefile as well.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
223cd2273e sync: latest libbpf changes from kernel
Syncing latest libbpf commits from kernel repository.
Baseline bpf-next commit:   531876c80004ecff7bfdbd8ba6c6b48835ef5e22
Checkpoint bpf-next commit: a12978712d9001b060bcc10eaae42ad5102abe2b
Baseline bpf commit:        62da3acd28955e7299babebdfcb14243b789e773
Checkpoint bpf commit:      b1c4b4d45263241ec6c2405a8df8265d4b58e707

Alan Maguire (6):
  libbpf: Add btf__distill_base() creating split BTF with distilled base
    BTF
  libbpf: Split BTF relocation
  libbpf: BTF relocation followup fixing naming, loop logic
  libbpf: Split field iter code into its own file kernel
  libbpf,bpf: Share BTF relocate-related code with kernel
  libbpf: Fix clang compilation error in btf_relocate.c

Andrii Nakryiko (4):
  libbpf: Add BTF field iterator
  libbpf: Make use of BTF field iterator in BPF linker code
  libbpf: Make use of BTF field iterator in BTF handling code
  libbpf: Remove callback-based type/string BTF field visitor helpers

Antoine Tenart (1):
  libbpf: Skip base btf sanity checks

Donglin Peng (1):
  libbpf: Checking the btf_type kind when fixing variable offsets

Eduard Zingerman (1):
  libbpf: Make btf_parse_elf process .BTF.base transparently

Mykyta Yatsenko (1):
  libbpf: Auto-attach struct_ops BPF maps in BPF skeleton

Vadim Fedorenko (1):
  bpf: Add CHECKSUM_COMPLETE to bpf test progs

 include/uapi/linux/bpf.h |   2 +
 src/btf.c                | 696 +++++++++++++++++++++++++++------------
 src/btf.h                |  36 ++
 src/btf_iter.c           | 177 ++++++++++
 src/btf_relocate.c       | 519 +++++++++++++++++++++++++++++
 src/libbpf.c             |  64 +++-
 src/libbpf.h             |  18 +
 src/libbpf.map           |   4 +
 src/libbpf_internal.h    |  29 +-
 src/linker.c             |  69 ++--
 10 files changed, 1378 insertions(+), 236 deletions(-)
 create mode 100644 src/btf_iter.c
 create mode 100644 src/btf_relocate.c

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
dcd076347c sync: update .mailmap
Update .mailmap based on libbpf's list of contributors and on the latest
.mailmap version in the upstream repository.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-06-27 10:01:42 -07:00
Alan Maguire
e4982342e7 libbpf: Fix clang compilation error in btf_relocate.c
When building with clang for ARCH=i386, the following errors are
observed:

  CC      kernel/bpf/btf_relocate.o
./tools/lib/bpf/btf_relocate.c:206:23: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion]
  206 |                 info[id].needs_size = true;
      |                                     ^ ~
./tools/lib/bpf/btf_relocate.c:256:25: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion]
  256 |                         base_info.needs_size = true;
      |                                              ^ ~
2 errors generated.

The problem is we use 1-bit, 31-bit bitfields in a signed int.
Changing to

	bool needs_size: 1;
	unsigned int size:31;

...resolves the error and pahole reports that 4 bytes are used
for the underlying representation:

$ pahole btf_name_info tools/lib/bpf/btf_relocate.o
struct btf_name_info {
	const char  *              name;                 /*     0     8 */
	unsigned int               needs_size:1;         /*     8: 0  4 */
	unsigned int               size:31;              /*     8: 1  4 */
	__u32                      id;                   /*    12     4 */

	/* size: 16, cachelines: 1, members: 4 */
	/* last cacheline: 16 bytes */
};

Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240624192903.854261-1-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Antoine Tenart
95c63a08f2 libbpf: Skip base btf sanity checks
When upgrading to libbpf 1.3 we noticed a big performance hit while
loading programs using CORE on non base-BTF symbols. This was tracked
down to the new BTF sanity check logic. The issue is the base BTF
definitions are checked first for the base BTF and then again for every
module BTF.

Loading 5 dummy programs (using libbpf-rs) that are using CORE on a
non-base BTF symbol on my system:
- Before this fix: 3s.
- With this fix: 0.1s.

Fix this by only checking the types starting at the BTF start id. This
should ensure the base BTF is still checked as expected but only once
(btf->start_id == 1 when creating the base BTF), and then only
additional types are checked for each module BTF.

Fixes: 3903802bb99a ("libbpf: Add basic BTF sanity validation")
Signed-off-by: Antoine Tenart <atenart@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Reviewed-by: Alan Maguire <alan.maguire@oracle.com>
Link: https://lore.kernel.org/bpf/20240624090908.171231-1-atenart@kernel.org
2024-06-27 10:01:42 -07:00
Alan Maguire
27f0169332 libbpf,bpf: Share BTF relocate-related code with kernel
Share the relocation implementation with the kernel. As part of this,
we also need the type/string iteration functions, so the btf_iter.c file is
shared as well. Relocation code in the kernel and userspace is identical
save for the implementation of the reparenting of split BTF to the
relocated base BTF and retrieval of the BTF header from "struct btf";
these small functions need separate user-space and kernel implementations
for the separate "struct btf"s they operate upon.

One other wrinkle on the kernel side is we have to map .BTF.ids in
modules as they were generated with the type ids used at BTF encoding
time. btf_relocate() optionally returns an array mapping from old BTF
ids to relocated ids, so we use that to fix up these references where
needed for kfuncs.

Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240620091733.1967885-5-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Alan Maguire
4ffb92e204 libbpf: Split field iter code into its own file kernel
This will allow it to be shared with the kernel.  No functional change.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240620091733.1967885-4-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Alan Maguire
bc021a8b42 libbpf: BTF relocation followup fixing naming, loop logic
Use less verbose names in BTF relocation code and fix off-by-one error
and typo in btf_relocate.c.  Simplify loop over matching distilled
types, moving from assigning a _next value in loop body to moving
match check conditions into the guard.

Suggested-by: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240620091733.1967885-2-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Donglin Peng
88a0787335 libbpf: Checking the btf_type kind when fixing variable offsets
I encountered an issue when building the test_progs from the repository [1]:

  $ pwd
  /work/Qemu/x86_64/linux-6.10-rc2/tools/testing/selftests/bpf/

  $ make test_progs V=1
  [...]
  ./tools/sbin/bpftool gen object ./ip_check_defrag.bpf.linked2.o ./ip_check_defrag.bpf.linked1.o
  libbpf: failed to find symbol for variable 'bpf_dynptr_slice' in section '.ksyms'
  Error: failed to link './ip_check_defrag.bpf.linked1.o': No such file or directory (2)
  [...]

Upon investigation, I discovered that the btf_types referenced in the '.ksyms'
section had a kind of BTF_KIND_FUNC instead of BTF_KIND_VAR:

  $ bpftool btf dump file ./ip_check_defrag.bpf.linked1.o
  [...]
  [2] DATASEC '.ksyms' size=0 vlen=2
        type_id=16 offset=0 size=0 (FUNC 'bpf_dynptr_from_skb')
        type_id=17 offset=0 size=0 (FUNC 'bpf_dynptr_slice')
  [...]
  [16] FUNC 'bpf_dynptr_from_skb' type_id=82 linkage=extern
  [17] FUNC 'bpf_dynptr_slice' type_id=85 linkage=extern
  [...]

For a detailed analysis, please refer to [2]. We can add a kind check to
fix the issue.

  [1] https://github.com/eddyz87/bpf/tree/binsort-btf-dedup
  [2] https://lore.kernel.org/all/0c0ef20c-c05e-4db9-bad7-2cbc0d6dfae7@oracle.com/

Fixes: 8fd27bf69b86 ("libbpf: Add BPF static linker BTF and BTF.ext support")
Signed-off-by: Donglin Peng <dolinux.peng@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alan Maguire <alan.maguire@oracle.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240619122355.426405-1-dolinux.peng@gmail.com
2024-06-27 10:01:42 -07:00
Eduard Zingerman
4bc5a64933 libbpf: Make btf_parse_elf process .BTF.base transparently
Update btf_parse_elf() to check if .BTF.base section is present.
The logic is as follows:

  if .BTF.base section exists:
     distilled_base := btf_new(.BTF.base)
  if distilled_base:
     btf := btf_new(.BTF, .base_btf=distilled_base)
     if base_btf:
        btf_relocate(btf, base_btf)
  else:
     btf := btf_new(.BTF)
  return btf

In other words:
- if .BTF.base section exists, load BTF from it and use it as a base
  for .BTF load;
- if base_btf is specified and .BTF.base section exist, relocate newly
  loaded .BTF against base_btf.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240613095014.357981-6-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Alan Maguire
2afe409348 libbpf: Split BTF relocation
Map distilled base BTF type ids referenced in split BTF and their
references to the base BTF passed in, and if the mapping succeeds,
reparent the split BTF to the base BTF.

Relocation is done by first verifying that distilled base BTF
only consists of named INT, FLOAT, ENUM, FWD, STRUCT and
UNION kinds; then we sort these to speed lookups.  Once sorted,
the base BTF is iterated, and for each relevant kind we check
for an equivalent in distilled base BTF.  When found, the
mapping from distilled -> base BTF id and string offset is recorded.
In establishing mappings, we need to ensure we check STRUCT/UNION
size when the STRUCT/UNION is embedded in a split BTF STRUCT/UNION,
and when duplicate names exist for the same STRUCT/UNION.  Otherwise
size is ignored in matching STRUCT/UNIONs.

Once all mappings are established, we can update type ids
and string offsets in split BTF and reparent it to the new base.
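
A hedged usage sketch of the public entry point this adds, btf__relocate(),
assuming split BTF already loaded against its distilled base: rewrite its base
references and reparent it onto the running kernel's base BTF.

```
#include <bpf/btf.h>

/* Sketch: rewrite split_btf's base references and reparent it onto
 * vmlinux_btf; returns 0 on success, a negative error otherwise. */
static int reparent_split_btf(struct btf *split_btf, const struct btf *vmlinux_btf)
{
	return btf__relocate(split_btf, vmlinux_btf);
}
```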

Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240613095014.357981-4-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Alan Maguire
36cb1ad3ae libbpf: Add btf__distill_base() creating split BTF with distilled base BTF
To support more robust split BTF, adding supplemental context for the
base BTF type ids that split BTF refers to is required.  Without such
references, a simple shuffling of base BTF type ids (without any other
significant change) invalidates the split BTF.  Here the attempt is made
to store additional context to make split BTF more robust.

This context comes in the form of distilled base BTF providing minimal
information (name and - in some cases - size) for base INTs, FLOATs,
STRUCTs, UNIONs, ENUMs and ENUM64s along with modified split BTF that
points at that base and contains any additional types needed (such as
TYPEDEF, PTR and anonymous STRUCT/UNION declarations).  This
information constitutes the minimal BTF representation needed to
disambiguate or remove split BTF references to base BTF.  The rules
are as follows:

- INT, FLOAT, FWD are recorded in full.
- if a named base BTF STRUCT or UNION is referred to from split BTF, it
  will be encoded as a zero-member sized STRUCT/UNION (preserving
  size for later relocation checks).  Only base BTF STRUCT/UNIONs
  that are either embedded in split BTF STRUCT/UNIONs or that have
  multiple STRUCT/UNION instances of the same name will _need_ size
  checks at relocation time, but as it is possible a different set of
  types will be duplicates in the later to-be-resolved base BTF,
  we preserve size information for all named STRUCT/UNIONs.
- if an ENUM[64] is named, an ENUM forward representation (an ENUM
  with no values) of the same size is used.
- in all other cases, the type is added to the new split BTF.

Avoiding struct/union/enum/enum64 expansion is important to keep the
distilled base BTF representation to a minimum size.

When successful, new representations of the distilled base BTF and new
split BTF that refers to it are returned.  Both need to be freed by the
caller.

So to take a simple example, with split BTF with a type referring
to "struct sk_buff", we will generate distilled base BTF with a
0-member STRUCT sk_buff of the appropriate size, and the split BTF
will refer to it instead.

Tools like pahole can utilize such split BTF to populate the .BTF
section (split BTF) and an additional .BTF.base section.  Then
when the split BTF is loaded, the distilled base BTF can be used
to relocate split BTF to reference the current (and possibly changed)
base BTF.

So for example if "struct sk_buff" was id 502 when the split BTF was
originally generated,  we can use the distilled base BTF to see that
id 502 refers to a "struct sk_buff" and replace instances of id 502
with the current (relocated) base BTF sk_buff type id.

Distilled base BTF is small; when building a kernel with all modules
using distilled base BTF as a test, overall module size grew by only
5.3Mb total across ~2700 modules.

Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240613095014.357981-2-alan.maguire@oracle.com
2024-06-27 10:01:42 -07:00
Vadim Fedorenko
0a66859bf1 bpf: Add CHECKSUM_COMPLETE to bpf test progs
Add special flag to validate that TC BPF program properly updates
checksum information in skb.

Signed-off-by: Vadim Fedorenko <vadfed@meta.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240606145851.229116-1-vadfed@meta.com
2024-06-27 10:01:42 -07:00
Mykyta Yatsenko
be998aa3d4 libbpf: Auto-attach struct_ops BPF maps in BPF skeleton
Similarly to `bpf_program`, support `bpf_map` automatic attachment in
`bpf_object__attach_skeleton`. Currently only struct_ops maps can be
attached.

On bpftool side, code-generate links in skeleton struct for struct_ops maps.
Similarly to `bpf_program_skeleton`, set links in `bpf_map_skeleton`.

On libbpf side, extend `bpf_map` with new `autoattach` field to support
enabling or disabling autoattach functionality, introducing
getter/setter for this field.

`bpf_object__(attach|detach)_skeleton` is extended with
attaching/detaching struct_ops maps logic.
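
A hedged usage sketch of the new per-map control, assuming the accessor name
bpf_map__set_autoattach() introduced here; the skeleton header name and the
sched_ops map are hypothetical.

```
#include <bpf/libbpf.h>
#include "my_sched.skel.h"	/* hypothetical generated skeleton */

static int run_scheduler(void)
{
	struct my_sched *skel = my_sched__open();

	if (!skel)
		return -1;

	/* opt this struct_ops map out of skeleton auto-attach */
	bpf_map__set_autoattach(skel->maps.sched_ops, false);

	if (my_sched__load(skel) || my_sched__attach(skel)) {
		my_sched__destroy(skel);
		return -1;
	}

	/* ... run ... */
	my_sched__destroy(skel);
	return 0;
}
```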

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240605175135.117127-1-yatsenko@meta.com
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
78c78e90cd libbpf: Remove callback-based type/string BTF field visitor helpers
Now that all libbpf/bpftool code switched to btf_field_iter, remove
btf_type_visit_type_ids() and btf_type_visit_str_offs() callback-based
helpers as not needed anymore.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Alan Maguire <alan.maguire@oracle.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20240605001629.4061937-6-andrii@kernel.org
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
dd19c7ef77 libbpf: Make use of BTF field iterator in BTF handling code
Use the new BTF field iterator logic to replace all the callback-based
visitor calls. There are still .BTF.ext callback-based visitor APIs
that should be converted, which will happen as a follow-up.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Alan Maguire <alan.maguire@oracle.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20240605001629.4061937-4-andrii@kernel.org
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
13182b94f3 libbpf: Make use of BTF field iterator in BPF linker code
Switch all BPF linker code dealing with iterating BTF type ID and string
offset fields to new btf_field_iter facilities.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Alan Maguire <alan.maguire@oracle.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20240605001629.4061937-3-andrii@kernel.org
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
cece3242fb libbpf: Add BTF field iterator
Implement iterator-based type ID and string offset BTF field iterator.
This is used extensively in BTF-handling code and BPF linker code for
various sanity checks, rewriting IDs/offsets, etc. Currently this is
implemented as visitor pattern calling custom callbacks, which makes the
logic (especially in simple cases) unnecessarily obscure and harder to
follow.

Having equivalent functionality using an iterator pattern makes the code
simpler to understand and maintain. As we add more BTF processing logic to
libbpf, it's best to switch to the iterator pattern before adding more
callback-based code.

The idea for iterator-based implementation is to record offsets of
necessary fields within fixed btf_type parts (which should be iterated
just once), and, for kinds that have multiple members (based on vlen
field), record where in each member necessary fields are located.

Generic iteration code then just keeps track of the last offset that was
returned and handles N members correctly. The return type is just a u32
pointer, where NULL is returned when all relevant fields have already been
iterated.
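
A sketch of the resulting pattern, using the internal btf_field_iter API
exactly as it appears in the src/btf.c diff further down (libbpf-internal, so
this only compiles inside libbpf's source tree):

```
static int shift_type_ids(struct btf_type *t, __u32 id_off)
{
	struct btf_field_iter it;
	__u32 *type_id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* NULL means every relevant field of this type has been visited */
	while ((type_id = btf_field_iter_next(&it))) {
		if (!*type_id)		/* VOID reference, leave as-is */
			continue;
		*type_id += id_off;	/* shift reference into the new ID space */
	}
	return 0;
}
```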

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Alan Maguire <alan.maguire@oracle.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20240605001629.4061937-2-andrii@kernel.org
2024-06-27 10:01:42 -07:00
Andrii Nakryiko
42065ea662 ci: make pahole-staging workflow manually triggerable
Allow the pahole-staging workflow to be triggered manually.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-06-06 14:39:09 -07:00
Andrii Nakryiko
764d19da07 ci: revert switching to ubuntu-latest for pahole-staging workflow
The pahole staging workflow is using the same old VM image as the BPF selftests
stages. It doesn't have a recent enough glibc, so we can't yet switch to a
newer Ubuntu, unfortunately.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
2024-06-06 14:32:23 -07:00
27 changed files with 3966 additions and 1609 deletions


@@ -12,6 +12,9 @@ inputs:
description: 'where is vmlinux file'
required: true
default: '${{ github.workspace }}/vmlinux'
llvm-version:
description: 'llvm version'
required: true
runs:
using: "composite"
@@ -28,4 +31,6 @@ runs:
export REPO_ROOT="${{ github.workspace }}"
export REPO_PATH="${{ inputs.repo-path }}"
export VMLINUX_BTF="${{ inputs.vmlinux }}"
export LLVM_VERSION="${{ inputs.llvm-version }}"
${{ github.action_path }}/build_selftests.sh


@@ -10,22 +10,21 @@ foldable start prepare_selftests "Building selftests"
LIBBPF_PATH="${REPO_ROOT}"
llvm_default_version() {
echo "16"
}
llvm_latest_version() {
echo "17"
echo "19"
}
LLVM_VERSION=$(llvm_default_version)
if [[ "${LLVM_VERSION}" == $(llvm_latest_version) ]]; then
REPO_DISTRO_SUFFIX=""
else
REPO_DISTRO_SUFFIX="-${LLVM_VERSION}"
fi
echo "deb https://apt.llvm.org/focal/ llvm-toolchain-focal${REPO_DISTRO_SUFFIX} main" \
DISTRIB_CODENAME="noble"
test -f /etc/lsb-release && . /etc/lsb-release
echo "${DISTRIB_CODENAME}"
echo "deb https://apt.llvm.org/${DISTRIB_CODENAME}/ llvm-toolchain-${DISTRIB_CODENAME}${REPO_DISTRO_SUFFIX} main" \
| sudo tee /etc/apt/sources.list.d/llvm.list
PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh

File diff suppressed because it is too large.


@@ -13,6 +13,10 @@ inputs:
description: 'pahole rev or master'
required: true
default: 'master'
llvm-version:
description: 'llvm version'
required: false
default: '17'
runs:
using: "composite"
steps:
@@ -38,6 +42,7 @@ runs:
with:
pahole: ${{ inputs.pahole }}
arch: ${{ inputs.arch }}
llvm-version: ${{ inputs.llvm-version }}
# 1. download CHECKPOINT kernel source
- name: Get checkpoint commit
shell: bash
@@ -93,6 +98,7 @@ runs:
with:
repo-path: '.kernel'
kernel: ${{ inputs.kernel }}
llvm-version: ${{ inputs.llvm-version }}
# 4. prepare rootfs
- name: prepare rootfs
uses: libbpf/ci/prepare-rootfs@main


@@ -1,13 +1,13 @@
name: pahole-staging
on:
workflow_dispatch:
schedule:
- cron: '0 18 * * *'
jobs:
vmtest:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
name: Kernel LATEST + staging pahole
env:
STAGING: tmp.master


@@ -13,22 +13,22 @@ concurrency:
jobs:
vmtest:
runs-on: ${{ matrix.runs_on }}
name: Kernel ${{ matrix.kernel }} on ${{ matrix.runs_on }} + selftests
name: Kernel ${{ matrix.kernel }} on ${{ matrix.arch }} + selftests
strategy:
fail-fast: false
matrix:
include:
- kernel: 'LATEST'
runs_on: ubuntu-20.04
runs_on: ubuntu-24.04
arch: 'x86_64'
- kernel: '5.5.0'
runs_on: ubuntu-20.04
runs_on: ubuntu-24.04
arch: 'x86_64'
- kernel: '4.9.0'
runs_on: ubuntu-20.04
runs_on: ubuntu-24.04
arch: 'x86_64'
- kernel: 'LATEST'
runs_on: s390x
runs_on: ["s390x", "docker-noble-main"]
arch: 's390x'
steps:
- uses: actions/checkout@v4


@@ -8,6 +8,7 @@ Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Herbert Xu <herbert@gondor.apana.org.au>
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
Kees Cook <kees@kernel.org> <keescook@chromium.org>
Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com>
@@ -15,4 +16,6 @@ Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com>
Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vadfed@meta.com>
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vfedorenko@novek.ru>


@@ -1 +1 @@
62da3acd28955e7299babebdfcb14243b789e773
b408473ea01b2e499d23503e2bf898416da9d7ac


@@ -1 +1 @@
531876c80004ecff7bfdbd8ba6c6b48835ef5e22
2ad6d23f465a4f851e3bcf6d74c315ce7b2c205b


@@ -12,3 +12,4 @@ xdp_bonding/xdp_bonding_features # started failing after net merge from 359e
tc_redirect/tc_redirect_dtime # uapi breakage after net-next commit 885c36e59f46 ("net: Re-use and set mono_delivery_time bit for userspace tstamp packets")
migrate_reuseport/IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
migrate_reuseport/IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
verify_pkcs7_sig # keeps failing


@@ -67,12 +67,14 @@ local_configs_path=${PROJECT_NAME}/vmtest/configs
DENYLIST=$(read_lists \
"$configs_path/DENYLIST" \
"$configs_path/DENYLIST.${ARCH}" \
"$local_configs_path/DENYLIST" \
"$local_configs_path/DENYLIST-${KERNEL}" \
"$local_configs_path/DENYLIST-${KERNEL}.${ARCH}" \
)
ALLOWLIST=$(read_lists \
"$configs_path/ALLOWLIST" \
"$configs_path/ALLOWLIST.${ARCH}" \
"$local_configs_path/ALLOWLIST" \
"$local_configs_path/ALLOWLIST-${KERNEL}" \
"$local_configs_path/ALLOWLIST-${KERNEL}.${ARCH}" \
)


@@ -1425,6 +1425,8 @@ enum {
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
/* If set, XDP frames will be transmitted after processing */
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
@@ -7510,4 +7512,13 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
/*
* Flags to control BPF kfunc behaviour.
* - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
* helper documentation for details.)
*/
enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};
#endif /* _UAPI__LINUX_BPF_H__ */


@@ -41,6 +41,10 @@
*/
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
/* Request to reserve tx_metadata_len bytes of per-chunk metadata.
*/
#define XDP_UMEM_TX_METADATA_LEN (1 << 2)
struct sockaddr_xdp {
__u16 sxdp_family;
__u16 sxdp_flags;


@@ -1349,12 +1349,14 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
/* 5-0x7 available */
#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
/* 0x7 available */
#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */


@@ -55,7 +55,7 @@ STATIC_OBJDIR := $(OBJDIR)/staticobjs
OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \
btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \
relo_core.o usdt.o zip.o elf.o features.o
relo_core.o usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS))
STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS))
@@ -119,13 +119,13 @@ $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION): $(SHARED_OBJS)
-Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
$^ $(ALL_LDFLAGS) -o $@
$(OBJDIR)/libbpf.pc: force
$(OBJDIR)/libbpf.pc: force | $(OBJDIR)
$(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \
-e "s|@LIBDIR@|$(LIBDIR_PC)|" \
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
< libbpf.pc.template > $@
$(STATIC_OBJDIR) $(SHARED_OBJDIR):
$(OBJDIR) $(STATIC_OBJDIR) $(SHARED_OBJDIR):
$(call msg,MKDIR,$@)
$(Q)mkdir -p $@

src/btf.c

@@ -116,6 +116,9 @@ struct btf {
/* whether strings are already deduplicated */
bool strs_deduped;
/* whether base_btf should be freed in btf_free for this instance */
bool owns_base;
/* BTF object FD, if loaded into kernel */
int fd;
@@ -598,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf)
__u32 i, n = btf__type_cnt(btf);
int err;
for (i = 1; i < n; i++) {
for (i = btf->start_id; i < n; i++) {
t = btf_type_by_id(btf, i);
err = btf_validate_type(btf, t, i);
if (err)
@@ -969,6 +972,8 @@ void btf__free(struct btf *btf)
free(btf->raw_data);
free(btf->raw_data_swapped);
free(btf->type_offs);
if (btf->owns_base)
btf__free(btf->base_btf);
free(btf);
}
@@ -991,6 +996,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
btf->swapped_endian = base_btf->swapped_endian;
}
/* +1 for empty string at offset 0 */
@@ -1084,16 +1090,86 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
return libbpf_ptr(btf_new(data, size, base_btf));
}
struct btf_elf_secs {
Elf_Data *btf_data;
Elf_Data *btf_ext_data;
Elf_Data *btf_base_data;
};
static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
Elf_Scn *scn = NULL;
Elf_Data *data;
GElf_Ehdr ehdr;
size_t shstrndx;
int idx = 0;
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("failed to get EHDR from %s\n", path);
goto err;
}
if (elf_getshdrstrndx(elf, &shstrndx)) {
pr_warn("failed to get section names section index for %s\n",
path);
goto err;
}
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
goto err;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
Elf_Data **field;
GElf_Shdr sh;
char *name;
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path);
goto err;
}
name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
goto err;
}
if (strcmp(name, BTF_ELF_SEC) == 0)
field = &secs->btf_data;
else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
field = &secs->btf_ext_data;
else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
field = &secs->btf_base_data;
else
continue;
data = elf_getdata(scn, 0);
if (!data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path);
goto err;
}
*field = data;
}
return 0;
err:
return -LIBBPF_ERRNO__FORMAT;
}
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
struct btf_ext **btf_ext)
{
Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
int err = 0, fd = -1, idx = 0;
struct btf_elf_secs secs = {};
struct btf *dist_base_btf = NULL;
struct btf *btf = NULL;
Elf_Scn *scn = NULL;
int err = 0, fd = -1;
Elf *elf = NULL;
GElf_Ehdr ehdr;
size_t shstrndx;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n", path);
@@ -1107,73 +1183,48 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
return ERR_PTR(err);
}
err = -LIBBPF_ERRNO__FORMAT;
elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) {
pr_warn("failed to open %s as ELF file\n", path);
goto done;
}
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("failed to get EHDR from %s\n", path);
err = btf_find_elf_sections(elf, path, &secs);
if (err)
goto done;
}
if (elf_getshdrstrndx(elf, &shstrndx)) {
pr_warn("failed to get section names section index for %s\n",
path);
goto done;
}
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
goto done;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
GElf_Shdr sh;
char *name;
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path);
goto done;
}
name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
goto done;
}
if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = elf_getdata(scn, 0);
if (!btf_data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path);
goto done;
}
continue;
} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = elf_getdata(scn, 0);
if (!btf_ext_data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path);
goto done;
}
continue;
}
}
if (!btf_data) {
if (!secs.btf_data) {
pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
err = -ENODATA;
goto done;
}
btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
err = libbpf_get_error(btf);
if (err)
if (secs.btf_base_data) {
dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
NULL);
if (IS_ERR(dist_base_btf)) {
err = PTR_ERR(dist_base_btf);
dist_base_btf = NULL;
goto done;
}
}
btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
dist_base_btf ?: base_btf);
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
goto done;
}
if (dist_base_btf && base_btf) {
err = btf__relocate(btf, base_btf);
if (err)
goto done;
btf__free(dist_base_btf);
dist_base_btf = NULL;
}
if (dist_base_btf)
btf->owns_base = true;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
@@ -1187,11 +1238,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
break;
}
if (btf_ext && btf_ext_data) {
*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
err = libbpf_get_error(*btf_ext);
if (err)
if (btf_ext && secs.btf_ext_data) {
*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
if (IS_ERR(*btf_ext)) {
err = PTR_ERR(*btf_ext);
goto done;
}
} else if (btf_ext) {
*btf_ext = NULL;
}
@@ -1205,6 +1257,7 @@ done:
if (btf_ext)
btf_ext__free(*btf_ext);
btf__free(dist_base_btf);
btf__free(btf);
return ERR_PTR(err);
@@ -1739,9 +1792,8 @@ struct btf_pipe {
struct hashmap *str_off_map; /* map string offsets from src to dst */
};
static int btf_rewrite_str(__u32 *str_off, void *ctx)
static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
{
struct btf_pipe *p = ctx;
long mapped_off;
int off, err;
@@ -1771,10 +1823,11 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
return 0;
}
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
{
struct btf_pipe p = { .src = src_btf, .dst = btf };
struct btf_field_iter it;
struct btf_type *t;
__u32 *str_off;
int sz, err;
sz = btf_type_size(src_type);
@@ -1782,35 +1835,33 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
return libbpf_err(sz);
/* deconstruct BTF, if necessary, and invalidate raw_data */
if (btf_ensure_modifiable(btf))
if (btf_ensure_modifiable(p->dst))
return libbpf_err(-ENOMEM);
t = btf_add_type_mem(btf, sz);
t = btf_add_type_mem(p->dst, sz);
if (!t)
return libbpf_err(-ENOMEM);
memcpy(t, src_type, sz);
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return libbpf_err(err);
return btf_commit_type(btf, sz);
while ((str_off = btf_field_iter_next(&it))) {
err = btf_rewrite_str(p, str_off);
if (err)
return libbpf_err(err);
}
return btf_commit_type(p->dst, sz);
}
static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
struct btf *btf = ctx;
struct btf_pipe p = { .src = src_btf, .dst = btf };
if (!*type_id) /* nothing to do for VOID references */
return 0;
/* we haven't updated btf's type count yet, so
* btf->start_id + btf->nr_types - 1 is the type ID offset we should
* add to all newly added BTF types
*/
*type_id += btf->start_id + btf->nr_types - 1;
return 0;
return btf_add_type(&p, src_type);
}
static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
@@ -1858,6 +1909,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
memcpy(t, src_btf->types_data, data_sz);
for (i = 0; i < cnt; i++) {
struct btf_field_iter it;
__u32 *type_id, *str_off;
sz = btf_type_size(t);
if (sz < 0) {
/* unlikely, has to be corrupted src_btf */
@@ -1869,14 +1923,30 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
*off = t - btf->types_data;
/* add, dedup, and remap strings referenced by this BTF type */
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
goto err_out;
while ((str_off = btf_field_iter_next(&it))) {
err = btf_rewrite_str(&p, str_off);
if (err)
goto err_out;
}
/* remap all type IDs referenced from this BTF type */
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
goto err_out;
/* remap all type IDs referenced from this BTF type */
err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
if (err)
goto err_out;
while ((type_id = btf_field_iter_next(&it))) {
if (!*type_id) /* nothing to do for VOID references */
continue;
/* we haven't updated btf's type count yet, so
* btf->start_id + btf->nr_types - 1 is the type ID offset we should
* add to all newly added BTF types
*/
*type_id += btf->start_id + btf->nr_types - 1;
}
/* go to next type data and type offset index entry */
t += sz;
@@ -3453,11 +3523,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *
int i, r;
for (i = 0; i < d->btf->nr_types; i++) {
struct btf_field_iter it;
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
__u32 *str_off;
r = btf_type_visit_str_offs(t, fn, ctx);
r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (r)
return r;
while ((str_off = btf_field_iter_next(&it))) {
r = fn(str_off, ctx);
if (r)
return r;
}
}
if (!d->btf_ext)
@@ -4919,10 +4997,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
for (i = 0; i < d->btf->nr_types; i++) {
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
struct btf_field_iter it;
__u32 *type_id;
r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (r)
return r;
while ((type_id = btf_field_iter_next(&it))) {
__u32 resolved_id, new_id;
resolved_id = resolve_type_id(d, *type_id);
new_id = d->hypot_map[resolved_id];
if (new_id > BTF_MAX_NR_TYPES)
return -EINVAL;
*type_id = new_id;
}
}
if (!d->btf_ext)
@@ -5003,136 +5094,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt
return btf__parse_split(path, vmlinux_btf);
}
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
int i, n, err;
switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
return 0;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
return visit(&t->type, ctx);
case BTF_KIND_ARRAY: {
struct btf_array *a = btf_array(t);
err = visit(&a->type, ctx);
err = err ?: visit(&a->index_type, ctx);
return err;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);
err = visit(&t->type, ctx);
if (err)
return err;
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}
case BTF_KIND_DATASEC: {
struct btf_var_secinfo *m = btf_var_secinfos(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}
default:
return -EINVAL;
}
}
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
int i, n, err;
err = visit(&t->name_off, ctx);
if (err)
return err;
switch (btf_kind(t)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM: {
struct btf_enum *m = btf_enum(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM64: {
struct btf_enum64 *m = btf_enum64(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
default:
break;
}
return 0;
}
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
@@ -5212,3 +5173,328 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
return 0;
}
struct btf_distill {
struct btf_pipe pipe;
int *id_map;
unsigned int split_start_id;
unsigned int split_start_str;
int diff_id;
};
static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
{
struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
struct btf_field_iter it;
__u32 *id;
int err;
err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it))) {
struct btf_type *base_t;
if (!*id)
continue;
/* split BTF id, not needed */
if (*id >= dist->split_start_id)
continue;
/* already added? */
if (dist->id_map[*id] > 0)
continue;
/* only a subset of base BTF types should be referenced from
* split BTF; ensure nothing unexpected is referenced.
*/
base_t = btf_type_by_id(dist->pipe.src, *id);
switch (btf_kind(base_t)) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
case BTF_KIND_ARRAY:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_TYPEDEF:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
case BTF_KIND_PTR:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_TYPE_TAG:
dist->id_map[*id] = *id;
break;
default:
pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
*id, btf_kind(base_t));
return -EINVAL;
}
/* If a base type is used, ensure the types it refers to are
* marked as used as well; for example, if we find a PTR to
* INT we need both the PTR and the INT.
*
* The only exception is named struct/unions, since distilled
* base BTF composite types have no members.
*/
if (btf_is_composite(base_t) && base_t->name_off)
continue;
err = btf_add_distilled_type_ids(dist, *id);
if (err)
return err;
}
return 0;
}
static int btf_add_distilled_types(struct btf_distill *dist)
{
bool adding_to_base = dist->pipe.dst->start_id == 1;
int id = btf__type_cnt(dist->pipe.dst);
struct btf_type *t;
int i, err = 0;
/* Add types for each of the required references to either distilled
* base or split BTF, depending on type characteristics.
*/
for (i = 1; i < dist->split_start_id; i++) {
const char *name;
int kind;
if (!dist->id_map[i])
continue;
t = btf_type_by_id(dist->pipe.src, i);
kind = btf_kind(t);
name = btf__name_by_offset(dist->pipe.src, t->name_off);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
/* Named int, float, fwd are added to base. */
if (!adding_to_base)
continue;
err = btf_add_type(&dist->pipe, t);
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
/* Named struct/union are added to base as 0-vlen
* struct/union of same size. Anonymous struct/unions
* are added to split BTF as-is.
*/
if (adding_to_base) {
if (!t->name_off)
continue;
err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
} else {
if (t->name_off)
continue;
err = btf_add_type(&dist->pipe, t);
}
break;
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
/* Named enum[64]s are added to base as a sized
* enum; relocation will match with appropriately-named
* and sized enum or enum64.
*
* Anonymous enums are added to split BTF as-is.
*/
if (adding_to_base) {
if (!t->name_off)
continue;
err = btf__add_enum(dist->pipe.dst, name, t->size);
} else {
if (t->name_off)
continue;
err = btf_add_type(&dist->pipe, t);
}
break;
case BTF_KIND_ARRAY:
case BTF_KIND_TYPEDEF:
case BTF_KIND_PTR:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_TYPE_TAG:
/* All other types are added to split BTF. */
if (adding_to_base)
continue;
err = btf_add_type(&dist->pipe, t);
break;
default:
pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
name, i, kind);
return -EINVAL;
}
if (err < 0)
break;
dist->id_map[i] = id++;
}
return err;
}
/* Split BTF ids without a mapping will be shifted downwards since distilled
* base BTF is smaller than the original base BTF. For those that have a
* mapping (either to base or updated split BTF), update the id based on
* that mapping.
*/
static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
{
struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
struct btf_field_iter it;
__u32 *id;
int err;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it))) {
if (dist->id_map[*id])
*id = dist->id_map[*id];
else if (*id >= dist->split_start_id)
*id -= dist->diff_id;
}
return 0;
}
/* Create updated split BTF with distilled base BTF; distilled base BTF
* consists of BTF information required to clarify the types that split
* BTF refers to, omitting unneeded details. Specifically it will contain
* base types and memberless definitions of named structs, unions and enumerated
* types. Associated reference types like pointers, arrays and anonymous
* structs, unions and enumerated types will be added to split BTF.
* Size is recorded for named struct/unions to help guide matching to the
* target base BTF during later relocation.
*
* The only case where structs, unions or enumerated types are fully represented
* is when they are anonymous; in such cases, the anonymous type is added to
* split BTF in full.
*
* We return a newly-created split BTF that refers to a newly-created
* distilled base BTF. Both must be freed separately by the caller.
*/
int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
struct btf **new_split_btf)
{
struct btf *new_base = NULL, *new_split = NULL;
const struct btf *old_base;
unsigned int n = btf__type_cnt(src_btf);
struct btf_distill dist = {};
struct btf_type *t;
int i, err = 0;
/* src BTF must be split BTF. */
old_base = btf__base_btf(src_btf);
if (!new_base_btf || !new_split_btf || !old_base)
return libbpf_err(-EINVAL);
new_base = btf__new_empty();
if (!new_base)
return libbpf_err(-ENOMEM);
btf__set_endianness(new_base, btf__endianness(src_btf));
dist.id_map = calloc(n, sizeof(*dist.id_map));
if (!dist.id_map) {
err = -ENOMEM;
goto done;
}
dist.pipe.src = src_btf;
dist.pipe.dst = new_base;
dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
if (IS_ERR(dist.pipe.str_off_map)) {
err = -ENOMEM;
goto done;
}
dist.split_start_id = btf__type_cnt(old_base);
dist.split_start_str = old_base->hdr->str_len;
/* Pass over src split BTF; generate the list of base BTF type ids it
* references; these will constitute our distilled BTF set to be
* distributed over base and split BTF as appropriate.
*/
for (i = src_btf->start_id; i < n; i++) {
err = btf_add_distilled_type_ids(&dist, i);
if (err < 0)
goto done;
}
/* Next add types for each of the required references to base BTF and split BTF
* in turn.
*/
err = btf_add_distilled_types(&dist);
if (err < 0)
goto done;
/* Create new split BTF with distilled base BTF as its base; the final
* state is split BTF with distilled base BTF that represents enough
* about its base references to allow it to be relocated with the base
* BTF available.
*/
new_split = btf__new_empty_split(new_base);
if (!new_split) {
err = -errno;
goto done;
}
dist.pipe.dst = new_split;
/* First add all split types */
for (i = src_btf->start_id; i < n; i++) {
t = btf_type_by_id(src_btf, i);
err = btf_add_type(&dist.pipe, t);
if (err < 0)
goto done;
}
/* Now add distilled types to split BTF that are not added to base. */
err = btf_add_distilled_types(&dist);
if (err < 0)
goto done;
/* All split BTF ids will be shifted downwards since there are fewer base
* BTF ids in distilled base BTF.
*/
dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
n = btf__type_cnt(new_split);
/* Now update base/split BTF ids. */
for (i = 1; i < n; i++) {
err = btf_update_distilled_type_ids(&dist, i);
if (err < 0)
break;
}
done:
free(dist.id_map);
hashmap__free(dist.pipe.str_off_map);
if (err) {
btf__free(new_split);
btf__free(new_base);
return libbpf_err(err);
}
*new_base_btf = new_base;
*new_split_btf = new_split;
return 0;
}
const struct btf_header *btf_header(const struct btf *btf)
{
return btf->hdr;
}
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
btf->base_btf = (struct btf *)base_btf;
btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
}
int btf__relocate(struct btf *btf, const struct btf *base_btf)
{
int err = btf_relocate(btf, base_btf, NULL);
if (!err)
btf->owns_base = false;
return libbpf_err(err);
}


@@ -18,6 +18,7 @@ extern "C" {
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
#define BTF_BASE_ELF_SEC ".BTF.base"
#define MAPS_ELF_SEC ".maps"
struct btf;
@@ -107,6 +108,27 @@ LIBBPF_API struct btf *btf__new_empty(void);
*/
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
/**
* @brief **btf__distill_base()** creates new versions of the split BTF
* *src_btf* and its base BTF. The new base BTF will only contain the types
* needed to improve robustness of the split BTF to small changes in base BTF.
* When that split BTF is loaded against a (possibly changed) base, this
* distilled base BTF will help update references to that (possibly changed)
* base BTF.
*
* Both the new split and its associated new base BTF must be freed by
* the caller.
*
* If successful, 0 is returned and **new_base_btf** and **new_split_btf**
* will point at the new base/split BTF.
*
* A negative value is returned on error and the thread-local `errno` variable
* is set to the error code as well.
*/
LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
struct btf **new_split_btf);
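For context, a minimal sketch of how this API is meant to be used (hypothetical caller; not part of the diff):

#include <errno.h>
#include "btf.h"

/* distill_example() is a hypothetical caller; split_btf must be split BTF,
 * i.e. BTF parsed on top of some base BTF
 */
static int distill_example(struct btf *split_btf)
{
        struct btf *new_base = NULL, *new_split = NULL;
        int err;

        err = btf__distill_base(split_btf, &new_base, &new_split);
        if (err)
                return err; /* thread-local errno is set as well */

        /* ... use new_split (e.g. write it and new_base out as .BTF and
         * .BTF.base ELF sections) ...
         */

        /* both must be freed separately by the caller */
        btf__free(new_split);
        btf__free(new_base);
        return 0;
}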
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
@@ -231,6 +253,20 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
/**
* @brief **btf__relocate()** will check the split BTF *btf* for references
* to base BTF kinds, and verify those references are compatible with
* *base_btf*; if they are, *btf* is adjusted such that it is re-parented to
* *base_btf*, and type ids and strings are adjusted to accommodate this.
*
* If successful, 0 is returned and **btf** now has **base_btf** as its
* base.
*
* A negative value is returned on error and the thread-local `errno` variable
* is set to the error code as well.
*/
LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
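A short sketch of the intended relocation flow, assuming the target base BTF is current kernel vmlinux BTF (relocate_example() is a hypothetical caller):

#include <errno.h>
#include "btf.h"

/* split_btf is split BTF whose base is a distilled base BTF produced
 * earlier by btf__distill_base()
 */
static int relocate_example(struct btf *split_btf)
{
        struct btf *vmlinux_btf;
        int err;

        vmlinux_btf = btf__load_vmlinux_btf();
        if (!vmlinux_btf)
                return -errno;

        /* re-parent split_btf from its distilled base to the actual kernel
         * base BTF; on success, type ids and string offsets in split_btf
         * are rewritten to reference vmlinux_btf
         */
        err = btf__relocate(split_btf, vmlinux_btf);
        if (err) /* thread-local errno is set as well */
                return err;
        return 0;
}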
struct btf_dump;
struct btf_dump_opts {


@@ -1559,10 +1559,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
* Clang for BPF target generates func_proto with no
* args as a func_proto with a single void arg (e.g.,
* `int (*f)(void)` vs just `int (*f)()`). We are
* going to pretend there are no args for such case.
* going to emit the valid empty-args (void) syntax in
* such a case. Conveniently, the valid no-args case
* can be special-cased here as well.
*/
if (vlen == 1 && p->type == 0) {
btf_dump_printf(d, ")");
if (vlen == 0 || (vlen == 1 && p->type == 0)) {
btf_dump_printf(d, "void)");
return;
}
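For illustration (illustrative declarations, not part of the diff), the effect of this change on dumped prototypes:

/* Both `int (*f)()` (vlen == 0) and Clang's BPF-target encoding of
 * `int (*f)(void)` (vlen == 1, single parameter with type 0) are now
 * dumped as:
 *
 *     int (*f)(void);
 *
 * where the old code emitted `int (*f)()` for both cases.
 */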

src/btf_iter.c Normal file (177 lines)

@@ -0,0 +1,177 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
#else
#include "btf.h"
#include "libbpf_internal.h"
#endif
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
enum btf_field_iter_kind iter_kind)
{
it->p = NULL;
it->m_idx = -1;
it->off_idx = 0;
it->vlen = 0;
switch (iter_kind) {
case BTF_FIELD_ITER_IDS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
break;
case BTF_KIND_ARRAY:
it->desc = (struct btf_field_desc) {
2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, type)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, type)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, type)}
};
break;
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_var_secinfo),
1, {offsetof(struct btf_var_secinfo, type)}
};
break;
default:
return -EINVAL;
}
break;
case BTF_FIELD_ITER_STRS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
case BTF_KIND_ARRAY:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)}
};
break;
case BTF_KIND_ENUM:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum),
1, {offsetof(struct btf_enum, name_off)}
};
break;
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum64),
1, {offsetof(struct btf_enum64, name_off)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, name_off)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, name_off)}
};
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
if (it->desc.m_sz)
it->vlen = btf_vlen(t);
it->p = t;
return 0;
}
__u32 *btf_field_iter_next(struct btf_field_iter *it)
{
if (!it->p)
return NULL;
if (it->m_idx < 0) {
if (it->off_idx < it->desc.t_off_cnt)
return it->p + it->desc.t_offs[it->off_idx++];
/* move to per-member iteration */
it->m_idx = 0;
it->p += sizeof(struct btf_type);
it->off_idx = 0;
}
/* if type doesn't have members, stop */
if (it->desc.m_sz == 0) {
it->p = NULL;
return NULL;
}
if (it->off_idx >= it->desc.m_off_cnt) {
/* exhausted this member's fields, go to the next member */
it->m_idx++;
it->p += it->desc.m_sz;
it->off_idx = 0;
}
if (it->m_idx < it->vlen)
return it->p + it->desc.m_offs[it->off_idx++];
it->p = NULL;
return NULL;
}

src/btf_relocate.c Normal file (519 lines)

@@ -0,0 +1,519 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#define btf_type_by_id (struct btf_type *)btf_type_by_id
#define btf__type_cnt btf_nr_types
#define btf__base_btf btf_base_btf
#define btf__name_by_offset btf_name_by_offset
#define btf__str_by_offset btf_str_by_offset
#define btf_kflag btf_type_kflag
#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
#define free(ptr) kvfree(ptr)
#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
#else
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#endif /* __KERNEL__ */
struct btf;
struct btf_relocate {
struct btf *btf;
const struct btf *base_btf;
const struct btf *dist_base_btf;
unsigned int nr_base_types;
unsigned int nr_split_types;
unsigned int nr_dist_base_types;
int dist_str_len;
int base_str_len;
__u32 *id_map;
__u32 *str_map;
};
/* Set temporarily in relocation id_map if distilled base struct/union is
* embedded in a split BTF struct/union; in such a case, size information must
* match between distilled base BTF and base BTF representation of type.
*/
#define BTF_IS_EMBEDDED ((__u32)-1)
/* <name, size, id> triple used in sorting/searching distilled base BTF. */
struct btf_name_info {
const char *name;
/* set when search requires a size match */
bool needs_size: 1;
unsigned int size: 31;
__u32 id;
};
static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i)
{
struct btf_type *t = btf_type_by_id(r->btf, i);
struct btf_field_iter it;
__u32 *id;
int err;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it)))
*id = r->id_map[*id];
return 0;
}
/* Simple string comparison used for sorting within BTF, since all distilled
* types are named. If strings match and size matching is requested for both
* elements, fall back to using size for ordering.
*/
static int cmp_btf_name_size(const void *n1, const void *n2)
{
const struct btf_name_info *ni1 = n1;
const struct btf_name_info *ni2 = n2;
int name_diff = strcmp(ni1->name, ni2->name);
if (!name_diff && ni1->needs_size && ni2->needs_size)
return ni2->size - ni1->size;
return name_diff;
}
/* Binary search with a small twist; find leftmost element that matches
* so that we can then iterate through all exact matches. So for example
* searching { "a", "bb", "bb", "c" } we would always match on the
* leftmost "bb".
*/
static struct btf_name_info *search_btf_name_size(struct btf_name_info *key,
struct btf_name_info *vals,
int nelems)
{
struct btf_name_info *ret = NULL;
int high = nelems - 1;
int low = 0;
while (low <= high) {
int mid = (low + high)/2;
struct btf_name_info *val = &vals[mid];
int diff = cmp_btf_name_size(key, val);
if (diff == 0)
ret = val;
/* even if found, keep searching for leftmost match */
if (diff <= 0)
high = mid - 1;
else
low = mid + 1;
}
return ret;
}
/* If a member of a split BTF struct/union refers to a base BTF
* struct/union, mark that struct/union id temporarily in the id_map
* with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef
* reference types, but if a pointer is encountered, the type is no longer
* considered embedded.
*/
static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
{
struct btf_type *t = btf_type_by_id(r->btf, i);
struct btf_field_iter it;
__u32 *id;
int err;
if (!btf_is_composite(t))
return 0;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it))) {
__u32 next_id = *id;
while (next_id) {
t = btf_type_by_id(r->btf, next_id);
switch (btf_kind(t)) {
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_TYPEDEF:
case BTF_KIND_TYPE_TAG:
next_id = t->type;
break;
case BTF_KIND_ARRAY: {
struct btf_array *a = btf_array(t);
next_id = a->type;
break;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
if (next_id < r->nr_dist_base_types)
r->id_map[next_id] = BTF_IS_EMBEDDED;
next_id = 0;
break;
default:
next_id = 0;
break;
}
}
}
return 0;
}
/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate
* through base BTF, looking up distilled type equivalents via binary search.
*/
static int btf_relocate_map_distilled_base(struct btf_relocate *r)
{
struct btf_name_info *info, *info_end;
struct btf_type *base_t, *dist_t;
__u8 *base_name_cnt = NULL;
int err = 0;
__u32 id;
/* generate a sort index array of name/type ids sorted by name for
* distilled base BTF to speed name-based lookups.
*/
info = calloc(r->nr_dist_base_types, sizeof(*info));
if (!info) {
err = -ENOMEM;
goto done;
}
info_end = info + r->nr_dist_base_types;
for (id = 0; id < r->nr_dist_base_types; id++) {
dist_t = btf_type_by_id(r->dist_base_btf, id);
info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
info[id].id = id;
info[id].size = dist_t->size;
info[id].needs_size = true;
}
qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);
/* Mark distilled base struct/union members of split BTF structs/unions
* in id_map with BTF_IS_EMBEDDED; this signals that these types
* need to match both name and size, otherwise embedding the base
* struct/union in the split type is invalid.
*/
for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
err = btf_mark_embedded_composite_type_ids(r, id);
if (err)
goto done;
}
/* Collect name counts for composite types in base BTF. If multiple
* instances of a struct/union of the same name exist, we need to use
* size to determine which to map to since name alone is ambiguous.
*/
base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt));
if (!base_name_cnt) {
err = -ENOMEM;
goto done;
}
for (id = 1; id < r->nr_base_types; id++) {
base_t = btf_type_by_id(r->base_btf, id);
if (!btf_is_composite(base_t) || !base_t->name_off)
continue;
if (base_name_cnt[base_t->name_off] < 255)
base_name_cnt[base_t->name_off]++;
}
/* Now search base BTF for matching distilled base BTF types. */
for (id = 1; id < r->nr_base_types; id++) {
struct btf_name_info *dist_info, base_info = {};
int dist_kind, base_kind;
base_t = btf_type_by_id(r->base_btf, id);
/* distilled base consists of named types only. */
if (!base_t->name_off)
continue;
base_kind = btf_kind(base_t);
base_info.id = id;
base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
switch (base_kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
/* These types should match both name and size */
base_info.needs_size = true;
base_info.size = base_t->size;
break;
case BTF_KIND_FWD:
/* No size considerations for fwds. */
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
/* Size only needs to be used for struct/union if there
* are multiple types in base BTF with the same name.
* If there are multiple _distilled_ types with the same
* name (a very unlikely scenario), that doesn't matter
* unless corresponding _base_ types to match them are
* missing.
*/
base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
base_info.size = base_t->size;
break;
default:
continue;
}
/* iterate over all matching distilled base types */
for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
dist_info != NULL && dist_info < info_end &&
cmp_btf_name_size(&base_info, dist_info) == 0;
dist_info++) {
if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
id, dist_info->id);
err = -EINVAL;
goto done;
}
dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
dist_kind = btf_kind(dist_t);
/* Validate that the found distilled type is compatible.
* Do not error out on mismatch as another match may
* occur for an identically-named type.
*/
switch (dist_kind) {
case BTF_KIND_FWD:
switch (base_kind) {
case BTF_KIND_FWD:
if (btf_kflag(dist_t) != btf_kflag(base_t))
continue;
break;
case BTF_KIND_STRUCT:
if (btf_kflag(base_t))
continue;
break;
case BTF_KIND_UNION:
if (!btf_kflag(base_t))
continue;
break;
default:
continue;
}
break;
case BTF_KIND_INT:
if (dist_kind != base_kind ||
btf_int_encoding(base_t) != btf_int_encoding(dist_t))
continue;
break;
case BTF_KIND_FLOAT:
if (dist_kind != base_kind)
continue;
break;
case BTF_KIND_ENUM:
/* ENUM and ENUM64 are encoded as sized ENUM in
* distilled base BTF.
*/
if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64)
continue;
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
/* size verification is required for embedded
* struct/unions.
*/
if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
base_t->size != dist_t->size)
continue;
break;
default:
continue;
}
if (r->id_map[dist_info->id] &&
r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
/* we already have a match; this tells us that
* multiple base types of the same name
* have the same size, since for cases where
* multiple types have the same name we match
* on name and size. In this case, we have
* no way of determining which to relocate
* to in base BTF, so error out.
*/
pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
base_info.name, dist_info->id,
base_t->size, id, r->id_map[dist_info->id]);
err = -EINVAL;
goto done;
}
/* map id and name */
r->id_map[dist_info->id] = id;
r->str_map[dist_t->name_off] = base_t->name_off;
}
}
/* ensure all distilled BTF ids now have a mapping... */
for (id = 1; id < r->nr_dist_base_types; id++) {
const char *name;
if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED)
continue;
dist_t = btf_type_by_id(r->dist_base_btf, id);
name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n",
name, id);
err = -EINVAL;
break;
}
done:
free(base_name_cnt);
free(info);
return err;
}
/* distilled base should only have named int/float/enum/fwd/struct/union types. */
static int btf_relocate_validate_distilled_base(struct btf_relocate *r)
{
unsigned int i;
for (i = 1; i < r->nr_dist_base_types; i++) {
struct btf_type *t = btf_type_by_id(r->dist_base_btf, i);
int kind = btf_kind(t);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_FWD:
if (t->name_off)
break;
pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n",
i, kind);
return -EINVAL;
default:
pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n",
i, kind);
return -EINVAL;
}
}
return 0;
}
static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
{
struct btf_type *t = btf_type_by_id(r->btf, i);
struct btf_field_iter it;
__u32 *str_off;
int off, err;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return err;
while ((str_off = btf_field_iter_next(&it))) {
if (!*str_off)
continue;
if (*str_off >= r->dist_str_len) {
*str_off += r->base_str_len - r->dist_str_len;
} else {
off = r->str_map[*str_off];
if (!off) {
pr_warn("string '%s' [offset %u] is not mapped to base BTF",
btf__str_by_offset(r->btf, off), *str_off);
return -ENOENT;
}
*str_off = off;
}
}
return 0;
}
/* If successful, the output of relocation is updated BTF with its base BTF
* pointing at base_btf, and type ids and strings adjusted accordingly.
*/
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map)
{
unsigned int nr_types = btf__type_cnt(btf);
const struct btf_header *dist_base_hdr;
const struct btf_header *base_hdr;
struct btf_relocate r = {};
int err = 0;
__u32 id, i;
r.dist_base_btf = btf__base_btf(btf);
if (!base_btf || r.dist_base_btf == base_btf)
return -EINVAL;
r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf);
r.nr_base_types = btf__type_cnt(base_btf);
r.nr_split_types = nr_types - r.nr_dist_base_types;
r.btf = btf;
r.base_btf = base_btf;
r.id_map = calloc(nr_types, sizeof(*r.id_map));
r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map));
dist_base_hdr = btf_header(r.dist_base_btf);
base_hdr = btf_header(r.base_btf);
r.dist_str_len = dist_base_hdr->str_len;
r.base_str_len = base_hdr->str_len;
if (!r.id_map || !r.str_map) {
err = -ENOMEM;
goto err_out;
}
err = btf_relocate_validate_distilled_base(&r);
if (err)
goto err_out;
/* Split BTF ids need to be adjusted as base and distilled base
* have different numbers of types, changing the start id of split
* BTF.
*/
for (id = r.nr_dist_base_types; id < nr_types; id++)
r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types;
/* Build a map from distilled base ids to actual base BTF ids; it is used
* to update split BTF id references. Also build a str_map mapping from
* distilled base BTF names to base BTF names.
*/
err = btf_relocate_map_distilled_base(&r);
if (err)
goto err_out;
/* Next, rewrite type ids in split BTF, replacing split ids with updated
* ids based on number of types in base BTF, and base ids with
* relocated ids from base_btf.
*/
for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) {
err = btf_relocate_rewrite_type_id(&r, id);
if (err)
goto err_out;
}
/* String offsets now need to be updated using the str_map. */
for (i = 0; i < r.nr_split_types; i++) {
err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types);
if (err)
goto err_out;
}
/* Finally reset base BTF to be base_btf */
btf_set_base_btf(btf, base_btf);
if (id_map) {
*id_map = r.id_map;
r.id_map = NULL;
}
err_out:
free(r.id_map);
free(r.str_map);
return err;
}


@@ -28,6 +28,9 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd)
int fd, ret;
Elf *elf;
elf_fd->elf = NULL;
elf_fd->fd = -1;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("elf: failed to init libelf for %s\n", binary_path);
return -LIBBPF_ERRNO__LIBELF;


@@ -496,8 +496,6 @@ struct bpf_program {
};
struct bpf_struct_ops {
const char *tname;
const struct btf_type *type;
struct bpf_program **progs;
__u32 *kern_func_off;
/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
@@ -572,6 +570,7 @@ struct bpf_map {
bool pinned;
bool reused;
bool autocreate;
bool autoattach;
__u64 map_extra;
};
@@ -1082,11 +1081,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
continue;
for (j = 0; j < obj->nr_maps; ++j) {
const struct btf_type *type;
map = &obj->maps[j];
if (!bpf_map__is_struct_ops(map))
continue;
vlen = btf_vlen(map->st_ops->type);
type = btf__type_by_id(obj->btf, map->st_ops->type_id);
vlen = btf_vlen(type);
for (k = 0; k < vlen; ++k) {
slot_prog = map->st_ops->progs[k];
if (prog != slot_prog)
@@ -1120,8 +1122,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
int err;
st_ops = map->st_ops;
type = st_ops->type;
tname = st_ops->tname;
type = btf__type_by_id(btf, st_ops->type_id);
tname = btf__name_by_offset(btf, type->name_off);
err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
@@ -1400,6 +1402,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->def.value_size = type->size;
map->def.max_entries = 1;
map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
map->autoattach = true;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -1421,8 +1424,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
memcpy(st_ops->data,
data->d_buf + vsi->offset,
type->size);
st_ops->tname = tname;
st_ops->type = type;
st_ops->type_id = type_id;
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
@@ -4819,6 +4820,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
return 0;
}
int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
{
if (!bpf_map__is_struct_ops(map))
return libbpf_err(-EINVAL);
map->autoattach = autoattach;
return 0;
}
bool bpf_map__autoattach(const struct bpf_map *map)
{
return map->autoattach;
}
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
struct bpf_map_info info;
@@ -7890,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
}
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
const char *obj_name,
const struct bpf_object_open_opts *opts)
{
const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
const char *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
char *log_buf;
size_t log_size;
__u32 log_level;
if (obj_buf && !obj_name)
return ERR_PTR(-EINVAL);
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
path ? : "(mem buf)");
@@ -7909,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
obj_name = OPTS_GET(opts, object_name, NULL);
obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
if (obj_buf) {
if (!obj_name) {
snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
(unsigned long)obj_buf,
(unsigned long)obj_buf_sz);
obj_name = tmp_name;
}
path = obj_name;
pr_debug("loading object '%s' from buffer\n", obj_name);
} else {
pr_debug("loading object from %s\n", path);
}
log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
@@ -8002,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
if (!path)
return libbpf_err_ptr(-EINVAL);
pr_debug("loading %s\n", path);
return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
}
struct bpf_object *bpf_object__open(const char *path)
@@ -8016,10 +8028,15 @@ struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
char tmp_name[64];
if (!obj_buf || obj_buf_sz == 0)
return libbpf_err_ptr(-EINVAL);
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
/* create a (quite useless) default "name" for this memory buffer object */
snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
}
static int bpf_object_unload(struct bpf_object *obj)
@@ -8429,11 +8446,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
static void bpf_map_prepare_vdata(const struct bpf_map *map)
{
const struct btf_type *type;
struct bpf_struct_ops *st_ops;
__u32 i;
st_ops = map->st_ops;
for (i = 0; i < btf_vlen(st_ops->type); i++) {
type = btf__type_by_id(map->obj->btf, st_ops->type_id);
for (i = 0; i < btf_vlen(type); i++) {
struct bpf_program *prog = st_ops->progs[i];
void *kern_data;
int prog_fd;
@@ -9696,6 +9715,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
const struct btf_type *type;
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
struct bpf_program *prog;
@@ -9755,13 +9775,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}
insn_idx = sym->st_value / BPF_INSN_SZ;
member = find_member_by_offset(st_ops->type, moff * 8);
type = btf__type_by_id(btf, st_ops->type_id);
member = find_member_by_offset(type, moff * 8);
if (!member) {
pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
map->name, moff);
return -EINVAL;
}
member_idx = member - btf_members(st_ops->type);
member_idx = member - btf_members(type);
name = btf__name_by_offset(btf, member->name_off);
if (!resolve_func_ptr(btf, member->type, NULL)) {
@@ -10359,7 +10380,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
if (prev == NULL)
if (prev == NULL && obj != NULL)
return obj->maps;
return __bpf_map__iter(prev, obj, 1);
@@ -10368,7 +10389,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
if (next == NULL) {
if (next == NULL && obj != NULL) {
if (!obj->nr_maps)
return NULL;
return obj->maps + obj->nr_maps - 1;
@@ -12900,8 +12921,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
__u32 zero = 0;
int err, fd;
if (!bpf_map__is_struct_ops(map))
if (!bpf_map__is_struct_ops(map)) {
pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
return libbpf_err_ptr(-EINVAL);
}
if (map->fd < 0) {
pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
@@ -13694,14 +13717,15 @@ int libbpf_num_possible_cpus(void)
static int populate_skeleton_maps(const struct bpf_object *obj,
struct bpf_map_skeleton *maps,
size_t map_cnt)
size_t map_cnt, size_t map_skel_sz)
{
int i;
for (i = 0; i < map_cnt; i++) {
struct bpf_map **map = maps[i].map;
const char *name = maps[i].name;
void **mmaped = maps[i].mmaped;
struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
struct bpf_map **map = map_skel->map;
const char *name = map_skel->name;
void **mmaped = map_skel->mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
@@ -13718,13 +13742,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
static int populate_skeleton_progs(const struct bpf_object *obj,
struct bpf_prog_skeleton *progs,
size_t prog_cnt)
size_t prog_cnt, size_t prog_skel_sz)
{
int i;
for (i = 0; i < prog_cnt; i++) {
struct bpf_program **prog = progs[i].prog;
const char *name = progs[i].name;
struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
struct bpf_program **prog = prog_skel->prog;
const char *name = prog_skel->name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
@@ -13738,40 +13763,24 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
.object_name = s->name,
);
struct bpf_object *obj;
int err;
/* Attempt to preserve opts->object_name, unless overridden by user
* explicitly. Overwriting object name for skeletons is discouraged,
* as it breaks global data maps, because they contain object name
* prefix as their own map name prefix. When skeleton is generated,
* bpftool is making an assumption that this name will stay the same.
*/
if (opts) {
memcpy(&skel_opts, opts, sizeof(*opts));
if (!opts->object_name)
skel_opts.object_name = s->name;
}
obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
err = libbpf_get_error(obj);
if (err) {
pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
s->name, err);
obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
return libbpf_err(err);
}
*s->obj = obj;
err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
@@ -13801,20 +13810,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
return libbpf_err(-errno);
}
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
var_skel = &s->vars[var_idx];
var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
map = *var_skel->map;
map_type_id = bpf_map__btf_value_type_id(map);
map_type = btf__type_by_id(btf, map_type_id);
@@ -13861,10 +13870,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = s->maps[i].mmaped;
void **mmaped = map_skel->mmaped;
if (!mmaped)
continue;
@@ -13912,8 +13922,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
int i, err;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
struct bpf_program *prog = *prog_skel->prog;
struct bpf_link **link = prog_skel->link;
if (!prog->autoload || !prog->autoattach)
continue;
@@ -13945,6 +13956,38 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
*/
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
struct bpf_link **link;
if (!map->autocreate || !map->autoattach)
continue;
/* only struct_ops maps can be attached */
if (!bpf_map__is_struct_ops(map))
continue;
/* skeleton was created with an earlier version of bpftool; notify user */
if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
bpf_map__name(map));
continue;
}
link = map_skel->link;
if (*link)
continue;
*link = bpf_map__attach_struct_ops(map);
if (!*link) {
err = -errno;
pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
return libbpf_err(err);
}
}
return 0;
}
@@ -13953,11 +13996,25 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
int i;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_link **link = s->progs[i].link;
struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
struct bpf_link **link = prog_skel->link;
bpf_link__destroy(*link);
*link = NULL;
}
if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
return;
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_link **link = map_skel->link;
if (link) {
bpf_link__destroy(*link);
*link = NULL;
}
}
}
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
@@ -13965,8 +14022,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
if (!s)
return;
if (s->progs)
bpf_object__detach_skeleton(s);
bpf_object__detach_skeleton(s);
if (s->obj)
bpf_object__close(*s->obj);
free(s->maps);


@@ -978,6 +978,23 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
/**
* @brief **bpf_map__set_autoattach()** sets whether libbpf should auto-attach
* the map during the BPF skeleton attach phase.
* @param map the BPF map instance
* @param autoattach whether to attach map during BPF skeleton attach phase
* @return 0 on success; negative error code, otherwise
*/
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
/**
* @brief **bpf_map__autoattach()** returns whether BPF map is configured to
* auto-attach during BPF skeleton attach phase.
* @param map the BPF map instance
* @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
*/
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
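A brief usage sketch (skeleton and map names are hypothetical, from a bpftool-generated skeleton):

struct my_skel *skel = my_skel__open_and_load();

/* opt the struct_ops map out of auto-attach during skeleton attach;
 * it can still be attached manually via bpf_map__attach_struct_ops()
 */
bpf_map__set_autoattach(skel->maps.my_ops, false);

/* my_skel__attach() -> bpf_object__attach_skeleton() now skips my_ops */
my_skel__attach(skel);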
/**
* @brief **bpf_map__fd()** gets the file descriptor of the passed
* BPF map
@@ -1672,6 +1689,7 @@ struct bpf_map_skeleton {
const char *name;
struct bpf_map **map;
void **mmaped;
struct bpf_link **link;
};
struct bpf_prog_skeleton {


@@ -419,6 +419,10 @@ LIBBPF_1.4.0 {
LIBBPF_1.5.0 {
global:
btf__distill_base;
btf__relocate;
bpf_map__autoattach;
bpf_map__set_autoattach;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;


@@ -234,6 +234,9 @@ struct btf_type;
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
const struct btf_header *btf_header(const struct btf *btf);
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
{
@@ -508,11 +511,33 @@ struct bpf_line_info_min {
__u32 line_col;
};
enum btf_field_iter_kind {
BTF_FIELD_ITER_IDS,
BTF_FIELD_ITER_STRS,
};
struct btf_field_desc {
/* once-per-type offsets */
int t_off_cnt, t_offs[2];
/* member struct size, or zero, if no members */
int m_sz;
/* repeated per-member offsets */
int m_off_cnt, m_offs[1];
};
struct btf_field_iter {
struct btf_field_desc desc;
void *p;
int m_idx;
int off_idx;
int vlen;
};
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
__u32 *btf_field_iter_next(struct btf_field_iter *it);
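For reference, the canonical iteration pattern this iterator enables, as used throughout btf.c and linker.c in this change (t is some struct btf_type *):

struct btf_field_iter it;
__u32 *type_id;
int err;

err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
        return err;
/* yields &t->type and/or per-member type id fields, depending on kind */
while ((type_id = btf_field_iter_next(&it))) {
        if (!*type_id)
                continue; /* VOID reference */
        /* inspect or rewrite *type_id here */
}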
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,


@@ -97,9 +97,6 @@ __u32 get_kernel_version(void)
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
return 0;
if (major == 4 && minor == 19 && patch > 255)
return KERNEL_VERSION(major, minor, 255);
return KERNEL_VERSION(major, minor, patch);
}


@@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
static int linker_sanity_check_btf(struct src_obj *obj)
{
struct btf_type *t;
int i, n, err = 0;
int i, n, err;
if (!obj->btf)
return 0;
n = btf__type_cnt(obj->btf);
for (i = 1; i < n; i++) {
struct btf_field_iter it;
__u32 *type_id, *str_off;
t = btf_type_by_id(obj->btf, i);
err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((type_id = btf_field_iter_next(&it))) {
if (*type_id >= n)
return -EINVAL;
}
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return err;
while ((str_off = btf_field_iter_next(&it))) {
if (!btf__str_by_offset(obj->btf, *str_off))
return -EINVAL;
}
}
return 0;
@@ -2213,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj)
vi = btf_var_secinfos(t);
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
int var_linkage = btf_var(vt)->linkage;
const char *var_name;
int var_linkage;
Elf64_Sym *sym;
/* could be a variable or function */
if (!btf_is_var(vt))
continue;
var_name = btf__str_by_offset(obj->btf, vt->name_off);
var_linkage = btf_var(vt)->linkage;
/* no need to patch up static or extern vars */
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
continue;
@@ -2234,26 +2255,10 @@ static int linker_fixup_btf(struct src_obj *obj)
return 0;
}
static int remap_type_id(__u32 *type_id, void *ctx)
{
int *id_map = ctx;
int new_id = id_map[*type_id];
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
if (new_id == 0 && *type_id != 0) {
pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
return -EINVAL;
}
*type_id = id_map[*type_id];
return 0;
}
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
{
const struct btf_type *t;
int i, j, n, start_id, id;
int i, j, n, start_id, id, err;
const char *name;
if (!obj->btf)
@@ -2324,9 +2329,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
n = btf__type_cnt(linker->btf);
for (i = start_id; i < n; i++) {
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
struct btf_field_iter it;
__u32 *type_id;
if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
return -EINVAL;
err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((type_id = btf_field_iter_next(&it))) {
int new_id = obj->btf_type_map[*type_id];
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
if (new_id == 0 && *type_id != 0) {
pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
*type_id);
return -EINVAL;
}
*type_id = obj->btf_type_map[*type_id];
}
}
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's