mirror of
https://github.com/netdata/libbpf.git
synced 2026-03-14 13:29:06 +08:00
Compare commits
67 Commits
netdata_pa
...
detached
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
057f85d000 | ||
|
|
caa17bdcbf | ||
|
|
76c9f50f3e | ||
|
|
fe28fae57a | ||
|
|
f6f24022d3 | ||
|
|
4bd31a1044 | ||
|
|
33b22671c2 | ||
|
|
8b29484790 | ||
|
|
7b5237996a | ||
|
|
a89e519b40 | ||
|
|
205e86de8b | ||
|
|
86fc78bd2b | ||
|
|
20ccbb303a | ||
|
|
26443a6d43 | ||
|
|
22ec3eb15d | ||
|
|
bc24cd126a | ||
|
|
92316f5072 | ||
|
|
a73c6f7f80 | ||
|
|
8e47e755cd | ||
|
|
ec0d0fda8b | ||
|
|
b07dfe3b2a | ||
|
|
6923eb970e | ||
|
|
686f600bca | ||
|
|
726d7f3722 | ||
|
|
e6f1ae2557 | ||
|
|
bf7ddbef99 | ||
|
|
1867490d8f | ||
|
|
24aca0740b | ||
|
|
c1a6c770c4 | ||
|
|
223cd2273e | ||
|
|
dcd076347c | ||
|
|
e4982342e7 | ||
|
|
95c63a08f2 | ||
|
|
27f0169332 | ||
|
|
4ffb92e204 | ||
|
|
bc021a8b42 | ||
|
|
88a0787335 | ||
|
|
4bc5a64933 | ||
|
|
2afe409348 | ||
|
|
36cb1ad3ae | ||
|
|
0a66859bf1 | ||
|
|
be998aa3d4 | ||
|
|
78c78e90cd | ||
|
|
dd19c7ef77 | ||
|
|
13182b94f3 | ||
|
|
cece3242fb | ||
|
|
42065ea662 | ||
|
|
764d19da07 | ||
|
|
7d1fe77f65 | ||
|
|
fbcb2871fe | ||
|
|
61a6e8edd7 | ||
|
|
4ab7361e64 | ||
|
|
ff856238e2 | ||
|
|
c085e9c364 | ||
|
|
805b689cd2 | ||
|
|
9b789075a9 | ||
|
|
c22d662a95 | ||
|
|
074445067f | ||
|
|
9a1f1f28c6 | ||
|
|
0a519f87ee | ||
|
|
d9f9fd5b22 | ||
|
|
d4d3e68e8d | ||
|
|
0babfb126a | ||
|
|
89ed67d7ab | ||
|
|
8dfa981c53 | ||
|
|
15b461a608 | ||
|
|
ec3c369941 |
3
.github/PULL_REQUEST_TEMPLATE.md
vendored
3
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,3 +0,0 @@
|
||||
Thank you for considering a contribution!
|
||||
|
||||
Please note that the `libbpf` authoritative source code is developed as part of bpf-next Linux source tree under tools/lib/bpf subdirectory and is periodically synced to Github. As such, all the libbpf changes should be sent to BPF mailing list, please don't open PRs here unless you are changing Github-specific parts of libbpf (e.g., Github-specific Makefile).
|
||||
5
.github/actions/build-selftests/action.yml
vendored
5
.github/actions/build-selftests/action.yml
vendored
@@ -12,6 +12,9 @@ inputs:
|
||||
description: 'where is vmlinux file'
|
||||
required: true
|
||||
default: '${{ github.workspace }}/vmlinux'
|
||||
llvm-version:
|
||||
description: 'llvm version'
|
||||
required: true
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
@@ -28,4 +31,6 @@ runs:
|
||||
export REPO_ROOT="${{ github.workspace }}"
|
||||
export REPO_PATH="${{ inputs.repo-path }}"
|
||||
export VMLINUX_BTF="${{ inputs.vmlinux }}"
|
||||
export LLVM_VERSION="${{ inputs.llvm-version }}"
|
||||
|
||||
${{ github.action_path }}/build_selftests.sh
|
||||
|
||||
@@ -10,22 +10,21 @@ foldable start prepare_selftests "Building selftests"
|
||||
|
||||
LIBBPF_PATH="${REPO_ROOT}"
|
||||
|
||||
llvm_default_version() {
|
||||
echo "16"
|
||||
}
|
||||
|
||||
llvm_latest_version() {
|
||||
echo "17"
|
||||
echo "19"
|
||||
}
|
||||
|
||||
LLVM_VERSION=$(llvm_default_version)
|
||||
if [[ "${LLVM_VERSION}" == $(llvm_latest_version) ]]; then
|
||||
REPO_DISTRO_SUFFIX=""
|
||||
else
|
||||
REPO_DISTRO_SUFFIX="-${LLVM_VERSION}"
|
||||
fi
|
||||
|
||||
echo "deb https://apt.llvm.org/focal/ llvm-toolchain-focal${REPO_DISTRO_SUFFIX} main" \
|
||||
DISTRIB_CODENAME="noble"
|
||||
test -f /etc/lsb-release && . /etc/lsb-release
|
||||
echo "${DISTRIB_CODENAME}"
|
||||
|
||||
echo "deb https://apt.llvm.org/${DISTRIB_CODENAME}/ llvm-toolchain-${DISTRIB_CODENAME}${REPO_DISTRO_SUFFIX} main" \
|
||||
| sudo tee /etc/apt/sources.list.d/llvm.list
|
||||
|
||||
PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh
|
||||
|
||||
188369
.github/actions/build-selftests/vmlinux.h
vendored
188369
.github/actions/build-selftests/vmlinux.h
vendored
File diff suppressed because it is too large
Load Diff
6
.github/actions/vmtest/action.yml
vendored
6
.github/actions/vmtest/action.yml
vendored
@@ -13,6 +13,10 @@ inputs:
|
||||
description: 'pahole rev or master'
|
||||
required: true
|
||||
default: 'master'
|
||||
llvm-version:
|
||||
description: 'llvm version'
|
||||
required: false
|
||||
default: '17'
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
@@ -38,6 +42,7 @@ runs:
|
||||
with:
|
||||
pahole: ${{ inputs.pahole }}
|
||||
arch: ${{ inputs.arch }}
|
||||
llvm-version: ${{ inputs.llvm-version }}
|
||||
# 1. download CHECKPOINT kernel source
|
||||
- name: Get checkpoint commit
|
||||
shell: bash
|
||||
@@ -93,6 +98,7 @@ runs:
|
||||
with:
|
||||
repo-path: '.kernel'
|
||||
kernel: ${{ inputs.kernel }}
|
||||
llvm-version: ${{ inputs.llvm-version }}
|
||||
# 4. prepare rootfs
|
||||
- name: prepare rootfs
|
||||
uses: libbpf/ci/prepare-rootfs@main
|
||||
|
||||
4
.github/workflows/build.yml
vendored
4
.github/workflows/build.yml
vendored
@@ -53,7 +53,7 @@ jobs:
|
||||
|
||||
ubuntu:
|
||||
runs-on: ubuntu-latest
|
||||
name: Ubuntu Focal Build (${{ matrix.arch }})
|
||||
name: Ubuntu Build (${{ matrix.arch }})
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -75,7 +75,7 @@ jobs:
|
||||
if: matrix.arch != 'x86'
|
||||
with:
|
||||
distro:
|
||||
ubuntu20.04
|
||||
ubuntu22.04
|
||||
arch:
|
||||
${{ matrix.arch }}
|
||||
setup:
|
||||
|
||||
2
.github/workflows/codeql.yml
vendored
2
.github/workflows/codeql.yml
vendored
@@ -17,7 +17,7 @@ permissions:
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
runs-on: ubuntu-22.04
|
||||
runs-on: ubuntu-latest
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
2
.github/workflows/pahole.yml
vendored
2
.github/workflows/pahole.yml
vendored
@@ -1,10 +1,10 @@
|
||||
name: pahole-staging
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 18 * * *'
|
||||
|
||||
|
||||
jobs:
|
||||
vmtest:
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
10
.github/workflows/test.yml
vendored
10
.github/workflows/test.yml
vendored
@@ -13,22 +13,22 @@ concurrency:
|
||||
jobs:
|
||||
vmtest:
|
||||
runs-on: ${{ matrix.runs_on }}
|
||||
name: Kernel ${{ matrix.kernel }} on ${{ matrix.runs_on }} + selftests
|
||||
name: Kernel ${{ matrix.kernel }} on ${{ matrix.arch }} + selftests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- kernel: 'LATEST'
|
||||
runs_on: ubuntu-20.04
|
||||
runs_on: ubuntu-24.04
|
||||
arch: 'x86_64'
|
||||
- kernel: '5.5.0'
|
||||
runs_on: ubuntu-20.04
|
||||
runs_on: ubuntu-24.04
|
||||
arch: 'x86_64'
|
||||
- kernel: '4.9.0'
|
||||
runs_on: ubuntu-20.04
|
||||
runs_on: ubuntu-24.04
|
||||
arch: 'x86_64'
|
||||
- kernel: 'LATEST'
|
||||
runs_on: s390x
|
||||
runs_on: ["s390x", "docker-noble-main"]
|
||||
arch: 's390x'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
4
.mailmap
4
.mailmap
@@ -8,10 +8,14 @@ Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
|
||||
Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
|
||||
Herbert Xu <herbert@gondor.apana.org.au>
|
||||
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
|
||||
Kees Cook <kees@kernel.org> <keescook@chromium.org>
|
||||
Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
|
||||
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
|
||||
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com>
|
||||
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com>
|
||||
Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
|
||||
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
|
||||
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
|
||||
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
|
||||
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vadfed@meta.com>
|
||||
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vfedorenko@novek.ru>
|
||||
|
||||
@@ -1 +1 @@
|
||||
3e9bc0472b910d4115e16e9c2d684c7757cb6c60
|
||||
b408473ea01b2e499d23503e2bf898416da9d7ac
|
||||
|
||||
@@ -1 +1 @@
|
||||
009367099eb61a4fc2af44d4eb06b6b4de7de6db
|
||||
2ad6d23f465a4f851e3bcf6d74c315ce7b2c205b
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
From 0daad0a615e687e1247230f3d0c31ae60ba32314 Mon Sep 17 00:00:00 2001
|
||||
From: Andrii Nakryiko <andrii@kernel.org>
|
||||
Date: Tue, 28 May 2024 15:29:38 -0700
|
||||
Subject: [PATCH bpf-next] selftests/bpf: fix inet_csk_accept prototype in
|
||||
test_sk_storage_tracing.c
|
||||
|
||||
Recent kernel change ([0]) changed inet_csk_accept() prototype. Adapt
|
||||
progs/test_sk_storage_tracing.c to take that into account.
|
||||
|
||||
[0] 92ef0fd55ac8 ("net: change proto and proto_ops accept type")
|
||||
|
||||
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
|
||||
---
|
||||
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
|
||||
index 02e718f06e0f..40531e56776e 100644
|
||||
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
|
||||
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
|
||||
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
|
||||
}
|
||||
|
||||
SEC("fexit/inet_csk_accept")
|
||||
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
|
||||
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
|
||||
struct sock *accepted_sk)
|
||||
{
|
||||
set_task_info(accepted_sk);
|
||||
--
|
||||
2.43.0
|
||||
|
||||
@@ -12,3 +12,4 @@ xdp_bonding/xdp_bonding_features # started failing after net merge from 359e
|
||||
tc_redirect/tc_redirect_dtime # uapi breakage after net-next commit 885c36e59f46 ("net: Re-use and set mono_delivery_time bit for userspace tstamp packets")
|
||||
migrate_reuseport/IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
|
||||
migrate_reuseport/IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
|
||||
verify_pkcs7_sig # keeps failing
|
||||
|
||||
@@ -67,12 +67,14 @@ local_configs_path=${PROJECT_NAME}/vmtest/configs
|
||||
DENYLIST=$(read_lists \
|
||||
"$configs_path/DENYLIST" \
|
||||
"$configs_path/DENYLIST.${ARCH}" \
|
||||
"$local_configs_path/DENYLIST" \
|
||||
"$local_configs_path/DENYLIST-${KERNEL}" \
|
||||
"$local_configs_path/DENYLIST-${KERNEL}.${ARCH}" \
|
||||
)
|
||||
ALLOWLIST=$(read_lists \
|
||||
"$configs_path/ALLOWLIST" \
|
||||
"$configs_path/ALLOWLIST.${ARCH}" \
|
||||
"$local_configs_path/ALLOWLIST" \
|
||||
"$local_configs_path/ALLOWLIST-${KERNEL}" \
|
||||
"$local_configs_path/ALLOWLIST-${KERNEL}.${ARCH}" \
|
||||
)
|
||||
|
||||
@@ -219,6 +219,14 @@ compilation and skeleton generation. Using Libbpf-rs will make building user
|
||||
space part of the BPF application easier. Note that the BPF program themselves
|
||||
must still be written in plain C.
|
||||
|
||||
libbpf logging
|
||||
==============
|
||||
|
||||
By default, libbpf logs informational and warning messages to stderr. The
|
||||
verbosity of these messages can be controlled by setting the environment
|
||||
variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log
|
||||
callback can be set using ``libbpf_set_print()``.
|
||||
|
||||
Additional Documentation
|
||||
========================
|
||||
|
||||
|
||||
@@ -1425,6 +1425,8 @@ enum {
|
||||
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
|
||||
/* If set, XDP frames will be transmitted after processing */
|
||||
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
|
||||
/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
|
||||
#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
|
||||
|
||||
/* type for BPF_ENABLE_STATS */
|
||||
enum bpf_stats_type {
|
||||
@@ -6207,12 +6209,17 @@ union { \
|
||||
__u64 :64; \
|
||||
} __attribute__((aligned(8)))
|
||||
|
||||
/* The enum used in skb->tstamp_type. It specifies the clock type
|
||||
* of the time stored in the skb->tstamp.
|
||||
*/
|
||||
enum {
|
||||
BPF_SKB_TSTAMP_UNSPEC,
|
||||
BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
|
||||
/* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
|
||||
* the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
|
||||
* and try to deduce it by ingress, egress or skb->sk->sk_clockid.
|
||||
BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */
|
||||
BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */
|
||||
BPF_SKB_CLOCK_REALTIME = 0,
|
||||
BPF_SKB_CLOCK_MONOTONIC = 1,
|
||||
BPF_SKB_CLOCK_TAI = 2,
|
||||
/* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
|
||||
* the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
|
||||
*/
|
||||
};
|
||||
|
||||
@@ -7505,4 +7512,13 @@ struct bpf_iter_num {
|
||||
__u64 __opaque[1];
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
/*
|
||||
* Flags to control BPF kfunc behaviour.
|
||||
* - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
|
||||
* helper documentation for details.)
|
||||
*/
|
||||
enum bpf_kfunc_flags {
|
||||
BPF_F_PAD_ZEROS = (1ULL << 0),
|
||||
};
|
||||
|
||||
#endif /* _UAPI__LINUX_BPF_H__ */
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
#ifndef _UAPI_LINUX_FCNTL_H
|
||||
#define _UAPI_LINUX_FCNTL_H
|
||||
|
||||
#include <asm/fcntl.h>
|
||||
#include <linux/openat2.h>
|
||||
|
||||
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
|
||||
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
|
||||
|
||||
/*
|
||||
* Cancel a blocking posix lock; internal use only until we expose an
|
||||
* asynchronous lock api to userspace:
|
||||
*/
|
||||
#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5)
|
||||
|
||||
/* Create a file descriptor with FD_CLOEXEC set. */
|
||||
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
|
||||
|
||||
/*
|
||||
* Request nofications on a directory.
|
||||
* See below for events that may be notified.
|
||||
*/
|
||||
#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
|
||||
|
||||
/*
|
||||
* Set and get of pipe page size array
|
||||
*/
|
||||
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
|
||||
#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
|
||||
|
||||
/*
|
||||
* Set/Get seals
|
||||
*/
|
||||
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
|
||||
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
|
||||
|
||||
/*
|
||||
* Types of seals
|
||||
*/
|
||||
#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
|
||||
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
|
||||
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
|
||||
#define F_SEAL_WRITE 0x0008 /* prevent writes */
|
||||
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
|
||||
#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */
|
||||
/* (1U << 31) is reserved for signed error codes */
|
||||
|
||||
/*
|
||||
* Set/Get write life time hints. {GET,SET}_RW_HINT operate on the
|
||||
* underlying inode, while {GET,SET}_FILE_RW_HINT operate only on
|
||||
* the specific file.
|
||||
*/
|
||||
#define F_GET_RW_HINT (F_LINUX_SPECIFIC_BASE + 11)
|
||||
#define F_SET_RW_HINT (F_LINUX_SPECIFIC_BASE + 12)
|
||||
#define F_GET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 13)
|
||||
#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
|
||||
|
||||
/*
|
||||
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
|
||||
* used to clear any hints previously set.
|
||||
*/
|
||||
#define RWH_WRITE_LIFE_NOT_SET 0
|
||||
#define RWH_WRITE_LIFE_NONE 1
|
||||
#define RWH_WRITE_LIFE_SHORT 2
|
||||
#define RWH_WRITE_LIFE_MEDIUM 3
|
||||
#define RWH_WRITE_LIFE_LONG 4
|
||||
#define RWH_WRITE_LIFE_EXTREME 5
|
||||
|
||||
/*
|
||||
* The originally introduced spelling is remained from the first
|
||||
* versions of the patch set that introduced the feature, see commit
|
||||
* v4.13-rc1~212^2~51.
|
||||
*/
|
||||
#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
|
||||
|
||||
/*
|
||||
* Types of directory notifications that may be requested.
|
||||
*/
|
||||
#define DN_ACCESS 0x00000001 /* File accessed */
|
||||
#define DN_MODIFY 0x00000002 /* File modified */
|
||||
#define DN_CREATE 0x00000004 /* File created */
|
||||
#define DN_DELETE 0x00000008 /* File removed */
|
||||
#define DN_RENAME 0x00000010 /* File renamed */
|
||||
#define DN_ATTRIB 0x00000020 /* File changed attibutes */
|
||||
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
|
||||
|
||||
/*
|
||||
* The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
|
||||
* meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
|
||||
* unlinkat. The two functions do completely different things and therefore,
|
||||
* the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to
|
||||
* faccessat would be undefined behavior and thus treating it equivalent to
|
||||
* AT_EACCESS is valid undefined behavior.
|
||||
*/
|
||||
#define AT_FDCWD -100 /* Special value used to indicate
|
||||
openat should use the current
|
||||
working directory. */
|
||||
#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
|
||||
#define AT_EACCESS 0x200 /* Test access permitted for
|
||||
effective IDs, not real IDs. */
|
||||
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
|
||||
unlinking file. */
|
||||
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
|
||||
#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
|
||||
#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
|
||||
|
||||
#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
|
||||
#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
|
||||
#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
|
||||
#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
|
||||
|
||||
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
|
||||
|
||||
/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
|
||||
#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
|
||||
compare object identity and may not
|
||||
be usable to open_by_handle_at(2) */
|
||||
#if defined(__KERNEL__)
|
||||
#define AT_GETATTR_NOSEC 0x80000000
|
||||
#endif
|
||||
|
||||
#endif /* _UAPI_LINUX_FCNTL_H */
|
||||
@@ -41,6 +41,10 @@
|
||||
*/
|
||||
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
|
||||
|
||||
/* Request to reserve tx_metadata_len bytes of per-chunk metadata.
|
||||
*/
|
||||
#define XDP_UMEM_TX_METADATA_LEN (1 << 2)
|
||||
|
||||
struct sockaddr_xdp {
|
||||
__u16 sxdp_family;
|
||||
__u16 sxdp_flags;
|
||||
|
||||
@@ -146,6 +146,28 @@ enum {
|
||||
NETDEV_A_QSTATS_TX_PACKETS,
|
||||
NETDEV_A_QSTATS_TX_BYTES,
|
||||
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
|
||||
NETDEV_A_QSTATS_RX_HW_DROPS,
|
||||
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
|
||||
NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
|
||||
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
|
||||
NETDEV_A_QSTATS_RX_CSUM_NONE,
|
||||
NETDEV_A_QSTATS_RX_CSUM_BAD,
|
||||
NETDEV_A_QSTATS_RX_HW_GRO_PACKETS,
|
||||
NETDEV_A_QSTATS_RX_HW_GRO_BYTES,
|
||||
NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS,
|
||||
NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES,
|
||||
NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS,
|
||||
NETDEV_A_QSTATS_TX_HW_DROPS,
|
||||
NETDEV_A_QSTATS_TX_HW_DROP_ERRORS,
|
||||
NETDEV_A_QSTATS_TX_CSUM_NONE,
|
||||
NETDEV_A_QSTATS_TX_NEEDS_CSUM,
|
||||
NETDEV_A_QSTATS_TX_HW_GSO_PACKETS,
|
||||
NETDEV_A_QSTATS_TX_HW_GSO_BYTES,
|
||||
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
|
||||
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
|
||||
NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
|
||||
NETDEV_A_QSTATS_TX_STOP,
|
||||
NETDEV_A_QSTATS_TX_WAKE,
|
||||
|
||||
__NETDEV_A_QSTATS_MAX,
|
||||
NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
#ifndef _UAPI_LINUX_OPENAT2_H
|
||||
#define _UAPI_LINUX_OPENAT2_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* Arguments for how openat2(2) should open the target path. If only @flags and
|
||||
* @mode are non-zero, then openat2(2) operates very similarly to openat(2).
|
||||
*
|
||||
* However, unlike openat(2), unknown or invalid bits in @flags result in
|
||||
* -EINVAL rather than being silently ignored. @mode must be zero unless one of
|
||||
* {O_CREAT, O_TMPFILE} are set.
|
||||
*
|
||||
* @flags: O_* flags.
|
||||
* @mode: O_CREAT/O_TMPFILE file mode.
|
||||
* @resolve: RESOLVE_* flags.
|
||||
*/
|
||||
struct open_how {
|
||||
__u64 flags;
|
||||
__u64 mode;
|
||||
__u64 resolve;
|
||||
};
|
||||
|
||||
/* how->resolve flags for openat2(2). */
|
||||
#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings
|
||||
(includes bind-mounts). */
|
||||
#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style
|
||||
"magic-links". */
|
||||
#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks
|
||||
(implies OEXT_NO_MAGICLINKS) */
|
||||
#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like
|
||||
"..", symlinks, and absolute
|
||||
paths which escape the dirfd. */
|
||||
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
|
||||
be scoped inside the dirfd
|
||||
(similar to chroot(2)). */
|
||||
#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
|
||||
completed through cached lookup. May
|
||||
return -EAGAIN if that's not
|
||||
possible. */
|
||||
|
||||
#endif /* _UAPI_LINUX_OPENAT2_H */
|
||||
@@ -1349,12 +1349,14 @@ union perf_mem_data_src {
|
||||
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
|
||||
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
|
||||
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
|
||||
/* 5-0x7 available */
|
||||
#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
|
||||
#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
|
||||
/* 0x7 available */
|
||||
#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
|
||||
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
|
||||
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
|
||||
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
|
||||
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
|
||||
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
|
||||
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
|
||||
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
|
||||
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
|
||||
|
||||
@@ -55,7 +55,7 @@ STATIC_OBJDIR := $(OBJDIR)/staticobjs
|
||||
OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \
|
||||
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \
|
||||
btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \
|
||||
relo_core.o usdt.o zip.o elf.o features.o
|
||||
relo_core.o usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
|
||||
SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS))
|
||||
STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS))
|
||||
|
||||
@@ -119,13 +119,13 @@ $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION): $(SHARED_OBJS)
|
||||
-Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
|
||||
$^ $(ALL_LDFLAGS) -o $@
|
||||
|
||||
$(OBJDIR)/libbpf.pc: force
|
||||
$(OBJDIR)/libbpf.pc: force | $(OBJDIR)
|
||||
$(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \
|
||||
-e "s|@LIBDIR@|$(LIBDIR_PC)|" \
|
||||
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
|
||||
< libbpf.pc.template > $@
|
||||
|
||||
$(STATIC_OBJDIR) $(SHARED_OBJDIR):
|
||||
$(OBJDIR) $(STATIC_OBJDIR) $(SHARED_OBJDIR):
|
||||
$(call msg,MKDIR,$@)
|
||||
$(Q)mkdir -p $@
|
||||
|
||||
|
||||
@@ -105,7 +105,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
|
||||
*/
|
||||
int probe_memcg_account(int token_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
|
||||
BPF_EXIT_INSN(),
|
||||
|
||||
728
src/btf.c
728
src/btf.c
@@ -116,6 +116,9 @@ struct btf {
|
||||
/* whether strings are already deduplicated */
|
||||
bool strs_deduped;
|
||||
|
||||
/* whether base_btf should be freed in btf_free for this instance */
|
||||
bool owns_base;
|
||||
|
||||
/* BTF object FD, if loaded into kernel */
|
||||
int fd;
|
||||
|
||||
@@ -598,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf)
|
||||
__u32 i, n = btf__type_cnt(btf);
|
||||
int err;
|
||||
|
||||
for (i = 1; i < n; i++) {
|
||||
for (i = btf->start_id; i < n; i++) {
|
||||
t = btf_type_by_id(btf, i);
|
||||
err = btf_validate_type(btf, t, i);
|
||||
if (err)
|
||||
@@ -969,6 +972,8 @@ void btf__free(struct btf *btf)
|
||||
free(btf->raw_data);
|
||||
free(btf->raw_data_swapped);
|
||||
free(btf->type_offs);
|
||||
if (btf->owns_base)
|
||||
btf__free(btf->base_btf);
|
||||
free(btf);
|
||||
}
|
||||
|
||||
@@ -991,6 +996,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
|
||||
btf->base_btf = base_btf;
|
||||
btf->start_id = btf__type_cnt(base_btf);
|
||||
btf->start_str_off = base_btf->hdr->str_len;
|
||||
btf->swapped_endian = base_btf->swapped_endian;
|
||||
}
|
||||
|
||||
/* +1 for empty string at offset 0 */
|
||||
@@ -1084,16 +1090,86 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
|
||||
return libbpf_ptr(btf_new(data, size, base_btf));
|
||||
}
|
||||
|
||||
struct btf_elf_secs {
|
||||
Elf_Data *btf_data;
|
||||
Elf_Data *btf_ext_data;
|
||||
Elf_Data *btf_base_data;
|
||||
};
|
||||
|
||||
static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
|
||||
{
|
||||
Elf_Scn *scn = NULL;
|
||||
Elf_Data *data;
|
||||
GElf_Ehdr ehdr;
|
||||
size_t shstrndx;
|
||||
int idx = 0;
|
||||
|
||||
if (!gelf_getehdr(elf, &ehdr)) {
|
||||
pr_warn("failed to get EHDR from %s\n", path);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (elf_getshdrstrndx(elf, &shstrndx)) {
|
||||
pr_warn("failed to get section names section index for %s\n",
|
||||
path);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
|
||||
pr_warn("failed to get e_shstrndx from %s\n", path);
|
||||
goto err;
|
||||
}
|
||||
|
||||
while ((scn = elf_nextscn(elf, scn)) != NULL) {
|
||||
Elf_Data **field;
|
||||
GElf_Shdr sh;
|
||||
char *name;
|
||||
|
||||
idx++;
|
||||
if (gelf_getshdr(scn, &sh) != &sh) {
|
||||
pr_warn("failed to get section(%d) header from %s\n",
|
||||
idx, path);
|
||||
goto err;
|
||||
}
|
||||
name = elf_strptr(elf, shstrndx, sh.sh_name);
|
||||
if (!name) {
|
||||
pr_warn("failed to get section(%d) name from %s\n",
|
||||
idx, path);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (strcmp(name, BTF_ELF_SEC) == 0)
|
||||
field = &secs->btf_data;
|
||||
else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
|
||||
field = &secs->btf_ext_data;
|
||||
else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
|
||||
field = &secs->btf_base_data;
|
||||
else
|
||||
continue;
|
||||
|
||||
data = elf_getdata(scn, 0);
|
||||
if (!data) {
|
||||
pr_warn("failed to get section(%d, %s) data from %s\n",
|
||||
idx, name, path);
|
||||
goto err;
|
||||
}
|
||||
*field = data;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
return -LIBBPF_ERRNO__FORMAT;
|
||||
}
|
||||
|
||||
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
|
||||
struct btf_ext **btf_ext)
|
||||
{
|
||||
Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
|
||||
int err = 0, fd = -1, idx = 0;
|
||||
struct btf_elf_secs secs = {};
|
||||
struct btf *dist_base_btf = NULL;
|
||||
struct btf *btf = NULL;
|
||||
Elf_Scn *scn = NULL;
|
||||
int err = 0, fd = -1;
|
||||
Elf *elf = NULL;
|
||||
GElf_Ehdr ehdr;
|
||||
size_t shstrndx;
|
||||
|
||||
if (elf_version(EV_CURRENT) == EV_NONE) {
|
||||
pr_warn("failed to init libelf for %s\n", path);
|
||||
@@ -1107,73 +1183,48 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
err = -LIBBPF_ERRNO__FORMAT;
|
||||
|
||||
elf = elf_begin(fd, ELF_C_READ, NULL);
|
||||
if (!elf) {
|
||||
pr_warn("failed to open %s as ELF file\n", path);
|
||||
goto done;
|
||||
}
|
||||
if (!gelf_getehdr(elf, &ehdr)) {
|
||||
pr_warn("failed to get EHDR from %s\n", path);
|
||||
|
||||
err = btf_find_elf_sections(elf, path, &secs);
|
||||
if (err)
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (elf_getshdrstrndx(elf, &shstrndx)) {
|
||||
pr_warn("failed to get section names section index for %s\n",
|
||||
path);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
|
||||
pr_warn("failed to get e_shstrndx from %s\n", path);
|
||||
goto done;
|
||||
}
|
||||
|
||||
while ((scn = elf_nextscn(elf, scn)) != NULL) {
|
||||
GElf_Shdr sh;
|
||||
char *name;
|
||||
|
||||
idx++;
|
||||
if (gelf_getshdr(scn, &sh) != &sh) {
|
||||
pr_warn("failed to get section(%d) header from %s\n",
|
||||
idx, path);
|
||||
goto done;
|
||||
}
|
||||
name = elf_strptr(elf, shstrndx, sh.sh_name);
|
||||
if (!name) {
|
||||
pr_warn("failed to get section(%d) name from %s\n",
|
||||
idx, path);
|
||||
goto done;
|
||||
}
|
||||
if (strcmp(name, BTF_ELF_SEC) == 0) {
|
||||
btf_data = elf_getdata(scn, 0);
|
||||
if (!btf_data) {
|
||||
pr_warn("failed to get section(%d, %s) data from %s\n",
|
||||
idx, name, path);
|
||||
goto done;
|
||||
}
|
||||
continue;
|
||||
} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
|
||||
btf_ext_data = elf_getdata(scn, 0);
|
||||
if (!btf_ext_data) {
|
||||
pr_warn("failed to get section(%d, %s) data from %s\n",
|
||||
idx, name, path);
|
||||
goto done;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (!btf_data) {
|
||||
if (!secs.btf_data) {
|
||||
pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
|
||||
err = -ENODATA;
|
||||
goto done;
|
||||
}
|
||||
btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
|
||||
err = libbpf_get_error(btf);
|
||||
if (err)
|
||||
|
||||
if (secs.btf_base_data) {
|
||||
dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
|
||||
NULL);
|
||||
if (IS_ERR(dist_base_btf)) {
|
||||
err = PTR_ERR(dist_base_btf);
|
||||
dist_base_btf = NULL;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
|
||||
dist_base_btf ?: base_btf);
|
||||
if (IS_ERR(btf)) {
|
||||
err = PTR_ERR(btf);
|
||||
goto done;
|
||||
}
|
||||
if (dist_base_btf && base_btf) {
|
||||
err = btf__relocate(btf, base_btf);
|
||||
if (err)
|
||||
goto done;
|
||||
btf__free(dist_base_btf);
|
||||
dist_base_btf = NULL;
|
||||
}
|
||||
|
||||
if (dist_base_btf)
|
||||
btf->owns_base = true;
|
||||
|
||||
switch (gelf_getclass(elf)) {
|
||||
case ELFCLASS32:
|
||||
@@ -1187,11 +1238,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
|
||||
break;
|
||||
}
|
||||
|
||||
if (btf_ext && btf_ext_data) {
|
||||
*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
|
||||
err = libbpf_get_error(*btf_ext);
|
||||
if (err)
|
||||
if (btf_ext && secs.btf_ext_data) {
|
||||
*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
|
||||
if (IS_ERR(*btf_ext)) {
|
||||
err = PTR_ERR(*btf_ext);
|
||||
goto done;
|
||||
}
|
||||
} else if (btf_ext) {
|
||||
*btf_ext = NULL;
|
||||
}
|
||||
@@ -1205,6 +1257,7 @@ done:
|
||||
|
||||
if (btf_ext)
|
||||
btf_ext__free(*btf_ext);
|
||||
btf__free(dist_base_btf);
|
||||
btf__free(btf);
|
||||
|
||||
return ERR_PTR(err);
|
||||
@@ -1739,9 +1792,8 @@ struct btf_pipe {
|
||||
struct hashmap *str_off_map; /* map string offsets from src to dst */
|
||||
};
|
||||
|
||||
static int btf_rewrite_str(__u32 *str_off, void *ctx)
|
||||
static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
|
||||
{
|
||||
struct btf_pipe *p = ctx;
|
||||
long mapped_off;
|
||||
int off, err;
|
||||
|
||||
@@ -1771,10 +1823,11 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
|
||||
static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
|
||||
{
|
||||
struct btf_pipe p = { .src = src_btf, .dst = btf };
|
||||
struct btf_field_iter it;
|
||||
struct btf_type *t;
|
||||
__u32 *str_off;
|
||||
int sz, err;
|
||||
|
||||
sz = btf_type_size(src_type);
|
||||
@@ -1782,35 +1835,33 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
|
||||
return libbpf_err(sz);
|
||||
|
||||
/* deconstruct BTF, if necessary, and invalidate raw_data */
|
||||
if (btf_ensure_modifiable(btf))
|
||||
if (btf_ensure_modifiable(p->dst))
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
t = btf_add_type_mem(btf, sz);
|
||||
t = btf_add_type_mem(p->dst, sz);
|
||||
if (!t)
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
memcpy(t, src_type, sz);
|
||||
|
||||
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
|
||||
if (err)
|
||||
return libbpf_err(err);
|
||||
|
||||
return btf_commit_type(btf, sz);
|
||||
while ((str_off = btf_field_iter_next(&it))) {
|
||||
err = btf_rewrite_str(p, str_off);
|
||||
if (err)
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
return btf_commit_type(p->dst, sz);
|
||||
}
|
||||
|
||||
static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
|
||||
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
|
||||
{
|
||||
struct btf *btf = ctx;
|
||||
struct btf_pipe p = { .src = src_btf, .dst = btf };
|
||||
|
||||
if (!*type_id) /* nothing to do for VOID references */
|
||||
return 0;
|
||||
|
||||
/* we haven't updated btf's type count yet, so
|
||||
* btf->start_id + btf->nr_types - 1 is the type ID offset we should
|
||||
* add to all newly added BTF types
|
||||
*/
|
||||
*type_id += btf->start_id + btf->nr_types - 1;
|
||||
return 0;
|
||||
return btf_add_type(&p, src_type);
|
||||
}
|
||||
|
||||
static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
|
||||
@@ -1858,6 +1909,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
|
||||
memcpy(t, src_btf->types_data, data_sz);
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
struct btf_field_iter it;
|
||||
__u32 *type_id, *str_off;
|
||||
|
||||
sz = btf_type_size(t);
|
||||
if (sz < 0) {
|
||||
/* unlikely, has to be corrupted src_btf */
|
||||
@@ -1869,14 +1923,30 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
|
||||
*off = t - btf->types_data;
|
||||
|
||||
/* add, dedup, and remap strings referenced by this BTF type */
|
||||
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
|
||||
if (err)
|
||||
goto err_out;
|
||||
while ((str_off = btf_field_iter_next(&it))) {
|
||||
err = btf_rewrite_str(&p, str_off);
|
||||
if (err)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* remap all type IDs referenced from this BTF type */
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
/* remap all type IDs referenced from this BTF type */
|
||||
err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
|
||||
if (err)
|
||||
goto err_out;
|
||||
while ((type_id = btf_field_iter_next(&it))) {
|
||||
if (!*type_id) /* nothing to do for VOID references */
|
||||
continue;
|
||||
|
||||
/* we haven't updated btf's type count yet, so
|
||||
* btf->start_id + btf->nr_types - 1 is the type ID offset we should
|
||||
* add to all newly added BTF types
|
||||
*/
|
||||
*type_id += btf->start_id + btf->nr_types - 1;
|
||||
}
|
||||
|
||||
/* go to next type data and type offset index entry */
|
||||
t += sz;
|
||||
@@ -3453,11 +3523,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *
|
||||
int i, r;
|
||||
|
||||
for (i = 0; i < d->btf->nr_types; i++) {
|
||||
struct btf_field_iter it;
|
||||
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
|
||||
__u32 *str_off;
|
||||
|
||||
r = btf_type_visit_str_offs(t, fn, ctx);
|
||||
r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
while ((str_off = btf_field_iter_next(&it))) {
|
||||
r = fn(str_off, ctx);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
if (!d->btf_ext)
|
||||
@@ -4919,10 +4997,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
|
||||
|
||||
for (i = 0; i < d->btf->nr_types; i++) {
|
||||
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
|
||||
struct btf_field_iter it;
|
||||
__u32 *type_id;
|
||||
|
||||
r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
|
||||
r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
while ((type_id = btf_field_iter_next(&it))) {
|
||||
__u32 resolved_id, new_id;
|
||||
|
||||
resolved_id = resolve_type_id(d, *type_id);
|
||||
new_id = d->hypot_map[resolved_id];
|
||||
if (new_id > BTF_MAX_NR_TYPES)
|
||||
return -EINVAL;
|
||||
|
||||
*type_id = new_id;
|
||||
}
|
||||
}
|
||||
|
||||
if (!d->btf_ext)
|
||||
@@ -5003,136 +5094,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt
|
||||
return btf__parse_split(path, vmlinux_btf);
|
||||
}
|
||||
|
||||
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
|
||||
{
|
||||
int i, n, err;
|
||||
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
return 0;
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_VOLATILE:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_DECL_TAG:
|
||||
case BTF_KIND_TYPE_TAG:
|
||||
return visit(&t->type, ctx);
|
||||
|
||||
case BTF_KIND_ARRAY: {
|
||||
struct btf_array *a = btf_array(t);
|
||||
|
||||
err = visit(&a->type, ctx);
|
||||
err = err ?: visit(&a->index_type, ctx);
|
||||
return err;
|
||||
}
|
||||
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION: {
|
||||
struct btf_member *m = btf_members(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->type, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
case BTF_KIND_FUNC_PROTO: {
|
||||
struct btf_param *m = btf_params(t);
|
||||
|
||||
err = visit(&t->type, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->type, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
case BTF_KIND_DATASEC: {
|
||||
struct btf_var_secinfo *m = btf_var_secinfos(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->type, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
|
||||
{
|
||||
int i, n, err;
|
||||
|
||||
err = visit(&t->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION: {
|
||||
struct btf_member *m = btf_members(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_ENUM: {
|
||||
struct btf_enum *m = btf_enum(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_ENUM64: {
|
||||
struct btf_enum64 *m = btf_enum64(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_FUNC_PROTO: {
|
||||
struct btf_param *m = btf_params(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
|
||||
{
|
||||
const struct btf_ext_info *seg;
|
||||
@@ -5212,3 +5173,328 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct btf_distill {
|
||||
struct btf_pipe pipe;
|
||||
int *id_map;
|
||||
unsigned int split_start_id;
|
||||
unsigned int split_start_str;
|
||||
int diff_id;
|
||||
};
|
||||
|
||||
static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
|
||||
{
|
||||
struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
|
||||
struct btf_field_iter it;
|
||||
__u32 *id;
|
||||
int err;
|
||||
|
||||
err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
return err;
|
||||
while ((id = btf_field_iter_next(&it))) {
|
||||
struct btf_type *base_t;
|
||||
|
||||
if (!*id)
|
||||
continue;
|
||||
/* split BTF id, not needed */
|
||||
if (*id >= dist->split_start_id)
|
||||
continue;
|
||||
/* already added ? */
|
||||
if (dist->id_map[*id] > 0)
|
||||
continue;
|
||||
|
||||
/* only a subset of base BTF types should be referenced from
|
||||
* split BTF; ensure nothing unexpected is referenced.
|
||||
*/
|
||||
base_t = btf_type_by_id(dist->pipe.src, *id);
|
||||
switch (btf_kind(base_t)) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_ARRAY:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_VOLATILE:
|
||||
case BTF_KIND_FUNC_PROTO:
|
||||
case BTF_KIND_TYPE_TAG:
|
||||
dist->id_map[*id] = *id;
|
||||
break;
|
||||
default:
|
||||
pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
|
||||
*id, btf_kind(base_t));
|
||||
return -EINVAL;
|
||||
}
|
||||
/* If a base type is used, ensure types it refers to are
|
||||
* marked as used also; so for example if we find a PTR to INT
|
||||
* we need both the PTR and INT.
|
||||
*
|
||||
* The only exception is named struct/unions, since distilled
|
||||
* base BTF composite types have no members.
|
||||
*/
|
||||
if (btf_is_composite(base_t) && base_t->name_off)
|
||||
continue;
|
||||
err = btf_add_distilled_type_ids(dist, *id);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btf_add_distilled_types(struct btf_distill *dist)
|
||||
{
|
||||
bool adding_to_base = dist->pipe.dst->start_id == 1;
|
||||
int id = btf__type_cnt(dist->pipe.dst);
|
||||
struct btf_type *t;
|
||||
int i, err = 0;
|
||||
|
||||
|
||||
/* Add types for each of the required references to either distilled
|
||||
* base or split BTF, depending on type characteristics.
|
||||
*/
|
||||
for (i = 1; i < dist->split_start_id; i++) {
|
||||
const char *name;
|
||||
int kind;
|
||||
|
||||
if (!dist->id_map[i])
|
||||
continue;
|
||||
t = btf_type_by_id(dist->pipe.src, i);
|
||||
kind = btf_kind(t);
|
||||
name = btf__name_by_offset(dist->pipe.src, t->name_off);
|
||||
|
||||
switch (kind) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_FWD:
|
||||
/* Named int, float, fwd are added to base. */
|
||||
if (!adding_to_base)
|
||||
continue;
|
||||
err = btf_add_type(&dist->pipe, t);
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
/* Named struct/union are added to base as 0-vlen
|
||||
* struct/union of same size. Anonymous struct/unions
|
||||
* are added to split BTF as-is.
|
||||
*/
|
||||
if (adding_to_base) {
|
||||
if (!t->name_off)
|
||||
continue;
|
||||
err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
|
||||
} else {
|
||||
if (t->name_off)
|
||||
continue;
|
||||
err = btf_add_type(&dist->pipe, t);
|
||||
}
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
/* Named enum[64]s are added to base as a sized
|
||||
* enum; relocation will match with appropriately-named
|
||||
* and sized enum or enum64.
|
||||
*
|
||||
* Anonymous enums are added to split BTF as-is.
|
||||
*/
|
||||
if (adding_to_base) {
|
||||
if (!t->name_off)
|
||||
continue;
|
||||
err = btf__add_enum(dist->pipe.dst, name, t->size);
|
||||
} else {
|
||||
if (t->name_off)
|
||||
continue;
|
||||
err = btf_add_type(&dist->pipe, t);
|
||||
}
|
||||
break;
|
||||
case BTF_KIND_ARRAY:
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_VOLATILE:
|
||||
case BTF_KIND_FUNC_PROTO:
|
||||
case BTF_KIND_TYPE_TAG:
|
||||
/* All other types are added to split BTF. */
|
||||
if (adding_to_base)
|
||||
continue;
|
||||
err = btf_add_type(&dist->pipe, t);
|
||||
break;
|
||||
default:
|
||||
pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
|
||||
name, i, kind);
|
||||
return -EINVAL;
|
||||
|
||||
}
|
||||
if (err < 0)
|
||||
break;
|
||||
dist->id_map[i] = id++;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Split BTF ids without a mapping will be shifted downwards since distilled
|
||||
* base BTF is smaller than the original base BTF. For those that have a
|
||||
* mapping (either to base or updated split BTF), update the id based on
|
||||
* that mapping.
|
||||
*/
|
||||
static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
|
||||
{
|
||||
struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
|
||||
struct btf_field_iter it;
|
||||
__u32 *id;
|
||||
int err;
|
||||
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
return err;
|
||||
while ((id = btf_field_iter_next(&it))) {
|
||||
if (dist->id_map[*id])
|
||||
*id = dist->id_map[*id];
|
||||
else if (*id >= dist->split_start_id)
|
||||
*id -= dist->diff_id;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Create updated split BTF with distilled base BTF; distilled base BTF
|
||||
* consists of BTF information required to clarify the types that split
|
||||
* BTF refers to, omitting unneeded details. Specifically it will contain
|
||||
* base types and memberless definitions of named structs, unions and enumerated
|
||||
* types. Associated reference types like pointers, arrays and anonymous
|
||||
* structs, unions and enumerated types will be added to split BTF.
|
||||
* Size is recorded for named struct/unions to help guide matching to the
|
||||
* target base BTF during later relocation.
|
||||
*
|
||||
* The only case where structs, unions or enumerated types are fully represented
|
||||
* is when they are anonymous; in such cases, the anonymous type is added to
|
||||
* split BTF in full.
|
||||
*
|
||||
* We return newly-created split BTF where the split BTF refers to a newly-created
|
||||
* distilled base BTF. Both must be freed separately by the caller.
|
||||
*/
|
||||
int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
|
||||
struct btf **new_split_btf)
|
||||
{
|
||||
struct btf *new_base = NULL, *new_split = NULL;
|
||||
const struct btf *old_base;
|
||||
unsigned int n = btf__type_cnt(src_btf);
|
||||
struct btf_distill dist = {};
|
||||
struct btf_type *t;
|
||||
int i, err = 0;
|
||||
|
||||
/* src BTF must be split BTF. */
|
||||
old_base = btf__base_btf(src_btf);
|
||||
if (!new_base_btf || !new_split_btf || !old_base)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
new_base = btf__new_empty();
|
||||
if (!new_base)
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
btf__set_endianness(new_base, btf__endianness(src_btf));
|
||||
|
||||
dist.id_map = calloc(n, sizeof(*dist.id_map));
|
||||
if (!dist.id_map) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
dist.pipe.src = src_btf;
|
||||
dist.pipe.dst = new_base;
|
||||
dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
|
||||
if (IS_ERR(dist.pipe.str_off_map)) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
dist.split_start_id = btf__type_cnt(old_base);
|
||||
dist.split_start_str = old_base->hdr->str_len;
|
||||
|
||||
/* Pass over src split BTF; generate the list of base BTF type ids it
|
||||
* references; these will constitute our distilled BTF set to be
|
||||
* distributed over base and split BTF as appropriate.
|
||||
*/
|
||||
for (i = src_btf->start_id; i < n; i++) {
|
||||
err = btf_add_distilled_type_ids(&dist, i);
|
||||
if (err < 0)
|
||||
goto done;
|
||||
}
|
||||
/* Next add types for each of the required references to base BTF and split BTF
|
||||
* in turn.
|
||||
*/
|
||||
err = btf_add_distilled_types(&dist);
|
||||
if (err < 0)
|
||||
goto done;
|
||||
|
||||
/* Create new split BTF with distilled base BTF as its base; the final
|
||||
* state is split BTF with distilled base BTF that represents enough
|
||||
* about its base references to allow it to be relocated with the base
|
||||
* BTF available.
|
||||
*/
|
||||
new_split = btf__new_empty_split(new_base);
|
||||
if (!new_split) {
|
||||
err = -errno;
|
||||
goto done;
|
||||
}
|
||||
dist.pipe.dst = new_split;
|
||||
/* First add all split types */
|
||||
for (i = src_btf->start_id; i < n; i++) {
|
||||
t = btf_type_by_id(src_btf, i);
|
||||
err = btf_add_type(&dist.pipe, t);
|
||||
if (err < 0)
|
||||
goto done;
|
||||
}
|
||||
/* Now add distilled types to split BTF that are not added to base. */
|
||||
err = btf_add_distilled_types(&dist);
|
||||
if (err < 0)
|
||||
goto done;
|
||||
|
||||
/* All split BTF ids will be shifted downwards since there are less base
|
||||
* BTF ids in distilled base BTF.
|
||||
*/
|
||||
dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
|
||||
|
||||
n = btf__type_cnt(new_split);
|
||||
/* Now update base/split BTF ids. */
|
||||
for (i = 1; i < n; i++) {
|
||||
err = btf_update_distilled_type_ids(&dist, i);
|
||||
if (err < 0)
|
||||
break;
|
||||
}
|
||||
done:
|
||||
free(dist.id_map);
|
||||
hashmap__free(dist.pipe.str_off_map);
|
||||
if (err) {
|
||||
btf__free(new_split);
|
||||
btf__free(new_base);
|
||||
return libbpf_err(err);
|
||||
}
|
||||
*new_base_btf = new_base;
|
||||
*new_split_btf = new_split;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct btf_header *btf_header(const struct btf *btf)
|
||||
{
|
||||
return btf->hdr;
|
||||
}
|
||||
|
||||
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
|
||||
{
|
||||
btf->base_btf = (struct btf *)base_btf;
|
||||
btf->start_id = btf__type_cnt(base_btf);
|
||||
btf->start_str_off = base_btf->hdr->str_len;
|
||||
}
|
||||
|
||||
int btf__relocate(struct btf *btf, const struct btf *base_btf)
|
||||
{
|
||||
int err = btf_relocate(btf, base_btf, NULL);
|
||||
|
||||
if (!err)
|
||||
btf->owns_base = false;
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
36
src/btf.h
36
src/btf.h
@@ -18,6 +18,7 @@ extern "C" {
|
||||
|
||||
#define BTF_ELF_SEC ".BTF"
|
||||
#define BTF_EXT_ELF_SEC ".BTF.ext"
|
||||
#define BTF_BASE_ELF_SEC ".BTF.base"
|
||||
#define MAPS_ELF_SEC ".maps"
|
||||
|
||||
struct btf;
|
||||
@@ -107,6 +108,27 @@ LIBBPF_API struct btf *btf__new_empty(void);
|
||||
*/
|
||||
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
|
||||
|
||||
/**
|
||||
* @brief **btf__distill_base()** creates new versions of the split BTF
|
||||
* *src_btf* and its base BTF. The new base BTF will only contain the types
|
||||
* needed to improve robustness of the split BTF to small changes in base BTF.
|
||||
* When that split BTF is loaded against a (possibly changed) base, this
|
||||
* distilled base BTF will help update references to that (possibly changed)
|
||||
* base BTF.
|
||||
*
|
||||
* Both the new split and its associated new base BTF must be freed by
|
||||
* the caller.
|
||||
*
|
||||
* If successful, 0 is returned and **new_base_btf** and **new_split_btf**
|
||||
* will point at new base/split BTF. Both the new split and its associated
|
||||
* new base BTF must be freed by the caller.
|
||||
*
|
||||
* A negative value is returned on error and the thread-local `errno` variable
|
||||
* is set to the error code as well.
|
||||
*/
|
||||
LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
|
||||
struct btf **new_split_btf);
|
||||
|
||||
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
|
||||
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
|
||||
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
|
||||
@@ -231,6 +253,20 @@ struct btf_dedup_opts {
|
||||
|
||||
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
|
||||
|
||||
/**
|
||||
* @brief **btf__relocate()** will check the split BTF *btf* for references
|
||||
* to base BTF kinds, and verify those references are compatible with
|
||||
* *base_btf*; if they are, *btf* is adjusted such that is re-parented to
|
||||
* *base_btf* and type ids and strings are adjusted to accommodate this.
|
||||
*
|
||||
* If successful, 0 is returned and **btf** now has **base_btf** as its
|
||||
* base.
|
||||
*
|
||||
* A negative value is returned on error and the thread-local `errno` variable
|
||||
* is set to the error code as well.
|
||||
*/
|
||||
LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
|
||||
|
||||
struct btf_dump;
|
||||
|
||||
struct btf_dump_opts {
|
||||
|
||||
@@ -1559,10 +1559,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
|
||||
* Clang for BPF target generates func_proto with no
|
||||
* args as a func_proto with a single void arg (e.g.,
|
||||
* `int (*f)(void)` vs just `int (*f)()`). We are
|
||||
* going to pretend there are no args for such case.
|
||||
* going to emit valid empty args (void) syntax for
|
||||
* such case. Similarly and conveniently, valid
|
||||
* no args case can be special-cased here as well.
|
||||
*/
|
||||
if (vlen == 1 && p->type == 0) {
|
||||
btf_dump_printf(d, ")");
|
||||
if (vlen == 0 || (vlen == 1 && p->type == 0)) {
|
||||
btf_dump_printf(d, "void)");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
177
src/btf_iter.c
Normal file
177
src/btf_iter.c
Normal file
@@ -0,0 +1,177 @@
|
||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
/* Copyright (c) 2021 Facebook */
|
||||
/* Copyright (c) 2024, Oracle and/or its affiliates. */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/btf.h>
|
||||
|
||||
#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
|
||||
|
||||
#else
|
||||
#include "btf.h"
|
||||
#include "libbpf_internal.h"
|
||||
#endif
|
||||
|
||||
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
|
||||
enum btf_field_iter_kind iter_kind)
|
||||
{
|
||||
it->p = NULL;
|
||||
it->m_idx = -1;
|
||||
it->off_idx = 0;
|
||||
it->vlen = 0;
|
||||
|
||||
switch (iter_kind) {
|
||||
case BTF_FIELD_ITER_IDS:
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_UNKN:
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
it->desc = (struct btf_field_desc) {};
|
||||
break;
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_VOLATILE:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_DECL_TAG:
|
||||
case BTF_KIND_TYPE_TAG:
|
||||
it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
|
||||
break;
|
||||
case BTF_KIND_ARRAY:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
|
||||
sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
0, {},
|
||||
sizeof(struct btf_member),
|
||||
1, {offsetof(struct btf_member, type)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_FUNC_PROTO:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
1, {offsetof(struct btf_type, type)},
|
||||
sizeof(struct btf_param),
|
||||
1, {offsetof(struct btf_param, type)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_DATASEC:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
0, {},
|
||||
sizeof(struct btf_var_secinfo),
|
||||
1, {offsetof(struct btf_var_secinfo, type)}
|
||||
};
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case BTF_FIELD_ITER_STRS:
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_UNKN:
|
||||
it->desc = (struct btf_field_desc) {};
|
||||
break;
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_ARRAY:
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_VOLATILE:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_DECL_TAG:
|
||||
case BTF_KIND_TYPE_TAG:
|
||||
case BTF_KIND_DATASEC:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
1, {offsetof(struct btf_type, name_off)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
1, {offsetof(struct btf_type, name_off)},
|
||||
sizeof(struct btf_enum),
|
||||
1, {offsetof(struct btf_enum, name_off)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_ENUM64:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
1, {offsetof(struct btf_type, name_off)},
|
||||
sizeof(struct btf_enum64),
|
||||
1, {offsetof(struct btf_enum64, name_off)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
1, {offsetof(struct btf_type, name_off)},
|
||||
sizeof(struct btf_member),
|
||||
1, {offsetof(struct btf_member, name_off)}
|
||||
};
|
||||
break;
|
||||
case BTF_KIND_FUNC_PROTO:
|
||||
it->desc = (struct btf_field_desc) {
|
||||
1, {offsetof(struct btf_type, name_off)},
|
||||
sizeof(struct btf_param),
|
||||
1, {offsetof(struct btf_param, name_off)}
|
||||
};
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (it->desc.m_sz)
|
||||
it->vlen = btf_vlen(t);
|
||||
|
||||
it->p = t;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u32 *btf_field_iter_next(struct btf_field_iter *it)
|
||||
{
|
||||
if (!it->p)
|
||||
return NULL;
|
||||
|
||||
if (it->m_idx < 0) {
|
||||
if (it->off_idx < it->desc.t_off_cnt)
|
||||
return it->p + it->desc.t_offs[it->off_idx++];
|
||||
/* move to per-member iteration */
|
||||
it->m_idx = 0;
|
||||
it->p += sizeof(struct btf_type);
|
||||
it->off_idx = 0;
|
||||
}
|
||||
|
||||
/* if type doesn't have members, stop */
|
||||
if (it->desc.m_sz == 0) {
|
||||
it->p = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (it->off_idx >= it->desc.m_off_cnt) {
|
||||
/* exhausted this member's fields, go to the next member */
|
||||
it->m_idx++;
|
||||
it->p += it->desc.m_sz;
|
||||
it->off_idx = 0;
|
||||
}
|
||||
|
||||
if (it->m_idx < it->vlen)
|
||||
return it->p + it->desc.m_offs[it->off_idx++];
|
||||
|
||||
it->p = NULL;
|
||||
return NULL;
|
||||
}
|
||||
519
src/btf_relocate.c
Normal file
519
src/btf_relocate.c
Normal file
@@ -0,0 +1,519 @@
|
||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
/* Copyright (c) 2024, Oracle and/or its affiliates. */
|
||||
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/bsearch.h>
|
||||
#include <linux/btf.h>
|
||||
#include <linux/sort.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/bpf_verifier.h>
|
||||
|
||||
#define btf_type_by_id (struct btf_type *)btf_type_by_id
|
||||
#define btf__type_cnt btf_nr_types
|
||||
#define btf__base_btf btf_base_btf
|
||||
#define btf__name_by_offset btf_name_by_offset
|
||||
#define btf__str_by_offset btf_str_by_offset
|
||||
#define btf_kflag btf_type_kflag
|
||||
|
||||
#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
|
||||
#define free(ptr) kvfree(ptr)
|
||||
#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
|
||||
|
||||
#else
|
||||
|
||||
#include "btf.h"
|
||||
#include "bpf.h"
|
||||
#include "libbpf.h"
|
||||
#include "libbpf_internal.h"
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
struct btf;
|
||||
|
||||
struct btf_relocate {
|
||||
struct btf *btf;
|
||||
const struct btf *base_btf;
|
||||
const struct btf *dist_base_btf;
|
||||
unsigned int nr_base_types;
|
||||
unsigned int nr_split_types;
|
||||
unsigned int nr_dist_base_types;
|
||||
int dist_str_len;
|
||||
int base_str_len;
|
||||
__u32 *id_map;
|
||||
__u32 *str_map;
|
||||
};
|
||||
|
||||
/* Set temporarily in relocation id_map if distilled base struct/union is
|
||||
* embedded in a split BTF struct/union; in such a case, size information must
|
||||
* match between distilled base BTF and base BTF representation of type.
|
||||
*/
|
||||
#define BTF_IS_EMBEDDED ((__u32)-1)
|
||||
|
||||
/* <name, size, id> triple used in sorting/searching distilled base BTF. */
|
||||
struct btf_name_info {
|
||||
const char *name;
|
||||
/* set when search requires a size match */
|
||||
bool needs_size: 1;
|
||||
unsigned int size: 31;
|
||||
__u32 id;
|
||||
};
|
||||
|
||||
static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i)
|
||||
{
|
||||
struct btf_type *t = btf_type_by_id(r->btf, i);
|
||||
struct btf_field_iter it;
|
||||
__u32 *id;
|
||||
int err;
|
||||
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
while ((id = btf_field_iter_next(&it)))
|
||||
*id = r->id_map[*id];
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Simple string comparison used for sorting within BTF, since all distilled
|
||||
* types are named. If strings match, and size is non-zero for both elements
|
||||
* fall back to using size for ordering.
|
||||
*/
|
||||
static int cmp_btf_name_size(const void *n1, const void *n2)
|
||||
{
|
||||
const struct btf_name_info *ni1 = n1;
|
||||
const struct btf_name_info *ni2 = n2;
|
||||
int name_diff = strcmp(ni1->name, ni2->name);
|
||||
|
||||
if (!name_diff && ni1->needs_size && ni2->needs_size)
|
||||
return ni2->size - ni1->size;
|
||||
return name_diff;
|
||||
}
|
||||
|
||||
/* Binary search with a small twist; find leftmost element that matches
|
||||
* so that we can then iterate through all exact matches. So for example
|
||||
* searching { "a", "bb", "bb", "c" } we would always match on the
|
||||
* leftmost "bb".
|
||||
*/
|
||||
static struct btf_name_info *search_btf_name_size(struct btf_name_info *key,
|
||||
struct btf_name_info *vals,
|
||||
int nelems)
|
||||
{
|
||||
struct btf_name_info *ret = NULL;
|
||||
int high = nelems - 1;
|
||||
int low = 0;
|
||||
|
||||
while (low <= high) {
|
||||
int mid = (low + high)/2;
|
||||
struct btf_name_info *val = &vals[mid];
|
||||
int diff = cmp_btf_name_size(key, val);
|
||||
|
||||
if (diff == 0)
|
||||
ret = val;
|
||||
/* even if found, keep searching for leftmost match */
|
||||
if (diff <= 0)
|
||||
high = mid - 1;
|
||||
else
|
||||
low = mid + 1;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* If a member of a split BTF struct/union refers to a base BTF
|
||||
* struct/union, mark that struct/union id temporarily in the id_map
|
||||
* with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef
|
||||
* reference types, but if a pointer is encountered, the type is no longer
|
||||
* considered embedded.
|
||||
*/
|
||||
static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
|
||||
{
|
||||
struct btf_type *t = btf_type_by_id(r->btf, i);
|
||||
struct btf_field_iter it;
|
||||
__u32 *id;
|
||||
int err;
|
||||
|
||||
if (!btf_is_composite(t))
|
||||
return 0;
|
||||
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
while ((id = btf_field_iter_next(&it))) {
|
||||
__u32 next_id = *id;
|
||||
|
||||
while (next_id) {
|
||||
t = btf_type_by_id(r->btf, next_id);
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_VOLATILE:
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_TYPE_TAG:
|
||||
next_id = t->type;
|
||||
break;
|
||||
case BTF_KIND_ARRAY: {
|
||||
struct btf_array *a = btf_array(t);
|
||||
|
||||
next_id = a->type;
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
if (next_id < r->nr_dist_base_types)
|
||||
r->id_map[next_id] = BTF_IS_EMBEDDED;
|
||||
next_id = 0;
|
||||
break;
|
||||
default:
|
||||
next_id = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate
|
||||
* through base BTF looking up distilled type (using binary search) equivalents.
|
||||
*/
|
||||
static int btf_relocate_map_distilled_base(struct btf_relocate *r)
|
||||
{
|
||||
struct btf_name_info *info, *info_end;
|
||||
struct btf_type *base_t, *dist_t;
|
||||
__u8 *base_name_cnt = NULL;
|
||||
int err = 0;
|
||||
__u32 id;
|
||||
|
||||
/* generate a sort index array of name/type ids sorted by name for
|
||||
* distilled base BTF to speed name-based lookups.
|
||||
*/
|
||||
info = calloc(r->nr_dist_base_types, sizeof(*info));
|
||||
if (!info) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
info_end = info + r->nr_dist_base_types;
|
||||
for (id = 0; id < r->nr_dist_base_types; id++) {
|
||||
dist_t = btf_type_by_id(r->dist_base_btf, id);
|
||||
info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
|
||||
info[id].id = id;
|
||||
info[id].size = dist_t->size;
|
||||
info[id].needs_size = true;
|
||||
}
|
||||
qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);
|
||||
|
||||
/* Mark distilled base struct/union members of split BTF structs/unions
|
||||
* in id_map with BTF_IS_EMBEDDED; this signals that these types
|
||||
* need to match both name and size, otherwise embedding the base
|
||||
* struct/union in the split type is invalid.
|
||||
*/
|
||||
for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
|
||||
err = btf_mark_embedded_composite_type_ids(r, id);
|
||||
if (err)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Collect name counts for composite types in base BTF. If multiple
|
||||
* instances of a struct/union of the same name exist, we need to use
|
||||
* size to determine which to map to since name alone is ambiguous.
|
||||
*/
|
||||
base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt));
|
||||
if (!base_name_cnt) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
for (id = 1; id < r->nr_base_types; id++) {
|
||||
base_t = btf_type_by_id(r->base_btf, id);
|
||||
if (!btf_is_composite(base_t) || !base_t->name_off)
|
||||
continue;
|
||||
if (base_name_cnt[base_t->name_off] < 255)
|
||||
base_name_cnt[base_t->name_off]++;
|
||||
}
|
||||
|
||||
/* Now search base BTF for matching distilled base BTF types. */
|
||||
for (id = 1; id < r->nr_base_types; id++) {
|
||||
struct btf_name_info *dist_info, base_info = {};
|
||||
int dist_kind, base_kind;
|
||||
|
||||
base_t = btf_type_by_id(r->base_btf, id);
|
||||
/* distilled base consists of named types only. */
|
||||
if (!base_t->name_off)
|
||||
continue;
|
||||
base_kind = btf_kind(base_t);
|
||||
base_info.id = id;
|
||||
base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
|
||||
switch (base_kind) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
/* These types should match both name and size */
|
||||
base_info.needs_size = true;
|
||||
base_info.size = base_t->size;
|
||||
break;
|
||||
case BTF_KIND_FWD:
|
||||
/* No size considerations for fwds. */
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
/* Size only needs to be used for struct/union if there
|
||||
* are multiple types in base BTF with the same name.
|
||||
* If there are multiple _distilled_ types with the same
|
||||
* name (a very unlikely scenario), that doesn't matter
|
||||
* unless corresponding _base_ types to match them are
|
||||
* missing.
|
||||
*/
|
||||
base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
|
||||
base_info.size = base_t->size;
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
/* iterate over all matching distilled base types */
|
||||
for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
|
||||
dist_info != NULL && dist_info < info_end &&
|
||||
cmp_btf_name_size(&base_info, dist_info) == 0;
|
||||
dist_info++) {
|
||||
if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
|
||||
pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
|
||||
id, dist_info->id);
|
||||
err = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
|
||||
dist_kind = btf_kind(dist_t);
|
||||
|
||||
/* Validate that the found distilled type is compatible.
|
||||
* Do not error out on mismatch as another match may
|
||||
* occur for an identically-named type.
|
||||
*/
|
||||
switch (dist_kind) {
|
||||
case BTF_KIND_FWD:
|
||||
switch (base_kind) {
|
||||
case BTF_KIND_FWD:
|
||||
if (btf_kflag(dist_t) != btf_kflag(base_t))
|
||||
continue;
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
if (btf_kflag(base_t))
|
||||
continue;
|
||||
break;
|
||||
case BTF_KIND_UNION:
|
||||
if (!btf_kflag(base_t))
|
||||
continue;
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
case BTF_KIND_INT:
|
||||
if (dist_kind != base_kind ||
|
||||
btf_int_encoding(base_t) != btf_int_encoding(dist_t))
|
||||
continue;
|
||||
break;
|
||||
case BTF_KIND_FLOAT:
|
||||
if (dist_kind != base_kind)
|
||||
continue;
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
/* ENUM and ENUM64 are encoded as sized ENUM in
|
||||
* distilled base BTF.
|
||||
*/
|
||||
if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64)
|
||||
continue;
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
/* size verification is required for embedded
|
||||
* struct/unions.
|
||||
*/
|
||||
if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
|
||||
base_t->size != dist_t->size)
|
||||
continue;
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
if (r->id_map[dist_info->id] &&
|
||||
r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
|
||||
/* we already have a match; this tells us that
|
||||
* multiple base types of the same name
|
||||
* have the same size, since for cases where
|
||||
* multiple types have the same name we match
|
||||
* on name and size. In this case, we have
|
||||
* no way of determining which to relocate
|
||||
* to in base BTF, so error out.
|
||||
*/
|
||||
pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
|
||||
base_info.name, dist_info->id,
|
||||
base_t->size, id, r->id_map[dist_info->id]);
|
||||
err = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
/* map id and name */
|
||||
r->id_map[dist_info->id] = id;
|
||||
r->str_map[dist_t->name_off] = base_t->name_off;
|
||||
}
|
||||
}
|
||||
/* ensure all distilled BTF ids now have a mapping... */
|
||||
for (id = 1; id < r->nr_dist_base_types; id++) {
|
||||
const char *name;
|
||||
|
||||
if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED)
|
||||
continue;
|
||||
dist_t = btf_type_by_id(r->dist_base_btf, id);
|
||||
name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
|
||||
pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n",
|
||||
name, id);
|
||||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
done:
|
||||
free(base_name_cnt);
|
||||
free(info);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* distilled base should only have named int/float/enum/fwd/struct/union types. */
|
||||
static int btf_relocate_validate_distilled_base(struct btf_relocate *r)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 1; i < r->nr_dist_base_types; i++) {
|
||||
struct btf_type *t = btf_type_by_id(r->dist_base_btf, i);
|
||||
int kind = btf_kind(t);
|
||||
|
||||
switch (kind) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_FWD:
|
||||
if (t->name_off)
|
||||
break;
|
||||
pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n",
|
||||
i, kind);
|
||||
return -EINVAL;
|
||||
default:
|
||||
pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n",
|
||||
i, kind);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Rewrite every string offset in split BTF type @i to point at the new
 * (base_btf-backed) string section. Offsets at or beyond the distilled
 * string section length belong to split BTF's own strings and are simply
 * shifted by the size difference between base and distilled string sections;
 * offsets within the distilled section are translated via r->str_map.
 * Returns 0 on success, negative error on iterator failure or an unmapped
 * distilled string.
 */
static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
{
	struct btf_type *t = btf_type_by_id(r->btf, i);
	struct btf_field_iter it;
	__u32 *str_off;
	int off, err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
	if (err)
		return err;

	while ((str_off = btf_field_iter_next(&it))) {
		if (!*str_off)
			continue;
		if (*str_off >= r->dist_str_len) {
			/* split BTF's own string: shift past base section */
			*str_off += r->base_str_len - r->dist_str_len;
		} else {
			off = r->str_map[*str_off];
			if (!off) {
				/* Report the unmapped offset itself; the
				 * original passed 'off' (always 0 here) so
				 * the wrong string was printed, and the
				 * message lacked a terminating newline.
				 */
				pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
					btf__str_by_offset(r->btf, *str_off), *str_off);
				return -ENOENT;
			}
			*str_off = off;
		}
	}
	return 0;
}
|
||||
|
||||
/* If successful, output of relocation is updated BTF with base BTF pointing
 * at base_btf, and type ids, strings adjusted accordingly.
 *
 * @btf: split BTF whose current (distilled) base is to be swapped out.
 * @base_btf: the full base BTF to relocate onto; must differ from @btf's
 *            current base.
 * @id_map: optional out-param; on success receives a heap-allocated array
 *          mapping old type ids to new ids (ownership transfers to caller).
 * Returns 0 on success, negative error otherwise.
 */
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map)
{
	unsigned int nr_types = btf__type_cnt(btf);
	const struct btf_header *dist_base_hdr;
	const struct btf_header *base_hdr;
	struct btf_relocate r = {};
	int err = 0;
	__u32 id, i;

	/* reject a missing target base, or relocating onto the exact same
	 * base object @btf already uses.
	 * NOTE(review): if @btf has no base at all, r.dist_base_btf is NULL
	 * and the btf_header()/btf__type_cnt() calls below would act on
	 * NULL — presumably callers only pass split BTF here; confirm at
	 * call sites.
	 */
	r.dist_base_btf = btf__base_btf(btf);
	if (!base_btf || r.dist_base_btf == base_btf)
		return -EINVAL;

	r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf);
	r.nr_base_types = btf__type_cnt(base_btf);
	r.nr_split_types = nr_types - r.nr_dist_base_types;
	r.btf = btf;
	r.base_btf = base_btf;

	/* id_map: one slot per existing type id; str_map: one slot per
	 * distilled-base string offset. Both allocations are checked
	 * together below.
	 */
	r.id_map = calloc(nr_types, sizeof(*r.id_map));
	r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map));
	dist_base_hdr = btf_header(r.dist_base_btf);
	base_hdr = btf_header(r.base_btf);
	r.dist_str_len = dist_base_hdr->str_len;
	r.base_str_len = base_hdr->str_len;
	if (!r.id_map || !r.str_map) {
		err = -ENOMEM;
		goto err_out;
	}

	err = btf_relocate_validate_distilled_base(&r);
	if (err)
		goto err_out;

	/* Split BTF ids need to be adjusted as base and distilled base
	 * have different numbers of types, changing the start id of split
	 * BTF.
	 */
	for (id = r.nr_dist_base_types; id < nr_types; id++)
		r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types;

	/* Build a map from distilled base ids to actual base BTF ids; it is used
	 * to update split BTF id references. Also build a str_map mapping from
	 * distilled base BTF names to base BTF names.
	 */
	err = btf_relocate_map_distilled_base(&r);
	if (err)
		goto err_out;

	/* Next, rewrite type ids in split BTF, replacing split ids with updated
	 * ids based on number of types in base BTF, and base ids with
	 * relocated ids from base_btf.
	 */
	for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) {
		err = btf_relocate_rewrite_type_id(&r, id);
		if (err)
			goto err_out;
	}
	/* String offsets now need to be updated using the str_map. */
	for (i = 0; i < r.nr_split_types; i++) {
		err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types);
		if (err)
			goto err_out;
	}
	/* Finally reset base BTF to be base_btf */
	btf_set_base_btf(btf, base_btf);

	/* hand id_map ownership to the caller if requested; nulling r.id_map
	 * keeps the shared cleanup below from freeing it.
	 */
	if (id_map) {
		*id_map = r.id_map;
		r.id_map = NULL;
	}
err_out:
	/* success path also lands here with err == 0; free(NULL) is a no-op */
	free(r.id_map);
	free(r.str_map);
	return err;
}
|
||||
@@ -28,6 +28,9 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd)
|
||||
int fd, ret;
|
||||
Elf *elf;
|
||||
|
||||
elf_fd->elf = NULL;
|
||||
elf_fd->fd = -1;
|
||||
|
||||
if (elf_version(EV_CURRENT) == EV_NONE) {
|
||||
pr_warn("elf: failed to init libelf for %s\n", binary_path);
|
||||
return -LIBBPF_ERRNO__LIBELF;
|
||||
|
||||
@@ -22,7 +22,7 @@ int probe_fd(int fd)
|
||||
|
||||
static int probe_kern_prog_name(int token_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
@@ -392,11 +392,41 @@ static int probe_uprobe_multi_link(int token_fd)
|
||||
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
|
||||
err = -errno; /* close() can clobber errno */
|
||||
|
||||
if (link_fd >= 0 || err != -EBADF) {
|
||||
if (link_fd >= 0)
|
||||
close(link_fd);
|
||||
close(prog_fd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Initial multi-uprobe support in kernel didn't handle PID filtering
|
||||
* correctly (it was doing thread filtering, not process filtering).
|
||||
* So now we'll detect if PID filtering logic was fixed, and, if not,
|
||||
* we'll pretend multi-uprobes are not supported, if not.
|
||||
* Multi-uprobes are used in USDT attachment logic, and we need to be
|
||||
* conservative here, because multi-uprobe selection happens early at
|
||||
* load time, while the use of PID filtering is known late at
|
||||
* attachment time, at which point it's too late to undo multi-uprobe
|
||||
* selection.
|
||||
*
|
||||
* Creating uprobe with pid == -1 for (invalid) '/' binary will fail
|
||||
* early with -EINVAL on kernels with fixed PID filtering logic;
|
||||
* otherwise -ESRCH would be returned if passed correct binary path
|
||||
* (but we'll just get -BADF, of course).
|
||||
*/
|
||||
link_opts.uprobe_multi.pid = -1; /* invalid PID */
|
||||
link_opts.uprobe_multi.path = "/"; /* invalid path */
|
||||
link_opts.uprobe_multi.offsets = &offset;
|
||||
link_opts.uprobe_multi.cnt = 1;
|
||||
|
||||
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
|
||||
err = -errno; /* close() can clobber errno */
|
||||
|
||||
if (link_fd >= 0)
|
||||
close(link_fd);
|
||||
close(prog_fd);
|
||||
|
||||
return link_fd < 0 && err == -EBADF;
|
||||
return link_fd < 0 && err == -EINVAL;
|
||||
}
|
||||
|
||||
static int probe_kern_bpf_cookie(int token_fd)
|
||||
|
||||
211
src/libbpf.c
211
src/libbpf.c
@@ -229,7 +229,30 @@ static const char * const prog_type_name[] = {
|
||||
static int __base_pr(enum libbpf_print_level level, const char *format,
|
||||
va_list args)
|
||||
{
|
||||
if (level == LIBBPF_DEBUG)
|
||||
const char *env_var = "LIBBPF_LOG_LEVEL";
|
||||
static enum libbpf_print_level min_level = LIBBPF_INFO;
|
||||
static bool initialized;
|
||||
|
||||
if (!initialized) {
|
||||
char *verbosity;
|
||||
|
||||
initialized = true;
|
||||
verbosity = getenv(env_var);
|
||||
if (verbosity) {
|
||||
if (strcasecmp(verbosity, "warn") == 0)
|
||||
min_level = LIBBPF_WARN;
|
||||
else if (strcasecmp(verbosity, "debug") == 0)
|
||||
min_level = LIBBPF_DEBUG;
|
||||
else if (strcasecmp(verbosity, "info") == 0)
|
||||
min_level = LIBBPF_INFO;
|
||||
else
|
||||
fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
|
||||
env_var, verbosity);
|
||||
}
|
||||
}
|
||||
|
||||
/* if too verbose, skip logging */
|
||||
if (level > min_level)
|
||||
return 0;
|
||||
|
||||
return vfprintf(stderr, format, args);
|
||||
@@ -473,8 +496,6 @@ struct bpf_program {
|
||||
};
|
||||
|
||||
struct bpf_struct_ops {
|
||||
const char *tname;
|
||||
const struct btf_type *type;
|
||||
struct bpf_program **progs;
|
||||
__u32 *kern_func_off;
|
||||
/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
|
||||
@@ -549,6 +570,7 @@ struct bpf_map {
|
||||
bool pinned;
|
||||
bool reused;
|
||||
bool autocreate;
|
||||
bool autoattach;
|
||||
__u64 map_extra;
|
||||
};
|
||||
|
||||
@@ -1059,11 +1081,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < obj->nr_maps; ++j) {
|
||||
const struct btf_type *type;
|
||||
|
||||
map = &obj->maps[j];
|
||||
if (!bpf_map__is_struct_ops(map))
|
||||
continue;
|
||||
|
||||
vlen = btf_vlen(map->st_ops->type);
|
||||
type = btf__type_by_id(obj->btf, map->st_ops->type_id);
|
||||
vlen = btf_vlen(type);
|
||||
for (k = 0; k < vlen; ++k) {
|
||||
slot_prog = map->st_ops->progs[k];
|
||||
if (prog != slot_prog)
|
||||
@@ -1097,8 +1122,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
|
||||
int err;
|
||||
|
||||
st_ops = map->st_ops;
|
||||
type = st_ops->type;
|
||||
tname = st_ops->tname;
|
||||
type = btf__type_by_id(btf, st_ops->type_id);
|
||||
tname = btf__name_by_offset(btf, type->name_off);
|
||||
err = find_struct_ops_kern_types(obj, tname, &mod_btf,
|
||||
&kern_type, &kern_type_id,
|
||||
&kern_vtype, &kern_vtype_id,
|
||||
@@ -1377,6 +1402,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
|
||||
map->def.value_size = type->size;
|
||||
map->def.max_entries = 1;
|
||||
map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
|
||||
map->autoattach = true;
|
||||
|
||||
map->st_ops = calloc(1, sizeof(*map->st_ops));
|
||||
if (!map->st_ops)
|
||||
@@ -1398,8 +1424,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
|
||||
memcpy(st_ops->data,
|
||||
data->d_buf + vsi->offset,
|
||||
type->size);
|
||||
st_ops->tname = tname;
|
||||
st_ops->type = type;
|
||||
st_ops->type_id = type_id;
|
||||
|
||||
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
|
||||
@@ -4796,6 +4820,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
|
||||
{
|
||||
if (!bpf_map__is_struct_ops(map))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
map->autoattach = autoattach;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool bpf_map__autoattach(const struct bpf_map *map)
|
||||
{
|
||||
return map->autoattach;
|
||||
}
|
||||
|
||||
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
||||
{
|
||||
struct bpf_map_info info;
|
||||
@@ -7867,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
|
||||
}
|
||||
|
||||
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
|
||||
const char *obj_name,
|
||||
const struct bpf_object_open_opts *opts)
|
||||
{
|
||||
const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
|
||||
const char *kconfig, *btf_tmp_path, *token_path;
|
||||
struct bpf_object *obj;
|
||||
char tmp_name[64];
|
||||
int err;
|
||||
char *log_buf;
|
||||
size_t log_size;
|
||||
__u32 log_level;
|
||||
|
||||
if (obj_buf && !obj_name)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (elf_version(EV_CURRENT) == EV_NONE) {
|
||||
pr_warn("failed to init libelf for %s\n",
|
||||
path ? : "(mem buf)");
|
||||
@@ -7886,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
|
||||
if (!OPTS_VALID(opts, bpf_object_open_opts))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
obj_name = OPTS_GET(opts, object_name, NULL);
|
||||
obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
|
||||
if (obj_buf) {
|
||||
if (!obj_name) {
|
||||
snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
|
||||
(unsigned long)obj_buf,
|
||||
(unsigned long)obj_buf_sz);
|
||||
obj_name = tmp_name;
|
||||
}
|
||||
path = obj_name;
|
||||
pr_debug("loading object '%s' from buffer\n", obj_name);
|
||||
} else {
|
||||
pr_debug("loading object from %s\n", path);
|
||||
}
|
||||
|
||||
log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
|
||||
@@ -7979,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
|
||||
if (!path)
|
||||
return libbpf_err_ptr(-EINVAL);
|
||||
|
||||
pr_debug("loading %s\n", path);
|
||||
|
||||
return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
|
||||
return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
|
||||
}
|
||||
|
||||
struct bpf_object *bpf_object__open(const char *path)
|
||||
@@ -7993,10 +8028,15 @@ struct bpf_object *
|
||||
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
|
||||
const struct bpf_object_open_opts *opts)
|
||||
{
|
||||
char tmp_name[64];
|
||||
|
||||
if (!obj_buf || obj_buf_sz == 0)
|
||||
return libbpf_err_ptr(-EINVAL);
|
||||
|
||||
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
|
||||
/* create a (quite useless) default "name" for this memory buffer object */
|
||||
snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
|
||||
|
||||
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
|
||||
}
|
||||
|
||||
static int bpf_object_unload(struct bpf_object *obj)
|
||||
@@ -8406,11 +8446,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
|
||||
|
||||
static void bpf_map_prepare_vdata(const struct bpf_map *map)
|
||||
{
|
||||
const struct btf_type *type;
|
||||
struct bpf_struct_ops *st_ops;
|
||||
__u32 i;
|
||||
|
||||
st_ops = map->st_ops;
|
||||
for (i = 0; i < btf_vlen(st_ops->type); i++) {
|
||||
type = btf__type_by_id(map->obj->btf, st_ops->type_id);
|
||||
for (i = 0; i < btf_vlen(type); i++) {
|
||||
struct bpf_program *prog = st_ops->progs[i];
|
||||
void *kern_data;
|
||||
int prog_fd;
|
||||
@@ -9673,6 +9715,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
|
||||
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
|
||||
Elf64_Shdr *shdr, Elf_Data *data)
|
||||
{
|
||||
const struct btf_type *type;
|
||||
const struct btf_member *member;
|
||||
struct bpf_struct_ops *st_ops;
|
||||
struct bpf_program *prog;
|
||||
@@ -9732,13 +9775,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
|
||||
}
|
||||
insn_idx = sym->st_value / BPF_INSN_SZ;
|
||||
|
||||
member = find_member_by_offset(st_ops->type, moff * 8);
|
||||
type = btf__type_by_id(btf, st_ops->type_id);
|
||||
member = find_member_by_offset(type, moff * 8);
|
||||
if (!member) {
|
||||
pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
|
||||
map->name, moff);
|
||||
return -EINVAL;
|
||||
}
|
||||
member_idx = member - btf_members(st_ops->type);
|
||||
member_idx = member - btf_members(type);
|
||||
name = btf__name_by_offset(btf, member->name_off);
|
||||
|
||||
if (!resolve_func_ptr(btf, member->type, NULL)) {
|
||||
@@ -10336,7 +10380,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
|
||||
struct bpf_map *
|
||||
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
|
||||
{
|
||||
if (prev == NULL)
|
||||
if (prev == NULL && obj != NULL)
|
||||
return obj->maps;
|
||||
|
||||
return __bpf_map__iter(prev, obj, 1);
|
||||
@@ -10345,7 +10389,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
|
||||
struct bpf_map *
|
||||
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
|
||||
{
|
||||
if (next == NULL) {
|
||||
if (next == NULL && obj != NULL) {
|
||||
if (!obj->nr_maps)
|
||||
return NULL;
|
||||
return obj->maps + obj->nr_maps - 1;
|
||||
@@ -12877,8 +12921,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
|
||||
__u32 zero = 0;
|
||||
int err, fd;
|
||||
|
||||
if (!bpf_map__is_struct_ops(map))
|
||||
if (!bpf_map__is_struct_ops(map)) {
|
||||
pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
|
||||
return libbpf_err_ptr(-EINVAL);
|
||||
}
|
||||
|
||||
if (map->fd < 0) {
|
||||
pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
|
||||
@@ -13671,14 +13717,15 @@ int libbpf_num_possible_cpus(void)
|
||||
|
||||
static int populate_skeleton_maps(const struct bpf_object *obj,
|
||||
struct bpf_map_skeleton *maps,
|
||||
size_t map_cnt)
|
||||
size_t map_cnt, size_t map_skel_sz)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < map_cnt; i++) {
|
||||
struct bpf_map **map = maps[i].map;
|
||||
const char *name = maps[i].name;
|
||||
void **mmaped = maps[i].mmaped;
|
||||
struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
|
||||
struct bpf_map **map = map_skel->map;
|
||||
const char *name = map_skel->name;
|
||||
void **mmaped = map_skel->mmaped;
|
||||
|
||||
*map = bpf_object__find_map_by_name(obj, name);
|
||||
if (!*map) {
|
||||
@@ -13695,13 +13742,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
|
||||
|
||||
static int populate_skeleton_progs(const struct bpf_object *obj,
|
||||
struct bpf_prog_skeleton *progs,
|
||||
size_t prog_cnt)
|
||||
size_t prog_cnt, size_t prog_skel_sz)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < prog_cnt; i++) {
|
||||
struct bpf_program **prog = progs[i].prog;
|
||||
const char *name = progs[i].name;
|
||||
struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
|
||||
struct bpf_program **prog = prog_skel->prog;
|
||||
const char *name = prog_skel->name;
|
||||
|
||||
*prog = bpf_object__find_program_by_name(obj, name);
|
||||
if (!*prog) {
|
||||
@@ -13715,40 +13763,24 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
|
||||
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
|
||||
const struct bpf_object_open_opts *opts)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
|
||||
.object_name = s->name,
|
||||
);
|
||||
struct bpf_object *obj;
|
||||
int err;
|
||||
|
||||
/* Attempt to preserve opts->object_name, unless overriden by user
|
||||
* explicitly. Overwriting object name for skeletons is discouraged,
|
||||
* as it breaks global data maps, because they contain object name
|
||||
* prefix as their own map name prefix. When skeleton is generated,
|
||||
* bpftool is making an assumption that this name will stay the same.
|
||||
*/
|
||||
if (opts) {
|
||||
memcpy(&skel_opts, opts, sizeof(*opts));
|
||||
if (!opts->object_name)
|
||||
skel_opts.object_name = s->name;
|
||||
}
|
||||
|
||||
obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
|
||||
err = libbpf_get_error(obj);
|
||||
if (err) {
|
||||
pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
|
||||
s->name, err);
|
||||
obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
|
||||
if (IS_ERR(obj)) {
|
||||
err = PTR_ERR(obj);
|
||||
pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
*s->obj = obj;
|
||||
err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
|
||||
err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
|
||||
if (err) {
|
||||
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
|
||||
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
|
||||
if (err) {
|
||||
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
|
||||
return libbpf_err(err);
|
||||
@@ -13778,20 +13810,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
|
||||
return libbpf_err(-errno);
|
||||
}
|
||||
|
||||
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
|
||||
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
|
||||
if (err) {
|
||||
pr_warn("failed to populate subskeleton maps: %d\n", err);
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
|
||||
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
|
||||
if (err) {
|
||||
pr_warn("failed to populate subskeleton maps: %d\n", err);
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
|
||||
var_skel = &s->vars[var_idx];
|
||||
var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
|
||||
map = *var_skel->map;
|
||||
map_type_id = bpf_map__btf_value_type_id(map);
|
||||
map_type = btf__type_by_id(btf, map_type_id);
|
||||
@@ -13838,10 +13870,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
|
||||
}
|
||||
|
||||
for (i = 0; i < s->map_cnt; i++) {
|
||||
struct bpf_map *map = *s->maps[i].map;
|
||||
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
|
||||
struct bpf_map *map = *map_skel->map;
|
||||
size_t mmap_sz = bpf_map_mmap_sz(map);
|
||||
int prot, map_fd = map->fd;
|
||||
void **mmaped = s->maps[i].mmaped;
|
||||
void **mmaped = map_skel->mmaped;
|
||||
|
||||
if (!mmaped)
|
||||
continue;
|
||||
@@ -13889,8 +13922,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
|
||||
int i, err;
|
||||
|
||||
for (i = 0; i < s->prog_cnt; i++) {
|
||||
struct bpf_program *prog = *s->progs[i].prog;
|
||||
struct bpf_link **link = s->progs[i].link;
|
||||
struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
|
||||
struct bpf_program *prog = *prog_skel->prog;
|
||||
struct bpf_link **link = prog_skel->link;
|
||||
|
||||
if (!prog->autoload || !prog->autoattach)
|
||||
continue;
|
||||
@@ -13922,6 +13956,38 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
|
||||
*/
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < s->map_cnt; i++) {
|
||||
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
|
||||
struct bpf_map *map = *map_skel->map;
|
||||
struct bpf_link **link;
|
||||
|
||||
if (!map->autocreate || !map->autoattach)
|
||||
continue;
|
||||
|
||||
/* only struct_ops maps can be attached */
|
||||
if (!bpf_map__is_struct_ops(map))
|
||||
continue;
|
||||
|
||||
/* skeleton is created with earlier version of bpftool, notify user */
|
||||
if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
|
||||
pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
|
||||
bpf_map__name(map));
|
||||
continue;
|
||||
}
|
||||
|
||||
link = map_skel->link;
|
||||
if (*link)
|
||||
continue;
|
||||
|
||||
*link = bpf_map__attach_struct_ops(map);
|
||||
if (!*link) {
|
||||
err = -errno;
|
||||
pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
|
||||
return libbpf_err(err);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -13930,11 +13996,25 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < s->prog_cnt; i++) {
|
||||
struct bpf_link **link = s->progs[i].link;
|
||||
struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
|
||||
struct bpf_link **link = prog_skel->link;
|
||||
|
||||
bpf_link__destroy(*link);
|
||||
*link = NULL;
|
||||
}
|
||||
|
||||
if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
|
||||
return;
|
||||
|
||||
for (i = 0; i < s->map_cnt; i++) {
|
||||
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
|
||||
struct bpf_link **link = map_skel->link;
|
||||
|
||||
if (link) {
|
||||
bpf_link__destroy(*link);
|
||||
*link = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
|
||||
@@ -13942,8 +14022,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
|
||||
if (!s)
|
||||
return;
|
||||
|
||||
if (s->progs)
|
||||
bpf_object__detach_skeleton(s);
|
||||
bpf_object__detach_skeleton(s);
|
||||
if (s->obj)
|
||||
bpf_object__close(*s->obj);
|
||||
free(s->maps);
|
||||
|
||||
23
src/libbpf.h
23
src/libbpf.h
@@ -98,7 +98,10 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
|
||||
|
||||
/**
|
||||
* @brief **libbpf_set_print()** sets user-provided log callback function to
|
||||
* be used for libbpf warnings and informational messages.
|
||||
* be used for libbpf warnings and informational messages. If the user callback
|
||||
* is not set, messages are logged to stderr by default. The verbosity of these
|
||||
* messages can be controlled by setting the environment variable
|
||||
* LIBBPF_LOG_LEVEL to either warn, info, or debug.
|
||||
* @param fn The log print function. If NULL, libbpf won't print anything.
|
||||
* @return Pointer to old print function.
|
||||
*
|
||||
@@ -975,6 +978,23 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
|
||||
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
|
||||
|
||||
/**
|
||||
* @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
|
||||
* map during BPF skeleton attach phase.
|
||||
* @param map the BPF map instance
|
||||
* @param autoattach whether to attach map during BPF skeleton attach phase
|
||||
* @return 0 on success; negative error code, otherwise
|
||||
*/
|
||||
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
|
||||
|
||||
/**
|
||||
* @brief **bpf_map__autoattach()** returns whether BPF map is configured to
|
||||
* auto-attach during BPF skeleton attach phase.
|
||||
* @param map the BPF map instance
|
||||
* @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
|
||||
*/
|
||||
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
|
||||
|
||||
/**
|
||||
* @brief **bpf_map__fd()** gets the file descriptor of the passed
|
||||
* BPF map
|
||||
@@ -1669,6 +1689,7 @@ struct bpf_map_skeleton {
|
||||
const char *name;
|
||||
struct bpf_map **map;
|
||||
void **mmaped;
|
||||
struct bpf_link **link;
|
||||
};
|
||||
|
||||
struct bpf_prog_skeleton {
|
||||
|
||||
@@ -419,6 +419,10 @@ LIBBPF_1.4.0 {
|
||||
|
||||
LIBBPF_1.5.0 {
|
||||
global:
|
||||
btf__distill_base;
|
||||
btf__relocate;
|
||||
bpf_map__autoattach;
|
||||
bpf_map__set_autoattach;
|
||||
bpf_program__attach_sockmap;
|
||||
ring__consume_n;
|
||||
ring_buffer__consume_n;
|
||||
|
||||
@@ -234,6 +234,9 @@ struct btf_type;
|
||||
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
|
||||
const char *btf_kind_str(const struct btf_type *t);
|
||||
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
|
||||
const struct btf_header *btf_header(const struct btf *btf);
|
||||
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
|
||||
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
|
||||
|
||||
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
|
||||
{
|
||||
@@ -508,11 +511,33 @@ struct bpf_line_info_min {
|
||||
__u32 line_col;
|
||||
};
|
||||
|
||||
enum btf_field_iter_kind {
|
||||
BTF_FIELD_ITER_IDS,
|
||||
BTF_FIELD_ITER_STRS,
|
||||
};
|
||||
|
||||
struct btf_field_desc {
|
||||
/* once-per-type offsets */
|
||||
int t_off_cnt, t_offs[2];
|
||||
/* member struct size, or zero, if no members */
|
||||
int m_sz;
|
||||
/* repeated per-member offsets */
|
||||
int m_off_cnt, m_offs[1];
|
||||
};
|
||||
|
||||
struct btf_field_iter {
|
||||
struct btf_field_desc desc;
|
||||
void *p;
|
||||
int m_idx;
|
||||
int off_idx;
|
||||
int vlen;
|
||||
};
|
||||
|
||||
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
|
||||
__u32 *btf_field_iter_next(struct btf_field_iter *it);
|
||||
|
||||
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
|
||||
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
|
||||
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
|
||||
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
|
||||
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
|
||||
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
|
||||
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
|
||||
@@ -597,13 +622,9 @@ static inline int ensure_good_fd(int fd)
|
||||
return fd;
|
||||
}
|
||||
|
||||
static inline int sys_dup2(int oldfd, int newfd)
|
||||
static inline int sys_dup3(int oldfd, int newfd, int flags)
|
||||
{
|
||||
#ifdef __NR_dup2
|
||||
return syscall(__NR_dup2, oldfd, newfd);
|
||||
#else
|
||||
return syscall(__NR_dup3, oldfd, newfd, 0);
|
||||
#endif
|
||||
return syscall(__NR_dup3, oldfd, newfd, flags);
|
||||
}
|
||||
|
||||
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
|
||||
@@ -614,7 +635,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = sys_dup2(tmp_fd, fixed_fd);
|
||||
err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
|
||||
err = err < 0 ? -errno : 0;
|
||||
close(tmp_fd); /* clean up temporary FD */
|
||||
return err;
|
||||
|
||||
@@ -97,9 +97,6 @@ __u32 get_kernel_version(void)
|
||||
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
|
||||
return 0;
|
||||
|
||||
if (major == 4 && minor == 19 && patch > 255)
|
||||
return KERNEL_VERSION(major, minor, 255);
|
||||
|
||||
return KERNEL_VERSION(major, minor, patch);
|
||||
}
|
||||
|
||||
|
||||
69
src/linker.c
69
src/linker.c
@@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
|
||||
static int linker_sanity_check_btf(struct src_obj *obj)
|
||||
{
|
||||
struct btf_type *t;
|
||||
int i, n, err = 0;
|
||||
int i, n, err;
|
||||
|
||||
if (!obj->btf)
|
||||
return 0;
|
||||
|
||||
n = btf__type_cnt(obj->btf);
|
||||
for (i = 1; i < n; i++) {
|
||||
struct btf_field_iter it;
|
||||
__u32 *type_id, *str_off;
|
||||
|
||||
t = btf_type_by_id(obj->btf, i);
|
||||
|
||||
err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
|
||||
err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
return err;
|
||||
while ((type_id = btf_field_iter_next(&it))) {
|
||||
if (*type_id >= n)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
|
||||
if (err)
|
||||
return err;
|
||||
while ((str_off = btf_field_iter_next(&it))) {
|
||||
if (!btf__str_by_offset(obj->btf, *str_off))
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -2213,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj)
|
||||
vi = btf_var_secinfos(t);
|
||||
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
|
||||
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
|
||||
const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
|
||||
int var_linkage = btf_var(vt)->linkage;
|
||||
const char *var_name;
|
||||
int var_linkage;
|
||||
Elf64_Sym *sym;
|
||||
|
||||
/* could be a variable or function */
|
||||
if (!btf_is_var(vt))
|
||||
continue;
|
||||
|
||||
var_name = btf__str_by_offset(obj->btf, vt->name_off);
|
||||
var_linkage = btf_var(vt)->linkage;
|
||||
|
||||
/* no need to patch up static or extern vars */
|
||||
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
|
||||
continue;
|
||||
@@ -2234,26 +2255,10 @@ static int linker_fixup_btf(struct src_obj *obj)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int remap_type_id(__u32 *type_id, void *ctx)
|
||||
{
|
||||
int *id_map = ctx;
|
||||
int new_id = id_map[*type_id];
|
||||
|
||||
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
|
||||
if (new_id == 0 && *type_id != 0) {
|
||||
pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*type_id = id_map[*type_id];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
int i, j, n, start_id, id;
|
||||
int i, j, n, start_id, id, err;
|
||||
const char *name;
|
||||
|
||||
if (!obj->btf)
|
||||
@@ -2324,9 +2329,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
|
||||
n = btf__type_cnt(linker->btf);
|
||||
for (i = start_id; i < n; i++) {
|
||||
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
|
||||
struct btf_field_iter it;
|
||||
__u32 *type_id;
|
||||
|
||||
if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
|
||||
return -EINVAL;
|
||||
err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
while ((type_id = btf_field_iter_next(&it))) {
|
||||
int new_id = obj->btf_type_map[*type_id];
|
||||
|
||||
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
|
||||
if (new_id == 0 && *type_id != 0) {
|
||||
pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
|
||||
*type_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*type_id = obj->btf_type_map[*type_id];
|
||||
}
|
||||
}
|
||||
|
||||
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
|
||||
|
||||
Reference in New Issue
Block a user