Compare commits


1 Commit

Author SHA1 Message Date
thiagoftsm 0dbbb4e7e0 Apply patch to run binaries on Debian 10 2024-04-21 15:02:37 +00:00
49 changed files with 93589 additions and 98006 deletions

.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,3 @@
Thank you for considering a contribution!
Please note that the `libbpf` authoritative source code is developed as part of bpf-next Linux source tree under tools/lib/bpf subdirectory and is periodically synced to Github. As such, all the libbpf changes should be sent to BPF mailing list, please don't open PRs here unless you are changing Github-specific parts of libbpf (e.g., Github-specific Makefile).


@@ -12,9 +12,6 @@ inputs:
description: 'where is vmlinux file'
required: true
default: '${{ github.workspace }}/vmlinux'
llvm-version:
description: 'llvm version'
required: true
runs:
using: "composite"
@@ -31,6 +28,4 @@ runs:
export REPO_ROOT="${{ github.workspace }}"
export REPO_PATH="${{ inputs.repo-path }}"
export VMLINUX_BTF="${{ inputs.vmlinux }}"
export LLVM_VERSION="${{ inputs.llvm-version }}"
${{ github.action_path }}/build_selftests.sh


@@ -10,21 +10,22 @@ foldable start prepare_selftests "Building selftests"
LIBBPF_PATH="${REPO_ROOT}"
llvm_latest_version() {
echo "19"
llvm_default_version() {
echo "16"
}
llvm_latest_version() {
echo "17"
}
LLVM_VERSION=$(llvm_default_version)
if [[ "${LLVM_VERSION}" == $(llvm_latest_version) ]]; then
REPO_DISTRO_SUFFIX=""
else
REPO_DISTRO_SUFFIX="-${LLVM_VERSION}"
fi
DISTRIB_CODENAME="noble"
test -f /etc/lsb-release && . /etc/lsb-release
echo "${DISTRIB_CODENAME}"
echo "deb https://apt.llvm.org/${DISTRIB_CODENAME}/ llvm-toolchain-${DISTRIB_CODENAME}${REPO_DISTRO_SUFFIX} main" \
echo "deb https://apt.llvm.org/focal/ llvm-toolchain-focal${REPO_DISTRO_SUFFIX} main" \
| sudo tee /etc/apt/sources.list.d/llvm.list
PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh

File diff suppressed because it is too large.


@@ -13,10 +13,6 @@ inputs:
description: 'pahole rev or master'
required: true
default: 'master'
llvm-version:
description: 'llvm version'
required: false
default: '17'
runs:
using: "composite"
steps:
@@ -42,7 +38,6 @@ runs:
with:
pahole: ${{ inputs.pahole }}
arch: ${{ inputs.arch }}
llvm-version: ${{ inputs.llvm-version }}
# 1. download CHECKPOINT kernel source
- name: Get checkpoint commit
shell: bash
@@ -98,7 +93,6 @@ runs:
with:
repo-path: '.kernel'
kernel: ${{ inputs.kernel }}
llvm-version: ${{ inputs.llvm-version }}
# 4. prepare rootfs
- name: prepare rootfs
uses: libbpf/ci/prepare-rootfs@main


@@ -53,7 +53,7 @@ jobs:
ubuntu:
runs-on: ubuntu-latest
name: Ubuntu Build (${{ matrix.arch }})
name: Ubuntu Focal Build (${{ matrix.arch }})
strategy:
fail-fast: false
matrix:
@@ -75,7 +75,7 @@ jobs:
if: matrix.arch != 'x86'
with:
distro:
ubuntu22.04
ubuntu20.04
arch:
${{ matrix.arch }}
setup:


@@ -17,7 +17,7 @@ permissions:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
concurrency:
group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }}
cancel-in-progress: true


@@ -1,10 +1,10 @@
name: pahole-staging
on:
workflow_dispatch:
schedule:
- cron: '0 18 * * *'
jobs:
vmtest:
runs-on: ubuntu-20.04


@@ -13,22 +13,22 @@ concurrency:
jobs:
vmtest:
runs-on: ${{ matrix.runs_on }}
name: Kernel ${{ matrix.kernel }} on ${{ matrix.arch }} + selftests
name: Kernel ${{ matrix.kernel }} on ${{ matrix.runs_on }} + selftests
strategy:
fail-fast: false
matrix:
include:
- kernel: 'LATEST'
runs_on: ubuntu-24.04
runs_on: ubuntu-20.04
arch: 'x86_64'
- kernel: '5.5.0'
runs_on: ubuntu-24.04
runs_on: ubuntu-20.04
arch: 'x86_64'
- kernel: '4.9.0'
runs_on: ubuntu-24.04
runs_on: ubuntu-20.04
arch: 'x86_64'
- kernel: 'LATEST'
runs_on: ["s390x", "docker-noble-main"]
runs_on: s390x
arch: 's390x'
steps:
- uses: actions/checkout@v4


@@ -1,21 +0,0 @@
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Herbert Xu <herbert@gondor.apana.org.au>
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
Kees Cook <kees@kernel.org> <keescook@chromium.org>
Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com>
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com>
Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vadfed@meta.com>
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vfedorenko@novek.ru>


@@ -1 +1 @@
b408473ea01b2e499d23503e2bf898416da9d7ac
443574b033876c85a35de4c65c14f7fe092222b2


@@ -1 +1 @@
2ad6d23f465a4f851e3bcf6d74c315ce7b2c205b
14bb1e8c8d4ad5d9d2febb7d19c70a3cf536e1e5


@@ -1,69 +0,0 @@
From c71766e8ff7a7f950522d25896fba758585500df Mon Sep 17 00:00:00 2001
From: Song Liu <song@kernel.org>
Date: Mon, 22 Apr 2024 21:14:40 -0700
Subject: [PATCH] arch/Kconfig: Move SPECULATION_MITIGATIONS to arch/Kconfig
SPECULATION_MITIGATIONS is currently defined only for x86. As a result,
IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) is always false for other
archs. f337a6a21e2f effectively set "mitigations=off" by default on
non-x86 archs, which is not desired behavior. Jakub observed this
change when running bpf selftests on s390 and arm64.
Fix this by moving SPECULATION_MITIGATIONS to arch/Kconfig so that it is
available in all archs and thus can be used safely in kernel/cpu.c
Fixes: f337a6a21e2f ("x86/cpu: Actually turn off mitigations by default for SPECULATION_MITIGATIONS=n")
Cc: stable@vger.kernel.org
Cc: Sean Christopherson <seanjc@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Song Liu <song@kernel.org>
---
arch/Kconfig | 10 ++++++++++
arch/x86/Kconfig | 10 ----------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 9f066785bb71..8f4af75005f8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1609,4 +1609,14 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT
# strict alignment always, even with -falign-functions.
def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG
+menuconfig SPECULATION_MITIGATIONS
+ bool "Mitigations for speculative execution vulnerabilities"
+ default y
+ help
+ Say Y here to enable options which enable mitigations for
+ speculative execution hardware vulnerabilities.
+
+ If you say N, all mitigations will be disabled. You really
+ should know what you are doing to say so.
+
endmenu
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 39886bab943a..50c890fce5e0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2486,16 +2486,6 @@ config PREFIX_SYMBOLS
def_bool y
depends on CALL_PADDING && !CFI_CLANG
-menuconfig SPECULATION_MITIGATIONS
- bool "Mitigations for speculative execution vulnerabilities"
- default y
- help
- Say Y here to enable options which enable mitigations for
- speculative execution hardware vulnerabilities.
-
- If you say N, all mitigations will be disabled. You really
- should know what you are doing to say so.
-
if SPECULATION_MITIGATIONS
config MITIGATION_PAGE_TABLE_ISOLATION
--
2.43.0


@@ -1,32 +0,0 @@
From 0daad0a615e687e1247230f3d0c31ae60ba32314 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 28 May 2024 15:29:38 -0700
Subject: [PATCH bpf-next] selftests/bpf: fix inet_csk_accept prototype in
test_sk_storage_tracing.c
Recent kernel change ([0]) changed inet_csk_accept() prototype. Adapt
progs/test_sk_storage_tracing.c to take that into account.
[0] 92ef0fd55ac8 ("net: change proto and proto_ops accept type")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
index 02e718f06e0f..40531e56776e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
}
SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
struct sock *accepted_sk)
{
set_task_info(accepted_sk);
--
2.43.0


@@ -41,6 +41,7 @@ task_fd_query_rawtp
task_fd_query_tp
tc_bpf
tcp_estats
tcp_rtt
test_global_funcs/arg_tag_ctx*
tp_attach_query
usdt/urand_pid_attach


@@ -12,4 +12,3 @@ xdp_bonding/xdp_bonding_features # started failing after net merge from 359e
tc_redirect/tc_redirect_dtime # uapi breakage after net-next commit 885c36e59f46 ("net: Re-use and set mono_delivery_time bit for userspace tstamp packets")
migrate_reuseport/IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
migrate_reuseport/IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
verify_pkcs7_sig # keeps failing


@@ -67,14 +67,12 @@ local_configs_path=${PROJECT_NAME}/vmtest/configs
DENYLIST=$(read_lists \
"$configs_path/DENYLIST" \
"$configs_path/DENYLIST.${ARCH}" \
"$local_configs_path/DENYLIST" \
"$local_configs_path/DENYLIST-${KERNEL}" \
"$local_configs_path/DENYLIST-${KERNEL}.${ARCH}" \
)
ALLOWLIST=$(read_lists \
"$configs_path/ALLOWLIST" \
"$configs_path/ALLOWLIST.${ARCH}" \
"$local_configs_path/ALLOWLIST" \
"$local_configs_path/ALLOWLIST-${KERNEL}" \
"$local_configs_path/ALLOWLIST-${KERNEL}.${ARCH}" \
)


@@ -219,14 +219,6 @@ compilation and skeleton generation. Using Libbpf-rs will make building user
space part of the BPF application easier. Note that the BPF program themselves
must still be written in plain C.
libbpf logging
==============
By default, libbpf logs informational and warning messages to stderr. The
verbosity of these messages can be controlled by setting the environment
variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log
callback can be set using ``libbpf_set_print()``.
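For reference, the API the paragraph above describes can be used like this. A minimal sketch: libbpf_set_print(), libbpf_print_fn_t and the LIBBPF_* levels are real libbpf names, while the filtering policy and program structure are illustrative.

#include <stdarg.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Forward warning/info messages to stderr, drop debug chatter. */
static int print_fn(enum libbpf_print_level level,
                    const char *format, va_list args)
{
        if (level == LIBBPF_DEBUG)
                return 0;
        return vfprintf(stderr, format, args);
}

int main(void)
{
        /* libbpf_set_print() returns the previously installed callback. */
        libbpf_print_fn_t old_fn = libbpf_set_print(print_fn);

        (void)old_fn;
        /* ... bpf_object__open()/bpf_object__load() messages now flow
         * through print_fn ...
         */
        return 0;
}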
Additional Documentation
========================


@@ -1115,7 +1115,6 @@ enum bpf_attach_type {
BPF_CGROUP_UNIX_GETSOCKNAME,
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
@@ -1136,7 +1135,6 @@ enum bpf_link_type {
BPF_LINK_TYPE_TCX = 11,
BPF_LINK_TYPE_UPROBE_MULTI = 12,
BPF_LINK_TYPE_NETKIT = 13,
BPF_LINK_TYPE_SOCKMAP = 14,
__MAX_BPF_LINK_TYPE,
};
@@ -1425,8 +1423,6 @@ enum {
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
/* If set, XDP frames will be transmitted after processing */
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
@@ -3398,10 +3394,6 @@ union bpf_attr {
* for the nexthop. If the src addr cannot be derived,
* **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
* case, *params*->dmac and *params*->smac are not set either.
* **BPF_FIB_LOOKUP_MARK**
* Use the mark present in *params*->mark for the fib lookup.
* This option should not be used with BPF_FIB_LOOKUP_DIRECT,
* as it only has meaning for full lookups.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -5030,7 +5022,7 @@ union bpf_attr {
* bytes will be copied to *dst*
* Return
* The **hash_algo** is returned on success,
* **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
* invalid arguments are passed.
*
* struct socket *bpf_sock_from_file(struct file *file)
@@ -5516,7 +5508,7 @@ union bpf_attr {
* bytes will be copied to *dst*
* Return
* The **hash_algo** is returned on success,
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
* void *bpf_kptr_xchg(void *map_value, void *ptr)
@@ -6209,17 +6201,12 @@ union { \
__u64 :64; \
} __attribute__((aligned(8)))
/* The enum used in skb->tstamp_type. It specifies the clock type
* of the time stored in the skb->tstamp.
*/
enum {
BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */
BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */
BPF_SKB_CLOCK_REALTIME = 0,
BPF_SKB_CLOCK_MONOTONIC = 1,
BPF_SKB_CLOCK_TAI = 2,
/* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
* the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
BPF_SKB_TSTAMP_UNSPEC,
BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
/* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
* the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
* and try to deduce it by ingress, egress or skb->sk->sk_clockid.
*/
};
@@ -6733,10 +6720,6 @@ struct bpf_link_info {
__u32 ifindex;
__u32 attach_type;
} netkit;
struct {
__u32 map_id;
__u32 attach_type;
} sockmap;
};
} __attribute__((aligned(8)));
@@ -6955,8 +6938,6 @@ enum {
* socket transition to LISTEN state.
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
* Arg1: measured RTT input (mrtt)
* Arg2: updated srtt
*/
BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
* It will be called to handle
@@ -7139,7 +7120,6 @@ enum {
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
BPF_FIB_LOOKUP_TBID = (1U << 3),
BPF_FIB_LOOKUP_SRC = (1U << 4),
BPF_FIB_LOOKUP_MARK = (1U << 5),
};
enum {
@@ -7172,7 +7152,7 @@ struct bpf_fib_lookup {
/* output: MTU value */
__u16 mtu_result;
} __attribute__((packed, aligned(2)));
};
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
@@ -7217,19 +7197,8 @@ struct bpf_fib_lookup {
__u32 tbid;
};
union {
/* input */
struct {
__u32 mark; /* policy routing */
/* 2 4-byte holes for input */
};
/* output: source and dest mac */
struct {
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
};
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
struct bpf_redir_neigh {
@@ -7316,10 +7285,6 @@ struct bpf_timer {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_wq {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 __opaque[2];
} __attribute__((aligned(8)));
@@ -7512,13 +7477,4 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
/*
* Flags to control BPF kfunc behaviour.
* - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
* helper documentation for details.)
*/
enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};
#endif /* _UAPI__LINUX_BPF_H__ */
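As context for the skb->tstamp_type churn above, here is a minimal tc-program sketch using the bpf_skb_set_tstamp() helper whose prototype appears later in this diff. The attach point is illustrative; BPF_SKB_TSTAMP_DELIVERY_MONO is defined on both sides of this diff (deprecated but kept on the newer side).

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tc")
int set_mono_tstamp(struct __sk_buff *skb)
{
        __u64 now = bpf_ktime_get_ns();

        /* Stamp the packet with a monotonic delivery time. */
        bpf_skb_set_tstamp(skb, now, BPF_SKB_TSTAMP_DELIVERY_MONO);
        return 0; /* TC_ACT_OK */
}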

include/uapi/linux/fcntl.h

@@ -0,0 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_FCNTL_H
#define _UAPI_LINUX_FCNTL_H
#include <asm/fcntl.h>
#include <linux/openat2.h>
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
*/
#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5)
/* Create a file descriptor with FD_CLOEXEC set. */
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
/*
* Request nofications on a directory.
* See below for events that may be notified.
*/
#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
/*
* Set and get of pipe page size array
*/
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
/*
* Set/Get seals
*/
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
/*
* Types of seals
*/
#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */
/* (1U << 31) is reserved for signed error codes */
/*
* Set/Get write life time hints. {GET,SET}_RW_HINT operate on the
* underlying inode, while {GET,SET}_FILE_RW_HINT operate only on
* the specific file.
*/
#define F_GET_RW_HINT (F_LINUX_SPECIFIC_BASE + 11)
#define F_SET_RW_HINT (F_LINUX_SPECIFIC_BASE + 12)
#define F_GET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 13)
#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
/*
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
* used to clear any hints previously set.
*/
#define RWH_WRITE_LIFE_NOT_SET 0
#define RWH_WRITE_LIFE_NONE 1
#define RWH_WRITE_LIFE_SHORT 2
#define RWH_WRITE_LIFE_MEDIUM 3
#define RWH_WRITE_LIFE_LONG 4
#define RWH_WRITE_LIFE_EXTREME 5
/*
* The originally introduced spelling is remained from the first
* versions of the patch set that introduced the feature, see commit
* v4.13-rc1~212^2~51.
*/
#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
#define DN_MODIFY 0x00000002 /* File modified */
#define DN_CREATE 0x00000004 /* File created */
#define DN_DELETE 0x00000008 /* File removed */
#define DN_RENAME 0x00000010 /* File renamed */
#define DN_ATTRIB 0x00000020 /* File changed attibutes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
/*
* The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
* meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
* unlinkat. The two functions do completely different things and therefore,
* the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to
* faccessat would be undefined behavior and thus treating it equivalent to
* AT_EACCESS is valid undefined behavior.
*/
#define AT_FDCWD -100 /* Special value used to indicate
openat should use the current
working directory. */
#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
#define AT_EACCESS 0x200 /* Test access permitted for
effective IDs, not real IDs. */
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
unlinking file. */
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
compare object identity and may not
be usable to open_by_handle_at(2) */
#if defined(__KERNEL__)
#define AT_GETATTR_NOSEC 0x80000000
#endif
#endif /* _UAPI_LINUX_FCNTL_H */
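The F_{GET,SET}_RW_HINT fcntls above take a pointer to a 64-bit hint value. A minimal sketch, assuming a libc that exposes these constants through <fcntl.h> with _GNU_SOURCE (on older libcs they may only be available from <linux/fcntl.h>); the function name is hypothetical.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

int mark_short_lived(int fd)
{
        uint64_t hint = RWH_WRITE_LIFE_SHORT;

        /* Set the expected write lifetime on the underlying inode. */
        if (fcntl(fd, F_SET_RW_HINT, &hint) == -1)
                return -1;
        /* Read it back through the same pointer convention. */
        if (fcntl(fd, F_GET_RW_HINT, &hint) == -1)
                return -1;
        printf("write hint: %llu\n", (unsigned long long)hint);
        return 0;
}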


@@ -41,10 +41,6 @@
*/
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
/* Request to reserve tx_metadata_len bytes of per-chunk metadata.
*/
#define XDP_UMEM_TX_METADATA_LEN (1 << 2)
struct sockaddr_xdp {
__u16 sxdp_family;
__u16 sxdp_flags;


@@ -146,28 +146,6 @@ enum {
NETDEV_A_QSTATS_TX_PACKETS,
NETDEV_A_QSTATS_TX_BYTES,
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
NETDEV_A_QSTATS_RX_HW_DROPS,
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
NETDEV_A_QSTATS_RX_CSUM_NONE,
NETDEV_A_QSTATS_RX_CSUM_BAD,
NETDEV_A_QSTATS_RX_HW_GRO_PACKETS,
NETDEV_A_QSTATS_RX_HW_GRO_BYTES,
NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS,
NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES,
NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS,
NETDEV_A_QSTATS_TX_HW_DROPS,
NETDEV_A_QSTATS_TX_HW_DROP_ERRORS,
NETDEV_A_QSTATS_TX_CSUM_NONE,
NETDEV_A_QSTATS_TX_NEEDS_CSUM,
NETDEV_A_QSTATS_TX_HW_GSO_PACKETS,
NETDEV_A_QSTATS_TX_HW_GSO_BYTES,
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
NETDEV_A_QSTATS_TX_STOP,
NETDEV_A_QSTATS_TX_WAKE,
__NETDEV_A_QSTATS_MAX,
NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)


@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_OPENAT2_H
#define _UAPI_LINUX_OPENAT2_H
#include <linux/types.h>
/*
* Arguments for how openat2(2) should open the target path. If only @flags and
* @mode are non-zero, then openat2(2) operates very similarly to openat(2).
*
* However, unlike openat(2), unknown or invalid bits in @flags result in
* -EINVAL rather than being silently ignored. @mode must be zero unless one of
* {O_CREAT, O_TMPFILE} are set.
*
* @flags: O_* flags.
* @mode: O_CREAT/O_TMPFILE file mode.
* @resolve: RESOLVE_* flags.
*/
struct open_how {
__u64 flags;
__u64 mode;
__u64 resolve;
};
/* how->resolve flags for openat2(2). */
#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings
(includes bind-mounts). */
#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style
"magic-links". */
#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks
(implies OEXT_NO_MAGICLINKS) */
#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like
"..", symlinks, and absolute
paths which escape the dirfd. */
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
be scoped inside the dirfd
(similar to chroot(2)). */
#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
completed through cached lookup. May
return -EAGAIN if that's not
possible. */
#endif /* _UAPI_LINUX_OPENAT2_H */
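A minimal sketch of calling openat2(2) with the struct open_how and RESOLVE_* flags defined above. There is no glibc wrapper, so the raw syscall is used; the helper name and its dirfd/path arguments are the caller's.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/openat2.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open path relative to dirfd, refusing "..", absolute paths and
 * symlinks that would escape dirfd.
 */
int open_beneath(int dirfd, const char *path)
{
        struct open_how how;

        memset(&how, 0, sizeof(how));
        how.flags = O_RDONLY;
        how.resolve = RESOLVE_BENEATH;

        return (int)syscall(SYS_openat2, dirfd, path, &how, sizeof(how));
}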


@@ -1349,14 +1349,12 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
/* 0x7 available */
/* 5-0x7 available */
#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */


@@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -eu
usage () {
echo "USAGE: ./mailmap-update.sh <libbpf-repo> <linux-repo>"
exit 1
}
LIBBPF_REPO="${1-""}"
LINUX_REPO="${2-""}"
if [ -z "${LIBBPF_REPO}" ] || [ -z "${LINUX_REPO}" ]; then
echo "Error: libbpf or linux repos are not specified"
usage
fi
LIBBPF_MAILMAP="${LIBBPF_REPO}/.mailmap"
LINUX_MAILMAP="${LINUX_REPO}/.mailmap"
tmpfile="$(mktemp)"
cleanup() {
rm -f "${tmpfile}"
}
trap cleanup EXIT
grep_lines() {
local pattern="$1"
local file="$2"
grep "${pattern}" "${file}" || true
}
while read -r email; do
grep_lines "${email}$" "${LINUX_MAILMAP}" >> "${tmpfile}"
done < <(git log --format='<%ae>' | sort -u)
sort -u "${tmpfile}" > "${LIBBPF_MAILMAP}"


@@ -295,22 +295,6 @@ Latest changes to BPF helper definitions.
" -- src/bpf_helper_defs.h
fi
echo "Regenerating .mailmap..."
cd_to "${LINUX_REPO}"
git checkout "${TIP_SYM_REF}"
cd_to "${LIBBPF_REPO}"
"${LIBBPF_REPO}"/scripts/mailmap-update.sh "${LIBBPF_REPO}" "${LINUX_REPO}"
# if anything changed, commit it
mailmap_changes=$(git status --porcelain .mailmap | wc -l)
if ((${mailmap_changes} == 1)); then
git add .mailmap
git commit -s -m "sync: update .mailmap
Update .mailmap based on libbpf's list of contributors and on the latest
.mailmap version in the upstream repository.
" -- .mailmap
fi
# Use generated cover-letter as a template for "sync commit" with
# baseline and checkpoint commits from kernel repo (and leave summary
# from cover letter intact, of course)


@@ -9,7 +9,7 @@ else
endif
LIBBPF_MAJOR_VERSION := 1
LIBBPF_MINOR_VERSION := 5
LIBBPF_MINOR_VERSION := 4
LIBBPF_PATCH_VERSION := 0
LIBBPF_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).$(LIBBPF_PATCH_VERSION)
LIBBPF_MAJMIN_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).0
@@ -55,7 +55,7 @@ STATIC_OBJDIR := $(OBJDIR)/staticobjs
OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \
btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \
relo_core.o usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
relo_core.o usdt.o zip.o elf.o features.o
SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS))
STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS))
@@ -119,13 +119,13 @@ $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION): $(SHARED_OBJS)
-Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
$^ $(ALL_LDFLAGS) -o $@
$(OBJDIR)/libbpf.pc: force | $(OBJDIR)
$(OBJDIR)/libbpf.pc: force
$(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \
-e "s|@LIBDIR@|$(LIBDIR_PC)|" \
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
< libbpf.pc.template > $@
$(OBJDIR) $(STATIC_OBJDIR) $(SHARED_OBJDIR):
$(STATIC_OBJDIR) $(SHARED_OBJDIR):
$(call msg,MKDIR,$@)
$(Q)mkdir -p $@


@@ -105,7 +105,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
*/
int probe_memcg_account(int token_fd)
{
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
struct bpf_insn insns[] = {
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(),
@@ -766,7 +766,6 @@ int bpf_link_create(int prog_fd, int target_fd,
return libbpf_err(-EINVAL);
break;
case BPF_TRACE_KPROBE_MULTI:
case BPF_TRACE_KPROBE_SESSION:
attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
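The kprobe_multi fields populated above correspond to bpf_link_create_opts on the user-facing side. A hedged sketch of an attachment call; prog_fd, syms and cnt are assumed inputs, and the wrapper function is hypothetical.

#include <bpf/bpf.h>

/* prog_fd must refer to an already-loaded kprobe program with
 * expected_attach_type BPF_TRACE_KPROBE_MULTI; syms/cnt name the
 * kernel functions to attach to.
 */
int attach_kprobe_multi(int prog_fd, const char **syms, __u32 cnt)
{
        LIBBPF_OPTS(bpf_link_create_opts, opts,
                .kprobe_multi.syms = syms,
                .kprobe_multi.cnt = cnt,
        );

        /* target_fd is unused for kprobe-multi links, hence 0. */
        return bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
}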


@@ -2,7 +2,7 @@
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__
#include "bpf_helpers.h"
#include <bpf/bpf_helpers.h>
/*
* enum bpf_field_info_kind is passed as a second argument into
@@ -104,7 +104,6 @@ enum bpf_enum_value_kind {
case 2: val = *(const unsigned short *)p; break; \
case 4: val = *(const unsigned int *)p; break; \
case 8: val = *(const unsigned long long *)p; break; \
default: val = 0; break; \
} \
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
if (__CORE_RELO(s, field, SIGNED)) \


@@ -1851,10 +1851,6 @@ static long (* const bpf_skb_load_bytes_relative)(const void *skb, __u32 offset,
* for the nexthop. If the src addr cannot be derived,
* **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
* case, *params*->dmac and *params*->smac are not set either.
* **BPF_FIB_LOOKUP_MARK**
* Use the mark present in *params*->mark for the fib lookup.
* This option should not be used with BPF_FIB_LOOKUP_DIRECT,
* as it only has meaning for full lookups.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -3802,7 +3798,7 @@ static __u64 (* const bpf_ktime_get_coarse_ns)(void) = (void *) 160;
*
* Returns
* The **hash_algo** is returned on success,
* **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
* invalid arguments are passed.
*/
static long (* const bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161;
@@ -4416,7 +4412,7 @@ static long (* const bpf_skb_set_tstamp)(struct __sk_buff *skb, __u64 tstamp, __
*
* Returns
* The **hash_algo** is returned on success,
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*/
static long (* const bpf_ima_file_hash)(struct file *file, void *dst, __u32 size) = (void *) 193;


@@ -137,8 +137,7 @@
/*
* Helper function to perform a tail call with a constant/immediate map slot.
*/
#if (defined(__clang__) && __clang_major__ >= 8) || (!defined(__clang__) && __GNUC__ > 12)
#if defined(__bpf__)
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
@@ -166,7 +165,6 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
: "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
#endif
enum libbpf_pin_type {
LIBBPF_PIN_NONE,
@@ -186,21 +184,10 @@ enum libbpf_tristate {
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
#if defined (__clang__)
#define bpf_ksym_exists(sym) ({ \
_Static_assert(!__builtin_constant_p(!!sym), \
#sym " should be marked as __weak"); \
!!sym; \
#define bpf_ksym_exists(sym) ({ \
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
!!sym; \
})
#elif __GNUC__ > 8
#define bpf_ksym_exists(sym) ({ \
_Static_assert(__builtin_has_attribute (*sym, __weak__), \
#sym " should be marked as __weak"); \
!!sym; \
})
#else
#define bpf_ksym_exists(sym) !!sym
#endif
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
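A short sketch of what the bpf_ksym_exists() macro above is for, modeled on the kernel selftests: bpf_task_acquire()/bpf_task_release() are real kfuncs and tp_btf/task_newtask is a real hook, but their use here is illustrative. The __weak marking is exactly what the _Static_assert in the macro enforces.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* Optional kfuncs: __weak lets the program load on kernels lacking them. */
extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
extern void bpf_task_release(struct task_struct *p) __ksym __weak;

SEC("tp_btf/task_newtask")
int BPF_PROG(handle_newtask, struct task_struct *task, u64 clone_flags)
{
        struct task_struct *acquired;

        if (!bpf_ksym_exists(bpf_task_acquire))
                return 0; /* kernel too old; skip gracefully */

        acquired = bpf_task_acquire(task);
        if (acquired)
                bpf_task_release(acquired);
        return 0;
}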


@@ -633,18 +633,18 @@ struct pt_regs;
#endif
#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), ctx[11]
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
/*
@@ -786,14 +786,14 @@ ____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
struct pt_regs;
#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (unsigned long long)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (unsigned long long)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (unsigned long long)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (unsigned long long)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (unsigned long long)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (unsigned long long)PT_REGS_PARM6(ctx)
#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (unsigned long long)PT_REGS_PARM7(ctx)
#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (unsigned long long)PT_REGS_PARM8(ctx)
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx)
#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (void *)PT_REGS_PARM7(ctx)
#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (void *)PT_REGS_PARM8(ctx)
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
/*
@@ -821,7 +821,7 @@ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (unsigned long long)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
/*
@@ -845,24 +845,24 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
#define ___bpf_syscall_args0() ctx
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (unsigned long long)PT_REGS_PARM1_SYSCALL(regs)
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (unsigned long long)PT_REGS_PARM2_SYSCALL(regs)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (unsigned long long)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (unsigned long long)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (unsigned long long)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (unsigned long long)PT_REGS_PARM6_SYSCALL(regs)
#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (unsigned long long)PT_REGS_PARM7_SYSCALL(regs)
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (void *)PT_REGS_PARM6_SYSCALL(regs)
#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (void *)PT_REGS_PARM7_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
#define ___bpf_syswrap_args0() ctx
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (unsigned long long)PT_REGS_PARM1_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (unsigned long long)PT_REGS_PARM2_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (unsigned long long)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (unsigned long long)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (unsigned long long)PT_REGS_PARM5_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (unsigned long long)PT_REGS_PARM6_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (unsigned long long)PT_REGS_PARM7_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (void *)PT_REGS_PARM6_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (void *)PT_REGS_PARM7_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
/*

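The ___bpf_kprobe_args* casts above are what BPF_KPROBE expands to. A typical use, adapted from the common libbpf-bootstrap pattern (do_unlinkat and its struct filename argument are the usual demo target):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
{
        /* Each named parameter is pulled out of pt_regs by the macro. */
        const char *filename = BPF_CORE_READ(name, name);

        bpf_printk("unlinkat dfd=%d name=%s", dfd, filename);
        return 0;
}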
src/btf.c

@@ -116,9 +116,6 @@ struct btf {
/* whether strings are already deduplicated */
bool strs_deduped;
/* whether base_btf should be freed in btf_free for this instance */
bool owns_base;
/* BTF object FD, if loaded into kernel */
int fd;
@@ -601,7 +598,7 @@ static int btf_sanity_check(const struct btf *btf)
__u32 i, n = btf__type_cnt(btf);
int err;
for (i = btf->start_id; i < n; i++) {
for (i = 1; i < n; i++) {
t = btf_type_by_id(btf, i);
err = btf_validate_type(btf, t, i);
if (err)
@@ -972,8 +969,6 @@ void btf__free(struct btf *btf)
free(btf->raw_data);
free(btf->raw_data_swapped);
free(btf->type_offs);
if (btf->owns_base)
btf__free(btf->base_btf);
free(btf);
}
@@ -996,7 +991,6 @@ static struct btf *btf_new_empty(struct btf *base_btf)
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
btf->swapped_endian = base_btf->swapped_endian;
}
/* +1 for empty string at offset 0 */
@@ -1090,86 +1084,16 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
return libbpf_ptr(btf_new(data, size, base_btf));
}
struct btf_elf_secs {
Elf_Data *btf_data;
Elf_Data *btf_ext_data;
Elf_Data *btf_base_data;
};
static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)
{
Elf_Scn *scn = NULL;
Elf_Data *data;
GElf_Ehdr ehdr;
size_t shstrndx;
int idx = 0;
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("failed to get EHDR from %s\n", path);
goto err;
}
if (elf_getshdrstrndx(elf, &shstrndx)) {
pr_warn("failed to get section names section index for %s\n",
path);
goto err;
}
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
goto err;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
Elf_Data **field;
GElf_Shdr sh;
char *name;
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path);
goto err;
}
name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
goto err;
}
if (strcmp(name, BTF_ELF_SEC) == 0)
field = &secs->btf_data;
else if (strcmp(name, BTF_EXT_ELF_SEC) == 0)
field = &secs->btf_ext_data;
else if (strcmp(name, BTF_BASE_ELF_SEC) == 0)
field = &secs->btf_base_data;
else
continue;
data = elf_getdata(scn, 0);
if (!data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path);
goto err;
}
*field = data;
}
return 0;
err:
return -LIBBPF_ERRNO__FORMAT;
}
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
struct btf_ext **btf_ext)
{
struct btf_elf_secs secs = {};
struct btf *dist_base_btf = NULL;
Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
int err = 0, fd = -1, idx = 0;
struct btf *btf = NULL;
int err = 0, fd = -1;
Elf_Scn *scn = NULL;
Elf *elf = NULL;
GElf_Ehdr ehdr;
size_t shstrndx;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n", path);
@@ -1183,48 +1107,73 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
return ERR_PTR(err);
}
err = -LIBBPF_ERRNO__FORMAT;
elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) {
pr_warn("failed to open %s as ELF file\n", path);
goto done;
}
err = btf_find_elf_sections(elf, path, &secs);
if (err)
if (!gelf_getehdr(elf, &ehdr)) {
pr_warn("failed to get EHDR from %s\n", path);
goto done;
}
if (!secs.btf_data) {
if (elf_getshdrstrndx(elf, &shstrndx)) {
pr_warn("failed to get section names section index for %s\n",
path);
goto done;
}
if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
goto done;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
GElf_Shdr sh;
char *name;
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path);
goto done;
}
name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
goto done;
}
if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = elf_getdata(scn, 0);
if (!btf_data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path);
goto done;
}
continue;
} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = elf_getdata(scn, 0);
if (!btf_ext_data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path);
goto done;
}
continue;
}
}
if (!btf_data) {
pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
err = -ENODATA;
goto done;
}
if (secs.btf_base_data) {
dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size,
NULL);
if (IS_ERR(dist_base_btf)) {
err = PTR_ERR(dist_base_btf);
dist_base_btf = NULL;
goto done;
}
}
btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size,
dist_base_btf ?: base_btf);
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
err = libbpf_get_error(btf);
if (err)
goto done;
}
if (dist_base_btf && base_btf) {
err = btf__relocate(btf, base_btf);
if (err)
goto done;
btf__free(dist_base_btf);
dist_base_btf = NULL;
}
if (dist_base_btf)
btf->owns_base = true;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
@@ -1238,12 +1187,11 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
break;
}
if (btf_ext && secs.btf_ext_data) {
*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size);
if (IS_ERR(*btf_ext)) {
err = PTR_ERR(*btf_ext);
if (btf_ext && btf_ext_data) {
*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
err = libbpf_get_error(*btf_ext);
if (err)
goto done;
}
} else if (btf_ext) {
*btf_ext = NULL;
}
@@ -1257,7 +1205,6 @@ done:
if (btf_ext)
btf_ext__free(*btf_ext);
btf__free(dist_base_btf);
btf__free(btf);
return ERR_PTR(err);
@@ -1792,8 +1739,9 @@ struct btf_pipe {
struct hashmap *str_off_map; /* map string offsets from src to dst */
};
static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
static int btf_rewrite_str(__u32 *str_off, void *ctx)
{
struct btf_pipe *p = ctx;
long mapped_off;
int off, err;
@@ -1823,11 +1771,10 @@ static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
return 0;
}
static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
struct btf_field_iter it;
struct btf_pipe p = { .src = src_btf, .dst = btf };
struct btf_type *t;
__u32 *str_off;
int sz, err;
sz = btf_type_size(src_type);
@@ -1835,33 +1782,35 @@ static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)
return libbpf_err(sz);
/* deconstruct BTF, if necessary, and invalidate raw_data */
if (btf_ensure_modifiable(p->dst))
if (btf_ensure_modifiable(btf))
return libbpf_err(-ENOMEM);
t = btf_add_type_mem(p->dst, sz);
t = btf_add_type_mem(btf, sz);
if (!t)
return libbpf_err(-ENOMEM);
memcpy(t, src_type, sz);
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
if (err)
return libbpf_err(err);
while ((str_off = btf_field_iter_next(&it))) {
err = btf_rewrite_str(p, str_off);
if (err)
return libbpf_err(err);
}
return btf_commit_type(p->dst, sz);
return btf_commit_type(btf, sz);
}
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
{
struct btf_pipe p = { .src = src_btf, .dst = btf };
struct btf *btf = ctx;
return btf_add_type(&p, src_type);
if (!*type_id) /* nothing to do for VOID references */
return 0;
/* we haven't updated btf's type count yet, so
* btf->start_id + btf->nr_types - 1 is the type ID offset we should
* add to all newly added BTF types
*/
*type_id += btf->start_id + btf->nr_types - 1;
return 0;
}
static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
@@ -1909,9 +1858,6 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
memcpy(t, src_btf->types_data, data_sz);
for (i = 0; i < cnt; i++) {
struct btf_field_iter it;
__u32 *type_id, *str_off;
sz = btf_type_size(t);
if (sz < 0) {
/* unlikely, has to be corrupted src_btf */
@@ -1923,31 +1869,15 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
*off = t - btf->types_data;
/* add, dedup, and remap strings referenced by this BTF type */
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
if (err)
goto err_out;
while ((str_off = btf_field_iter_next(&it))) {
err = btf_rewrite_str(&p, str_off);
if (err)
goto err_out;
}
/* remap all type IDs referenced from this BTF type */
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
if (err)
goto err_out;
while ((type_id = btf_field_iter_next(&it))) {
if (!*type_id) /* nothing to do for VOID references */
continue;
/* we haven't updated btf's type count yet, so
* btf->start_id + btf->nr_types - 1 is the type ID offset we should
* add to all newly added BTF types
*/
*type_id += btf->start_id + btf->nr_types - 1;
}
/* go to next type data and type offset index entry */
t += sz;
off++;
@@ -3523,19 +3453,11 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *
int i, r;
for (i = 0; i < d->btf->nr_types; i++) {
struct btf_field_iter it;
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
__u32 *str_off;
r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
r = btf_type_visit_str_offs(t, fn, ctx);
if (r)
return r;
while ((str_off = btf_field_iter_next(&it))) {
r = fn(str_off, ctx);
if (r)
return r;
}
}
if (!d->btf_ext)
@@ -4997,23 +4919,10 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
for (i = 0; i < d->btf->nr_types; i++) {
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
struct btf_field_iter it;
__u32 *type_id;
r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
if (r)
return r;
while ((type_id = btf_field_iter_next(&it))) {
__u32 resolved_id, new_id;
resolved_id = resolve_type_id(d, *type_id);
new_id = d->hypot_map[resolved_id];
if (new_id > BTF_MAX_NR_TYPES)
return -EINVAL;
*type_id = new_id;
}
}
if (!d->btf_ext)
@@ -5094,6 +5003,136 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt
return btf__parse_split(path, vmlinux_btf);
}
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
int i, n, err;
switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
return 0;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
return visit(&t->type, ctx);
case BTF_KIND_ARRAY: {
struct btf_array *a = btf_array(t);
err = visit(&a->type, ctx);
err = err ?: visit(&a->index_type, ctx);
return err;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);
err = visit(&t->type, ctx);
if (err)
return err;
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}
case BTF_KIND_DATASEC: {
struct btf_var_secinfo *m = btf_var_secinfos(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}
default:
return -EINVAL;
}
}
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
int i, n, err;
err = visit(&t->name_off, ctx);
if (err)
return err;
switch (btf_kind(t)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM: {
struct btf_enum *m = btf_enum(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM64: {
struct btf_enum64 *m = btf_enum64(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
default:
break;
}
return 0;
}
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
@@ -5173,328 +5212,3 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
return 0;
}
struct btf_distill {
struct btf_pipe pipe;
int *id_map;
unsigned int split_start_id;
unsigned int split_start_str;
int diff_id;
};
static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i)
{
struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i);
struct btf_field_iter it;
__u32 *id;
int err;
err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it))) {
struct btf_type *base_t;
if (!*id)
continue;
/* split BTF id, not needed */
if (*id >= dist->split_start_id)
continue;
/* already added ? */
if (dist->id_map[*id] > 0)
continue;
/* only a subset of base BTF types should be referenced from
* split BTF; ensure nothing unexpected is referenced.
*/
base_t = btf_type_by_id(dist->pipe.src, *id);
switch (btf_kind(base_t)) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
case BTF_KIND_ARRAY:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_TYPEDEF:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
case BTF_KIND_PTR:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_TYPE_TAG:
dist->id_map[*id] = *id;
break;
default:
pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n",
*id, btf_kind(base_t));
return -EINVAL;
}
/* If a base type is used, ensure types it refers to are
* marked as used also; so for example if we find a PTR to INT
* we need both the PTR and INT.
*
* The only exception is named struct/unions, since distilled
* base BTF composite types have no members.
*/
if (btf_is_composite(base_t) && base_t->name_off)
continue;
err = btf_add_distilled_type_ids(dist, *id);
if (err)
return err;
}
return 0;
}
static int btf_add_distilled_types(struct btf_distill *dist)
{
bool adding_to_base = dist->pipe.dst->start_id == 1;
int id = btf__type_cnt(dist->pipe.dst);
struct btf_type *t;
int i, err = 0;
/* Add types for each of the required references to either distilled
* base or split BTF, depending on type characteristics.
*/
for (i = 1; i < dist->split_start_id; i++) {
const char *name;
int kind;
if (!dist->id_map[i])
continue;
t = btf_type_by_id(dist->pipe.src, i);
kind = btf_kind(t);
name = btf__name_by_offset(dist->pipe.src, t->name_off);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
/* Named int, float, fwd are added to base. */
if (!adding_to_base)
continue;
err = btf_add_type(&dist->pipe, t);
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
/* Named struct/union are added to base as 0-vlen
* struct/union of same size. Anonymous struct/unions
* are added to split BTF as-is.
*/
if (adding_to_base) {
if (!t->name_off)
continue;
err = btf_add_composite(dist->pipe.dst, kind, name, t->size);
} else {
if (t->name_off)
continue;
err = btf_add_type(&dist->pipe, t);
}
break;
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
/* Named enum[64]s are added to base as a sized
* enum; relocation will match with appropriately-named
* and sized enum or enum64.
*
* Anonymous enums are added to split BTF as-is.
*/
if (adding_to_base) {
if (!t->name_off)
continue;
err = btf__add_enum(dist->pipe.dst, name, t->size);
} else {
if (t->name_off)
continue;
err = btf_add_type(&dist->pipe, t);
}
break;
case BTF_KIND_ARRAY:
case BTF_KIND_TYPEDEF:
case BTF_KIND_PTR:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_TYPE_TAG:
/* All other types are added to split BTF. */
if (adding_to_base)
continue;
err = btf_add_type(&dist->pipe, t);
break;
default:
pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n",
name, i, kind);
return -EINVAL;
}
if (err < 0)
break;
dist->id_map[i] = id++;
}
return err;
}
/* Split BTF ids without a mapping will be shifted downwards since distilled
* base BTF is smaller than the original base BTF. For those that have a
* mapping (either to base or updated split BTF), update the id based on
* that mapping.
*/
static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i)
{
struct btf_type *t = btf_type_by_id(dist->pipe.dst, i);
struct btf_field_iter it;
__u32 *id;
int err;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it))) {
if (dist->id_map[*id])
*id = dist->id_map[*id];
else if (*id >= dist->split_start_id)
*id -= dist->diff_id;
}
return 0;
}
/* Create updated split BTF with distilled base BTF; distilled base BTF
* consists of BTF information required to clarify the types that split
* BTF refers to, omitting unneeded details. Specifically it will contain
* base types and memberless definitions of named structs, unions and enumerated
* types. Associated reference types like pointers, arrays and anonymous
* structs, unions and enumerated types will be added to split BTF.
* Size is recorded for named struct/unions to help guide matching to the
* target base BTF during later relocation.
*
* The only case where structs, unions or enumerated types are fully represented
* is when they are anonymous; in such cases, the anonymous type is added to
* split BTF in full.
*
* We return newly-created split BTF where the split BTF refers to a newly-created
* distilled base BTF. Both must be freed separately by the caller.
*/
int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
struct btf **new_split_btf)
{
struct btf *new_base = NULL, *new_split = NULL;
const struct btf *old_base;
unsigned int n = btf__type_cnt(src_btf);
struct btf_distill dist = {};
struct btf_type *t;
int i, err = 0;
/* src BTF must be split BTF. */
old_base = btf__base_btf(src_btf);
if (!new_base_btf || !new_split_btf || !old_base)
return libbpf_err(-EINVAL);
new_base = btf__new_empty();
if (!new_base)
return libbpf_err(-ENOMEM);
btf__set_endianness(new_base, btf__endianness(src_btf));
dist.id_map = calloc(n, sizeof(*dist.id_map));
if (!dist.id_map) {
err = -ENOMEM;
goto done;
}
dist.pipe.src = src_btf;
dist.pipe.dst = new_base;
dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
if (IS_ERR(dist.pipe.str_off_map)) {
err = -ENOMEM;
goto done;
}
dist.split_start_id = btf__type_cnt(old_base);
dist.split_start_str = old_base->hdr->str_len;
/* Pass over src split BTF; generate the list of base BTF type ids it
* references; these will constitute our distilled BTF set to be
* distributed over base and split BTF as appropriate.
*/
for (i = src_btf->start_id; i < n; i++) {
err = btf_add_distilled_type_ids(&dist, i);
if (err < 0)
goto done;
}
/* Next add types for each of the required references to base BTF and split BTF
* in turn.
*/
err = btf_add_distilled_types(&dist);
if (err < 0)
goto done;
/* Create new split BTF with distilled base BTF as its base; the final
* state is split BTF with distilled base BTF that represents enough
* about its base references to allow it to be relocated with the base
* BTF available.
*/
new_split = btf__new_empty_split(new_base);
if (!new_split) {
err = -errno;
goto done;
}
dist.pipe.dst = new_split;
/* First add all split types */
for (i = src_btf->start_id; i < n; i++) {
t = btf_type_by_id(src_btf, i);
err = btf_add_type(&dist.pipe, t);
if (err < 0)
goto done;
}
/* Now add distilled types to split BTF that are not added to base. */
err = btf_add_distilled_types(&dist);
if (err < 0)
goto done;
/* All split BTF ids will be shifted downwards since there are fewer base
* BTF ids in distilled base BTF.
*/
dist.diff_id = dist.split_start_id - btf__type_cnt(new_base);
n = btf__type_cnt(new_split);
/* Now update base/split BTF ids. */
for (i = 1; i < n; i++) {
err = btf_update_distilled_type_ids(&dist, i);
if (err < 0)
break;
}
done:
free(dist.id_map);
hashmap__free(dist.pipe.str_off_map);
if (err) {
btf__free(new_split);
btf__free(new_base);
return libbpf_err(err);
}
*new_base_btf = new_base;
*new_split_btf = new_split;
return 0;
}
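
A minimal usage sketch of the function above (error handling elided; the input path and the btf__parse_split() call are assumptions about how a caller might obtain split BTF):

struct btf *vmlinux = btf__load_vmlinux_btf();
struct btf *src = btf__parse_split("module.btf", vmlinux);	/* hypothetical input */
struct btf *base = NULL, *split = NULL;
int err = btf__distill_base(src, &base, &split);

if (!err) {
	/* 'split' now references 'base'; free both separately */
	btf__free(split);
	btf__free(base);
}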
const struct btf_header *btf_header(const struct btf *btf)
{
return btf->hdr;
}
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
btf->base_btf = (struct btf *)base_btf;
btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
}
int btf__relocate(struct btf *btf, const struct btf *base_btf)
{
int err = btf_relocate(btf, base_btf, NULL);
if (!err)
btf->owns_base = false;
return libbpf_err(err);
}
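
Continuing that sketch, relocating the split BTF against the running kernel's BTF (handles reused from the previous snippet):

int err = btf__relocate(split, vmlinux);

if (err)
	fprintf(stderr, "failed to relocate split BTF: %d\n", err);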

View File

@@ -18,7 +18,6 @@ extern "C" {
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
#define BTF_BASE_ELF_SEC ".BTF.base"
#define MAPS_ELF_SEC ".maps"
struct btf;
@@ -108,27 +107,6 @@ LIBBPF_API struct btf *btf__new_empty(void);
*/
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
/**
* @brief **btf__distill_base()** creates new versions of the split BTF
* *src_btf* and its base BTF. The new base BTF will only contain the types
* needed to improve robustness of the split BTF to small changes in base BTF.
* When that split BTF is loaded against a (possibly changed) base, this
* distilled base BTF will help update references to that (possibly changed)
* base BTF.
*
* Both the new split and its associated new base BTF must be freed by
* the caller.
*
* If successful, 0 is returned and **new_base_btf** and **new_split_btf**
* will point at the new base/split BTF.
*
* A negative value is returned on error and the thread-local `errno` variable
* is set to the error code as well.
*/
LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
struct btf **new_split_btf);
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
@@ -253,20 +231,6 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
/**
* @brief **btf__relocate()** will check the split BTF *btf* for references
* to base BTF kinds, and verify those references are compatible with
* *base_btf*; if they are, *btf* is adjusted such that it is re-parented to
* *base_btf* and type ids and strings are adjusted to accommodate this.
*
* If successful, 0 is returned and **btf** now has **base_btf** as its
* base.
*
* A negative value is returned on error and the thread-local `errno` variable
* is set to the error code as well.
*/
LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
struct btf_dump;
struct btf_dump_opts {

View File

@@ -1559,12 +1559,10 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
* Clang for BPF target generates func_proto with no
* args as a func_proto with a single void arg (e.g.,
* `int (*f)(void)` vs just `int (*f)()`). We are
* going to emit valid empty args (void) syntax for
* such a case. Similarly and conveniently, the valid
* no-args case can be special-cased here as well.
* going to pretend there are no args for such a case.
*/
if (vlen == 0 || (vlen == 1 && p->type == 0)) {
btf_dump_printf(d, "void)");
if (vlen == 1 && p->type == 0) {
btf_dump_printf(d, ")");
return;
}
@@ -1931,7 +1929,6 @@ static int btf_dump_int_data(struct btf_dump *d,
if (d->typed_dump->is_array_terminated)
break;
if (*(char *)data == '\0') {
btf_dump_type_values(d, "'\\0'");
d->typed_dump->is_array_terminated = true;
break;
}
@@ -2034,7 +2031,6 @@ static int btf_dump_array_data(struct btf_dump *d,
__u32 i, elem_type_id;
__s64 elem_size;
bool is_array_member;
bool is_array_terminated;
elem_type_id = array->type;
elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
@@ -2070,15 +2066,12 @@ static int btf_dump_array_data(struct btf_dump *d,
*/
is_array_member = d->typed_dump->is_array_member;
d->typed_dump->is_array_member = true;
is_array_terminated = d->typed_dump->is_array_terminated;
d->typed_dump->is_array_terminated = false;
for (i = 0; i < array->nelems; i++, data += elem_size) {
if (d->typed_dump->is_array_terminated)
break;
btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0);
}
d->typed_dump->is_array_member = is_array_member;
d->typed_dump->is_array_terminated = is_array_terminated;
d->typed_dump->depth--;
btf_dump_data_pfx(d);
btf_dump_type_values(d, "]");

View File

@@ -1,177 +0,0 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
#else
#include "btf.h"
#include "libbpf_internal.h"
#endif
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
enum btf_field_iter_kind iter_kind)
{
it->p = NULL;
it->m_idx = -1;
it->off_idx = 0;
it->vlen = 0;
switch (iter_kind) {
case BTF_FIELD_ITER_IDS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
break;
case BTF_KIND_ARRAY:
it->desc = (struct btf_field_desc) {
2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, type)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, type)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, type)}
};
break;
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
0, {},
sizeof(struct btf_var_secinfo),
1, {offsetof(struct btf_var_secinfo, type)}
};
break;
default:
return -EINVAL;
}
break;
case BTF_FIELD_ITER_STRS:
switch (btf_kind(t)) {
case BTF_KIND_UNKN:
it->desc = (struct btf_field_desc) {};
break;
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_FWD:
case BTF_KIND_ARRAY:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
case BTF_KIND_DATASEC:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)}
};
break;
case BTF_KIND_ENUM:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum),
1, {offsetof(struct btf_enum, name_off)}
};
break;
case BTF_KIND_ENUM64:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_enum64),
1, {offsetof(struct btf_enum64, name_off)}
};
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_member),
1, {offsetof(struct btf_member, name_off)}
};
break;
case BTF_KIND_FUNC_PROTO:
it->desc = (struct btf_field_desc) {
1, {offsetof(struct btf_type, name_off)},
sizeof(struct btf_param),
1, {offsetof(struct btf_param, name_off)}
};
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
if (it->desc.m_sz)
it->vlen = btf_vlen(t);
it->p = t;
return 0;
}
__u32 *btf_field_iter_next(struct btf_field_iter *it)
{
if (!it->p)
return NULL;
if (it->m_idx < 0) {
if (it->off_idx < it->desc.t_off_cnt)
return it->p + it->desc.t_offs[it->off_idx++];
/* move to per-member iteration */
it->m_idx = 0;
it->p += sizeof(struct btf_type);
it->off_idx = 0;
}
/* if type doesn't have members, stop */
if (it->desc.m_sz == 0) {
it->p = NULL;
return NULL;
}
if (it->off_idx >= it->desc.m_off_cnt) {
/* exhausted this member's fields, go to the next member */
it->m_idx++;
it->p += it->desc.m_sz;
it->off_idx = 0;
}
if (it->m_idx < it->vlen)
return it->p + it->desc.m_offs[it->off_idx++];
it->p = NULL;
return NULL;
}

View File

@@ -1,519 +0,0 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#define btf_type_by_id (struct btf_type *)btf_type_by_id
#define btf__type_cnt btf_nr_types
#define btf__base_btf btf_base_btf
#define btf__name_by_offset btf_name_by_offset
#define btf__str_by_offset btf_str_by_offset
#define btf_kflag btf_type_kflag
#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
#define free(ptr) kvfree(ptr)
#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
#else
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#endif /* __KERNEL__ */
struct btf;
struct btf_relocate {
struct btf *btf;
const struct btf *base_btf;
const struct btf *dist_base_btf;
unsigned int nr_base_types;
unsigned int nr_split_types;
unsigned int nr_dist_base_types;
int dist_str_len;
int base_str_len;
__u32 *id_map;
__u32 *str_map;
};
/* Set temporarily in relocation id_map if distilled base struct/union is
* embedded in a split BTF struct/union; in such a case, size information must
* match between distilled base BTF and base BTF representation of type.
*/
#define BTF_IS_EMBEDDED ((__u32)-1)
/* <name, size, id> triple used in sorting/searching distilled base BTF. */
struct btf_name_info {
const char *name;
/* set when search requires a size match */
bool needs_size: 1;
unsigned int size: 31;
__u32 id;
};
static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i)
{
struct btf_type *t = btf_type_by_id(r->btf, i);
struct btf_field_iter it;
__u32 *id;
int err;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it)))
*id = r->id_map[*id];
return 0;
}
/* Simple string comparison used for sorting within BTF, since all distilled
* types are named. If strings match and size is non-zero for both elements,
* fall back to using size for ordering.
*/
static int cmp_btf_name_size(const void *n1, const void *n2)
{
const struct btf_name_info *ni1 = n1;
const struct btf_name_info *ni2 = n2;
int name_diff = strcmp(ni1->name, ni2->name);
if (!name_diff && ni1->needs_size && ni2->needs_size)
return ni2->size - ni1->size;
return name_diff;
}
/* Binary search with a small twist; find leftmost element that matches
* so that we can then iterate through all exact matches. So for example
* searching { "a", "bb", "bb", "c" } we would always match on the
* leftmost "bb".
*/
static struct btf_name_info *search_btf_name_size(struct btf_name_info *key,
struct btf_name_info *vals,
int nelems)
{
struct btf_name_info *ret = NULL;
int high = nelems - 1;
int low = 0;
while (low <= high) {
int mid = (low + high)/2;
struct btf_name_info *val = &vals[mid];
int diff = cmp_btf_name_size(key, val);
if (diff == 0)
ret = val;
/* even if found, keep searching for leftmost match */
if (diff <= 0)
high = mid - 1;
else
low = mid + 1;
}
return ret;
}
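
The same leftmost-match idea as a self-contained sketch on plain strings (illustrative only; the real code compares name/size pairs):

#include <string.h>

/* returns index of leftmost exact match, or -1 */
static int leftmost_match(const char **vals, int n, const char *key)
{
	int lo = 0, hi = n - 1, ret = -1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;
		int diff = strcmp(key, vals[mid]);

		if (diff == 0)
			ret = mid;
		if (diff <= 0)
			hi = mid - 1;	/* keep scanning left even after a match */
		else
			lo = mid + 1;
	}
	return ret;	/* {"a", "bb", "bb", "c"}, "bb" -> 1 */
}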
/* If a member of a split BTF struct/union refers to a base BTF
* struct/union, mark that struct/union id temporarily in the id_map
* with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef
* reference types, but if a pointer is encountered, the type is no longer
* considered embedded.
*/
static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
{
struct btf_type *t = btf_type_by_id(r->btf, i);
struct btf_field_iter it;
__u32 *id;
int err;
if (!btf_is_composite(t))
return 0;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((id = btf_field_iter_next(&it))) {
__u32 next_id = *id;
while (next_id) {
t = btf_type_by_id(r->btf, next_id);
switch (btf_kind(t)) {
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_TYPEDEF:
case BTF_KIND_TYPE_TAG:
next_id = t->type;
break;
case BTF_KIND_ARRAY: {
struct btf_array *a = btf_array(t);
next_id = a->type;
break;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
if (next_id < r->nr_dist_base_types)
r->id_map[next_id] = BTF_IS_EMBEDDED;
next_id = 0;
break;
default:
next_id = 0;
break;
}
}
}
return 0;
}
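
To illustrate the embedded-vs-pointer distinction with hypothetical types:

struct base_a { int x; };		/* lives in base BTF */
struct base_b { int y; };		/* lives in base BTF */

struct split_type {			/* lives in split BTF */
	struct base_a emb;		/* embedded: base_a is marked BTF_IS_EMBEDDED,
					 * so relocation must also match its size */
	const struct base_b *ptr;	/* behind a pointer: name match suffices */
};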
/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate
* through base BTF looking up distilled type (using binary search) equivalents.
*/
static int btf_relocate_map_distilled_base(struct btf_relocate *r)
{
struct btf_name_info *info, *info_end;
struct btf_type *base_t, *dist_t;
__u8 *base_name_cnt = NULL;
int err = 0;
__u32 id;
/* generate a sort index array of name/type ids sorted by name for
* distilled base BTF to speed name-based lookups.
*/
info = calloc(r->nr_dist_base_types, sizeof(*info));
if (!info) {
err = -ENOMEM;
goto done;
}
info_end = info + r->nr_dist_base_types;
for (id = 0; id < r->nr_dist_base_types; id++) {
dist_t = btf_type_by_id(r->dist_base_btf, id);
info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
info[id].id = id;
info[id].size = dist_t->size;
info[id].needs_size = true;
}
qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);
/* Mark distilled base struct/union members of split BTF structs/unions
* in id_map with BTF_IS_EMBEDDED; this signals that these types
* need to match both name and size, otherwise embedding the base
* struct/union in the split type is invalid.
*/
for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
err = btf_mark_embedded_composite_type_ids(r, id);
if (err)
goto done;
}
/* Collect name counts for composite types in base BTF. If multiple
* instances of a struct/union of the same name exist, we need to use
* size to determine which to map to since name alone is ambiguous.
*/
base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt));
if (!base_name_cnt) {
err = -ENOMEM;
goto done;
}
for (id = 1; id < r->nr_base_types; id++) {
base_t = btf_type_by_id(r->base_btf, id);
if (!btf_is_composite(base_t) || !base_t->name_off)
continue;
if (base_name_cnt[base_t->name_off] < 255)
base_name_cnt[base_t->name_off]++;
}
/* Now search base BTF for matching distilled base BTF types. */
for (id = 1; id < r->nr_base_types; id++) {
struct btf_name_info *dist_info, base_info = {};
int dist_kind, base_kind;
base_t = btf_type_by_id(r->base_btf, id);
/* distilled base consists of named types only. */
if (!base_t->name_off)
continue;
base_kind = btf_kind(base_t);
base_info.id = id;
base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
switch (base_kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
/* These types should match both name and size */
base_info.needs_size = true;
base_info.size = base_t->size;
break;
case BTF_KIND_FWD:
/* No size considerations for fwds. */
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
/* Size only needs to be used for struct/union if there
* are multiple types in base BTF with the same name.
* If there are multiple _distilled_ types with the same
* name (a very unlikely scenario), that doesn't matter
* unless corresponding _base_ types to match them are
* missing.
*/
base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
base_info.size = base_t->size;
break;
default:
continue;
}
/* iterate over all matching distilled base types */
for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
dist_info != NULL && dist_info < info_end &&
cmp_btf_name_size(&base_info, dist_info) == 0;
dist_info++) {
if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
id, dist_info->id);
err = -EINVAL;
goto done;
}
dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
dist_kind = btf_kind(dist_t);
/* Validate that the found distilled type is compatible.
* Do not error out on mismatch as another match may
* occur for an identically-named type.
*/
switch (dist_kind) {
case BTF_KIND_FWD:
switch (base_kind) {
case BTF_KIND_FWD:
if (btf_kflag(dist_t) != btf_kflag(base_t))
continue;
break;
case BTF_KIND_STRUCT:
if (btf_kflag(base_t))
continue;
break;
case BTF_KIND_UNION:
if (!btf_kflag(base_t))
continue;
break;
default:
continue;
}
break;
case BTF_KIND_INT:
if (dist_kind != base_kind ||
btf_int_encoding(base_t) != btf_int_encoding(dist_t))
continue;
break;
case BTF_KIND_FLOAT:
if (dist_kind != base_kind)
continue;
break;
case BTF_KIND_ENUM:
/* ENUM and ENUM64 are encoded as sized ENUM in
* distilled base BTF.
*/
if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64)
continue;
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
/* size verification is required for embedded
* struct/unions.
*/
if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
base_t->size != dist_t->size)
continue;
break;
default:
continue;
}
if (r->id_map[dist_info->id] &&
r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
/* we already have a match; this tells us that
* multiple base types of the same name
* have the same size, since for cases where
* multiple types have the same name we match
* on name and size. In this case, we have
* no way of determining which to relocate
* to in base BTF, so error out.
*/
pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
base_info.name, dist_info->id,
base_t->size, id, r->id_map[dist_info->id]);
err = -EINVAL;
goto done;
}
/* map id and name */
r->id_map[dist_info->id] = id;
r->str_map[dist_t->name_off] = base_t->name_off;
}
}
/* ensure all distilled BTF ids now have a mapping... */
for (id = 1; id < r->nr_dist_base_types; id++) {
const char *name;
if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED)
continue;
dist_t = btf_type_by_id(r->dist_base_btf, id);
name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n",
name, id);
err = -EINVAL;
break;
}
done:
free(base_name_cnt);
free(info);
return err;
}
/* distilled base should only have named int/float/enum/fwd/struct/union types. */
static int btf_relocate_validate_distilled_base(struct btf_relocate *r)
{
unsigned int i;
for (i = 1; i < r->nr_dist_base_types; i++) {
struct btf_type *t = btf_type_by_id(r->dist_base_btf, i);
int kind = btf_kind(t);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_FWD:
if (t->name_off)
break;
pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n",
i, kind);
return -EINVAL;
default:
pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n",
i, kind);
return -EINVAL;
}
}
return 0;
}
static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
{
struct btf_type *t = btf_type_by_id(r->btf, i);
struct btf_field_iter it;
__u32 *str_off;
int off, err;
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return err;
while ((str_off = btf_field_iter_next(&it))) {
if (!*str_off)
continue;
if (*str_off >= r->dist_str_len) {
*str_off += r->base_str_len - r->dist_str_len;
} else {
off = r->str_map[*str_off];
if (!off) {
pr_warn("string '%s' [offset %u] is not mapped to base BTF",
btf__str_by_offset(r->btf, off), *str_off);
return -ENOENT;
}
*str_off = off;
}
}
return 0;
}
/* If successful, output of relocation is updated BTF with base BTF pointing
* at base_btf, and type ids, strings adjusted accordingly.
*/
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map)
{
unsigned int nr_types = btf__type_cnt(btf);
const struct btf_header *dist_base_hdr;
const struct btf_header *base_hdr;
struct btf_relocate r = {};
int err = 0;
__u32 id, i;
r.dist_base_btf = btf__base_btf(btf);
if (!base_btf || r.dist_base_btf == base_btf)
return -EINVAL;
r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf);
r.nr_base_types = btf__type_cnt(base_btf);
r.nr_split_types = nr_types - r.nr_dist_base_types;
r.btf = btf;
r.base_btf = base_btf;
r.id_map = calloc(nr_types, sizeof(*r.id_map));
r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map));
dist_base_hdr = btf_header(r.dist_base_btf);
base_hdr = btf_header(r.base_btf);
r.dist_str_len = dist_base_hdr->str_len;
r.base_str_len = base_hdr->str_len;
if (!r.id_map || !r.str_map) {
err = -ENOMEM;
goto err_out;
}
err = btf_relocate_validate_distilled_base(&r);
if (err)
goto err_out;
/* Split BTF ids need to be adjusted as base and distilled base
* have different numbers of types, changing the start id of split
* BTF.
*/
for (id = r.nr_dist_base_types; id < nr_types; id++)
r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types;
/* Build a map from distilled base ids to actual base BTF ids; it is used
* to update split BTF id references. Also build a str_map mapping from
* distilled base BTF names to base BTF names.
*/
err = btf_relocate_map_distilled_base(&r);
if (err)
goto err_out;
/* Next, rewrite type ids in split BTF, replacing split ids with updated
* ids based on number of types in base BTF, and base ids with
* relocated ids from base_btf.
*/
for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) {
err = btf_relocate_rewrite_type_id(&r, id);
if (err)
goto err_out;
}
/* String offsets now need to be updated using the str_map. */
for (i = 0; i < r.nr_split_types; i++) {
err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types);
if (err)
goto err_out;
}
/* Finally reset base BTF to be base_btf */
btf_set_base_btf(btf, base_btf);
if (id_map) {
*id_map = r.id_map;
r.id_map = NULL;
}
err_out:
free(r.id_map);
free(r.str_map);
return err;
}
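
A worked example of the initial split-id adjustment in btf_relocate() (counts made up): with 10 distilled base types and 100 real base types, split ids start at 10 on input and at 100 on output, so each split id shifts up by the difference:

/* Sketch: r.id_map[id] = id + nr_base_types - nr_dist_base_types */
static __u32 relocated_split_id_sketch(__u32 id, __u32 nr_base, __u32 nr_dist_base)
{
	return id + nr_base - nr_dist_base;	/* e.g. 15 + 100 - 10 = 105 */
}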

View File

@@ -28,9 +28,6 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd)
int fd, ret;
Elf *elf;
elf_fd->elf = NULL;
elf_fd->fd = -1;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("elf: failed to init libelf for %s\n", binary_path);
return -LIBBPF_ERRNO__LIBELF;

View File

@@ -22,7 +22,7 @@ int probe_fd(int fd)
static int probe_kern_prog_name(int token_fd)
{
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -392,41 +392,11 @@ static int probe_uprobe_multi_link(int token_fd)
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
err = -errno; /* close() can clobber errno */
if (link_fd >= 0 || err != -EBADF) {
if (link_fd >= 0)
close(link_fd);
close(prog_fd);
return 0;
}
/* Initial multi-uprobe support in kernel didn't handle PID filtering
* correctly (it was doing thread filtering, not process filtering).
* So now we'll detect if PID filtering logic was fixed, and, if not,
* we'll pretend multi-uprobes are not supported.
* Multi-uprobes are used in USDT attachment logic, and we need to be
* conservative here, because multi-uprobe selection happens early at
* load time, while the use of PID filtering is known late at
* attachment time, at which point it's too late to undo multi-uprobe
* selection.
*
* Creating uprobe with pid == -1 for (invalid) '/' binary will fail
* early with -EINVAL on kernels with fixed PID filtering logic;
* otherwise -ESRCH would be returned if passed correct binary path
* (but we'll just get -EBADF, of course).
*/
link_opts.uprobe_multi.pid = -1; /* invalid PID */
link_opts.uprobe_multi.path = "/"; /* invalid path */
link_opts.uprobe_multi.offsets = &offset;
link_opts.uprobe_multi.cnt = 1;
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
err = -errno; /* close() can clobber errno */
if (link_fd >= 0)
close(link_fd);
close(prog_fd);
return link_fd < 0 && err == -EINVAL;
return link_fd < 0 && err == -EBADF;
}
static int probe_kern_bpf_cookie(int token_fd)

View File

@@ -132,7 +132,6 @@ static const char * const attach_type_name[] = {
[BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
[BPF_NETKIT_PRIMARY] = "netkit_primary",
[BPF_NETKIT_PEER] = "netkit_peer",
[BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
};
static const char * const link_type_name[] = {
@@ -150,7 +149,6 @@ static const char * const link_type_name[] = {
[BPF_LINK_TYPE_TCX] = "tcx",
[BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
[BPF_LINK_TYPE_NETKIT] = "netkit",
[BPF_LINK_TYPE_SOCKMAP] = "sockmap",
};
static const char * const map_type_name[] = {
@@ -229,30 +227,7 @@ static const char * const prog_type_name[] = {
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
{
const char *env_var = "LIBBPF_LOG_LEVEL";
static enum libbpf_print_level min_level = LIBBPF_INFO;
static bool initialized;
if (!initialized) {
char *verbosity;
initialized = true;
verbosity = getenv(env_var);
if (verbosity) {
if (strcasecmp(verbosity, "warn") == 0)
min_level = LIBBPF_WARN;
else if (strcasecmp(verbosity, "debug") == 0)
min_level = LIBBPF_DEBUG;
else if (strcasecmp(verbosity, "info") == 0)
min_level = LIBBPF_INFO;
else
fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
env_var, verbosity);
}
}
/* if too verbose, skip logging */
if (level > min_level)
if (level == LIBBPF_DEBUG)
return 0;
return vfprintf(stderr, format, args);
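
A minimal sketch of exercising this env-var check from an application (normally LIBBPF_LOG_LEVEL would just be set in the shell):

#include <stdlib.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* must be set before the first libbpf message, since min_level
	 * is cached on first use */
	setenv("LIBBPF_LOG_LEVEL", "debug", 1);
	/* ... open and load BPF objects as usual ... */
	return 0;
}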
@@ -496,6 +471,8 @@ struct bpf_program {
};
struct bpf_struct_ops {
const char *tname;
const struct btf_type *type;
struct bpf_program **progs;
__u32 *kern_func_off;
/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
@@ -570,7 +547,6 @@ struct bpf_map {
bool pinned;
bool reused;
bool autocreate;
bool autoattach;
__u64 map_extra;
};
@@ -1081,14 +1057,11 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
continue;
for (j = 0; j < obj->nr_maps; ++j) {
const struct btf_type *type;
map = &obj->maps[j];
if (!bpf_map__is_struct_ops(map))
continue;
type = btf__type_by_id(obj->btf, map->st_ops->type_id);
vlen = btf_vlen(type);
vlen = btf_vlen(map->st_ops->type);
for (k = 0; k < vlen; ++k) {
slot_prog = map->st_ops->progs[k];
if (prog != slot_prog)
@@ -1122,8 +1095,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
int err;
st_ops = map->st_ops;
type = btf__type_by_id(btf, st_ops->type_id);
tname = btf__name_by_offset(btf, type->name_off);
type = st_ops->type;
tname = st_ops->tname;
err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
@@ -1153,7 +1126,6 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
const struct btf_type *mtype, *kern_mtype;
__u32 mtype_id, kern_mtype_id;
void *mdata, *kern_mdata;
struct bpf_program *prog;
__s64 msize, kern_msize;
__u32 moff, kern_moff;
__u32 kern_member_idx;
@@ -1171,28 +1143,18 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
kern_member = find_member_by_name(kern_btf, kern_type, mname);
if (!kern_member) {
if (!libbpf_is_mem_zeroed(mdata, msize)) {
pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
/* Skip all zeros or null fields if they are not
* present in the kernel BTF.
*/
if (libbpf_is_mem_zeroed(mdata, msize)) {
pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
map->name, mname);
return -ENOTSUP;
continue;
}
if (st_ops->progs[i]) {
/* If we had declaratively set struct_ops callback, we need to
* force its autoload to false, because it doesn't have
* a chance of succeeding from POV of the current struct_ops map.
* If this program is still referenced somewhere else, though,
* then bpf_object_adjust_struct_ops_autoload() will update its
* autoload accordingly.
*/
st_ops->progs[i]->autoload = false;
st_ops->progs[i] = NULL;
}
/* Skip all-zero/NULL fields if they are not present in the kernel BTF */
pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
map->name, mname);
continue;
return -ENOTSUP;
}
kern_member_idx = kern_member - btf_members(kern_type);
@@ -1218,19 +1180,13 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
}
if (btf_is_ptr(mtype)) {
prog = *(void **)mdata;
/* just like for !kern_member case above, reset declaratively
* set (at compile time) program's autload to false,
* if user replaced it with another program or NULL
*/
if (st_ops->progs[i] && st_ops->progs[i] != prog)
st_ops->progs[i]->autoload = false;
struct bpf_program *prog;
/* Update the value from the shadow type */
prog = *(void **)mdata;
st_ops->progs[i] = prog;
if (!prog)
continue;
if (!is_valid_st_ops_program(obj, prog)) {
pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
map->name, mname);
@@ -1402,7 +1358,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->def.value_size = type->size;
map->def.max_entries = 1;
map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
map->autoattach = true;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -1424,6 +1379,8 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
memcpy(st_ops->data,
data->d_buf + vsi->offset,
type->size);
st_ops->tname = tname;
st_ops->type = type;
st_ops->type_id = type_id;
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
@@ -2013,20 +1970,6 @@ static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
return NULL;
}
static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj,
const void *name, int len)
{
const char *ext_name;
int i;
for (i = 0; i < obj->nr_extern; i++) {
ext_name = obj->externs[i].name;
if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0)
return &obj->externs[i];
}
return NULL;
}
static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
char value)
{
@@ -4820,20 +4763,6 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
return 0;
}
int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
{
if (!bpf_map__is_struct_ops(map))
return libbpf_err(-EINVAL);
map->autoattach = autoattach;
return 0;
}
bool bpf_map__autoattach(const struct bpf_map *map)
{
return map->autoattach;
}
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
struct bpf_map_info info;
@@ -7410,11 +7339,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
__u32 log_level = prog->log_level;
int ret, err;
/* Be more helpful by rejecting programs that can't be validated early
* with a more meaningful and actionable error message.
*/
switch (prog->type) {
case BPF_PROG_TYPE_UNSPEC:
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
/*
* The program type must be set. Most likely we couldn't find a proper
* section definition at load time, and thus we didn't infer the type.
@@ -7422,15 +7347,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
prog->name, prog->sec_name);
return -EINVAL;
case BPF_PROG_TYPE_STRUCT_OPS:
if (prog->attach_btf_id == 0) {
pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n",
prog->name);
return -EINVAL;
}
break;
default:
break;
}
if (!insns || !insns_cnt)
@@ -7905,19 +7821,16 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
}
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
const char *obj_name,
const struct bpf_object_open_opts *opts)
{
const char *kconfig, *btf_tmp_path, *token_path;
const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
char *log_buf;
size_t log_size;
__u32 log_level;
if (obj_buf && !obj_name)
return ERR_PTR(-EINVAL);
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
path ? : "(mem buf)");
@@ -7927,12 +7840,16 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
obj_name = OPTS_GET(opts, object_name, NULL);
if (obj_buf) {
if (!obj_name) {
snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
(unsigned long)obj_buf,
(unsigned long)obj_buf_sz);
obj_name = tmp_name;
}
path = obj_name;
pr_debug("loading object '%s' from buffer\n", obj_name);
} else {
pr_debug("loading object from %s\n", path);
}
log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
@@ -8016,7 +7933,9 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
if (!path)
return libbpf_err_ptr(-EINVAL);
return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
pr_debug("loading %s\n", path);
return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
}
struct bpf_object *bpf_object__open(const char *path)
@@ -8028,15 +7947,10 @@ struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
char tmp_name[64];
if (!obj_buf || obj_buf_sz == 0)
return libbpf_err_ptr(-EINVAL);
/* create a (quite useless) default "name" for this memory buffer object */
snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
}
static int bpf_object_unload(struct bpf_object *obj)
@@ -8072,10 +7986,7 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
return 0;
}
typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
const char *sym_name, void *ctx);
static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
{
char sym_type, sym_name[500];
unsigned long long sym_addr;
@@ -8115,13 +8026,8 @@ static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
struct bpf_object *obj = ctx;
const struct btf_type *t;
struct extern_desc *ext;
char *res;
res = strstr(sym_name, ".llvm.");
if (sym_type == 'd' && res)
ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
else
ext = find_extern_by_name(obj, sym_name);
ext = find_extern_by_name(obj, sym_name);
if (!ext || ext->type != EXT_KSYM)
return 0;
@@ -8446,13 +8352,11 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
static void bpf_map_prepare_vdata(const struct bpf_map *map)
{
const struct btf_type *type;
struct bpf_struct_ops *st_ops;
__u32 i;
st_ops = map->st_ops;
type = btf__type_by_id(map->obj->btf, st_ops->type_id);
for (i = 0; i < btf_vlen(type); i++) {
for (i = 0; i < btf_vlen(st_ops->type); i++) {
struct bpf_program *prog = st_ops->progs[i];
void *kern_data;
int prog_fd;
@@ -9345,7 +9249,6 @@ static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_lin
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
@@ -9362,7 +9265,6 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
@@ -9715,7 +9617,6 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
const struct btf_type *type;
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
struct bpf_program *prog;
@@ -9775,14 +9676,13 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}
insn_idx = sym->st_value / BPF_INSN_SZ;
type = btf__type_by_id(btf, st_ops->type_id);
member = find_member_by_offset(type, moff * 8);
member = find_member_by_offset(st_ops->type, moff * 8);
if (!member) {
pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
map->name, moff);
return -EINVAL;
}
member_idx = member - btf_members(type);
member_idx = member - btf_members(st_ops->type);
name = btf__name_by_offset(btf, member->name_off);
if (!resolve_func_ptr(btf, member->type, NULL)) {
@@ -9935,28 +9835,16 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
enum bpf_attach_type attach_type,
int *btf_obj_fd, int *btf_type_id)
{
int ret, i, mod_len;
const char *fn_name, *mod_name = NULL;
int ret, i;
fn_name = strchr(attach_name, ':');
if (fn_name) {
mod_name = attach_name;
mod_len = fn_name - mod_name;
fn_name++;
}
if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) {
ret = find_attach_btf_id(obj->btf_vmlinux,
mod_name ? fn_name : attach_name,
attach_type);
if (ret > 0) {
*btf_obj_fd = 0; /* vmlinux BTF */
*btf_type_id = ret;
return 0;
}
if (ret != -ENOENT)
return ret;
ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
if (ret > 0) {
*btf_obj_fd = 0; /* vmlinux BTF */
*btf_type_id = ret;
return 0;
}
if (ret != -ENOENT)
return ret;
ret = load_module_btfs(obj);
if (ret)
@@ -9965,12 +9853,7 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
for (i = 0; i < obj->btf_module_cnt; i++) {
const struct module_btf *mod = &obj->btf_modules[i];
if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0)
continue;
ret = find_attach_btf_id(mod->btf,
mod_name ? fn_name : attach_name,
attach_type);
ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
if (ret > 0) {
*btf_obj_fd = mod->fd;
*btf_type_id = ret;
@@ -10380,7 +10263,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
if (prev == NULL && obj != NULL)
if (prev == NULL)
return obj->maps;
return __bpf_map__iter(prev, obj, 1);
@@ -10389,7 +10272,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
if (next == NULL && obj != NULL) {
if (next == NULL) {
if (!obj->nr_maps)
return NULL;
return obj->maps + obj->nr_maps - 1;
@@ -11474,14 +11357,13 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
struct kprobe_multi_resolve res = {
.pattern = pattern,
};
enum bpf_attach_type attach_type;
struct bpf_link *link = NULL;
char errmsg[STRERR_BUFSIZE];
const unsigned long *addrs;
int err, link_fd, prog_fd;
bool retprobe, session;
const __u64 *cookies;
const char **syms;
bool retprobe;
size_t cnt;
if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
@@ -11520,12 +11402,6 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
}
retprobe = OPTS_GET(opts, retprobe, false);
session = OPTS_GET(opts, session, false);
if (retprobe && session)
return libbpf_err_ptr(-EINVAL);
attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI;
lopts.kprobe_multi.syms = syms;
lopts.kprobe_multi.addrs = addrs;
@@ -11540,7 +11416,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
}
link->detach = &bpf_link__detach_fd;
link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
if (link_fd < 0) {
err = -errno;
pr_warn("prog '%s': failed to attach: %s\n",
@@ -11637,7 +11513,7 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
if (n < 1) {
pr_warn("kprobe multi pattern is invalid: %s\n", spec);
pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
return -EINVAL;
}
@@ -11646,32 +11522,6 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
return libbpf_get_error(*link);
}
static int attach_kprobe_session(const struct bpf_program *prog, long cookie,
struct bpf_link **link)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true);
const char *spec;
char *pattern;
int n;
*link = NULL;
/* no auto-attach for SEC("kprobe.session") */
if (strcmp(prog->sec_name, "kprobe.session") == 0)
return 0;
spec = prog->sec_name + sizeof("kprobe.session/") - 1;
n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
if (n < 1) {
pr_warn("kprobe session pattern is invalid: %s\n", spec);
return -EINVAL;
}
*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
free(pattern);
return *link ? 0 : -errno;
}
static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
@@ -12661,12 +12511,6 @@ bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
}
struct bpf_link *
bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd)
{
return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL);
}
struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
{
/* target_fd/target_ifindex use the same field in LINK_CREATE */
@@ -12921,10 +12765,8 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
__u32 zero = 0;
int err, fd;
if (!bpf_map__is_struct_ops(map)) {
pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
if (!bpf_map__is_struct_ops(map))
return libbpf_err_ptr(-EINVAL);
}
if (map->fd < 0) {
pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
@@ -13717,15 +13559,14 @@ int libbpf_num_possible_cpus(void)
static int populate_skeleton_maps(const struct bpf_object *obj,
struct bpf_map_skeleton *maps,
size_t map_cnt, size_t map_skel_sz)
size_t map_cnt)
{
int i;
for (i = 0; i < map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
struct bpf_map **map = map_skel->map;
const char *name = map_skel->name;
void **mmaped = map_skel->mmaped;
struct bpf_map **map = maps[i].map;
const char *name = maps[i].name;
void **mmaped = maps[i].mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
@@ -13742,14 +13583,13 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
static int populate_skeleton_progs(const struct bpf_object *obj,
struct bpf_prog_skeleton *progs,
size_t prog_cnt, size_t prog_skel_sz)
size_t prog_cnt)
{
int i;
for (i = 0; i < prog_cnt; i++) {
struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
struct bpf_program **prog = prog_skel->prog;
const char *name = prog_skel->name;
struct bpf_program **prog = progs[i].prog;
const char *name = progs[i].name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
@@ -13763,24 +13603,40 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
.object_name = s->name,
);
struct bpf_object *obj;
int err;
obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
/* Attempt to preserve opts->object_name, unless overridden by user
* explicitly. Overwriting object name for skeletons is discouraged,
* as it breaks global data maps, because they contain object name
* prefix as their own map name prefix. When skeleton is generated,
* bpftool is making an assumption that this name will stay the same.
*/
if (opts) {
memcpy(&skel_opts, opts, sizeof(*opts));
if (!opts->object_name)
skel_opts.object_name = s->name;
}
obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
err = libbpf_get_error(obj);
if (err) {
pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
s->name, err);
return libbpf_err(err);
}
*s->obj = obj;
err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
@@ -13810,20 +13666,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
return libbpf_err(-errno);
}
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
var_skel = &s->vars[var_idx];
map = *var_skel->map;
map_type_id = bpf_map__btf_value_type_id(map);
map_type = btf__type_by_id(btf, map_type_id);
@@ -13870,11 +13726,10 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
struct bpf_map *map = *s->maps[i].map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = map_skel->mmaped;
void **mmaped = s->maps[i].mmaped;
if (!mmaped)
continue;
@@ -13922,9 +13777,8 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
int i, err;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
struct bpf_program *prog = *prog_skel->prog;
struct bpf_link **link = prog_skel->link;
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
if (!prog->autoload || !prog->autoattach)
continue;
@@ -13956,38 +13810,6 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
*/
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
struct bpf_link **link;
if (!map->autocreate || !map->autoattach)
continue;
/* only struct_ops maps can be attached */
if (!bpf_map__is_struct_ops(map))
continue;
/* skeleton was created with an earlier version of bpftool, notify user */
if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
bpf_map__name(map));
continue;
}
link = map_skel->link;
if (*link)
continue;
*link = bpf_map__attach_struct_ops(map);
if (!*link) {
err = -errno;
pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
return libbpf_err(err);
}
}
return 0;
}
@@ -13996,25 +13818,11 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
int i;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
struct bpf_link **link = prog_skel->link;
struct bpf_link **link = s->progs[i].link;
bpf_link__destroy(*link);
*link = NULL;
}
if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
return;
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_link **link = map_skel->link;
if (link) {
bpf_link__destroy(*link);
*link = NULL;
}
}
}
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
@@ -14022,7 +13830,8 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
if (!s)
return;
bpf_object__detach_skeleton(s);
if (s->progs)
bpf_object__detach_skeleton(s);
if (s->obj)
bpf_object__close(*s->obj);
free(s->maps);

View File

@@ -98,10 +98,7 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
/**
* @brief **libbpf_set_print()** sets user-provided log callback function to
* be used for libbpf warnings and informational messages. If the user callback
* is not set, messages are logged to stderr by default. The verbosity of these
* messages can be controlled by setting the environment variable
* LIBBPF_LOG_LEVEL to either warn, info, or debug.
* be used for libbpf warnings and informational messages.
* @param fn The log print function. If NULL, libbpf won't print anything.
* @return Pointer to old print function.
*
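
A minimal print-callback sketch to accompany this doc comment (callback name illustrative):

#include <stdio.h>
#include <stdarg.h>
#include <bpf/libbpf.h>

static int my_print(enum libbpf_print_level level, const char *fmt, va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;			/* drop debug chatter */
	return vfprintf(stderr, fmt, args);
}

/* early in main(): libbpf_set_print(my_print); */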
@@ -542,12 +539,10 @@ struct bpf_kprobe_multi_opts {
size_t cnt;
/* create return kprobes */
bool retprobe;
/* create session kprobes */
bool session;
size_t :0;
};
#define bpf_kprobe_multi_opts__last_field session
#define bpf_kprobe_multi_opts__last_field retprobe
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
@@ -800,8 +795,6 @@ bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
LIBBPF_API struct bpf_link *
bpf_program__attach_freplace(const struct bpf_program *prog,
@@ -978,23 +971,6 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
/**
* @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
* map during BPF skeleton attach phase.
* @param map the BPF map instance
* @param autoattach whether to attach map during BPF skeleton attach phase
* @return 0 on success; negative error code, otherwise
*/
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
/**
* @brief **bpf_map__autoattach()** returns whether BPF map is configured to
* auto-attach during BPF skeleton attach phase.
* @param map the BPF map instance
* @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
*/
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
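
Usage sketch for the pair above, assuming a generated skeleton with a struct_ops map named my_ops (names hypothetical):

bpf_map__set_autoattach(skel->maps.my_ops, false);	/* skeleton attach skips it */
if (!bpf_map__autoattach(skel->maps.my_ops))
	link = bpf_map__attach_struct_ops(skel->maps.my_ops);	/* attach manually */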
/**
* @brief **bpf_map__fd()** gets the file descriptor of the passed
* BPF map
@@ -1317,7 +1293,6 @@ LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
/**
@@ -1392,17 +1367,6 @@ LIBBPF_API int ring__map_fd(const struct ring *r);
*/
LIBBPF_API int ring__consume(struct ring *r);
/**
* @brief **ring__consume_n()** consumes up to a requested amount of items from
* a ringbuffer without event polling.
*
* @param r A ringbuffer object.
* @param n Maximum amount of items to consume.
* @return The number of items consumed, or a negative number if any of the
* callbacks return an error.
*/
LIBBPF_API int ring__consume_n(struct ring *r, size_t n);
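
Usage sketch (the ring handle would come from ring_buffer__ring(); the count is illustrative):

struct ring *r = ring_buffer__ring(rb, 0);
int n = ring__consume_n(r, 64);		/* drain at most 64 records, no polling */

if (n < 0)
	fprintf(stderr, "ring consume failed: %d\n", n);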
struct user_ring_buffer_opts {
size_t sz; /* size of this struct, for forward/backward compatibility */
};
@@ -1689,7 +1653,6 @@ struct bpf_map_skeleton {
const char *name;
struct bpf_map **map;
void **mmaped;
struct bpf_link **link;
};
struct bpf_prog_skeleton {

View File

@@ -416,14 +416,3 @@ LIBBPF_1.4.0 {
btf__new_split;
btf_ext__raw_data;
} LIBBPF_1.3.0;
LIBBPF_1.5.0 {
global:
btf__distill_base;
btf__relocate;
bpf_map__autoattach;
bpf_map__set_autoattach;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;
} LIBBPF_1.4.0;

View File

@@ -234,9 +234,6 @@ struct btf_type;
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
const struct btf_header *btf_header(const struct btf *btf);
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
{
@@ -511,38 +508,21 @@ struct bpf_line_info_min {
__u32 line_col;
};
enum btf_field_iter_kind {
BTF_FIELD_ITER_IDS,
BTF_FIELD_ITER_STRS,
};
struct btf_field_desc {
/* once-per-type offsets */
int t_off_cnt, t_offs[2];
/* member struct size, or zero, if no members */
int m_sz;
/* repeated per-member offsets */
int m_off_cnt, m_offs[1];
};
struct btf_field_iter {
struct btf_field_desc desc;
void *p;
int m_idx;
int off_idx;
int vlen;
};
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
__u32 *btf_field_iter_next(struct btf_field_iter *it);
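
A usage sketch of this internal iterator pair, mirroring its callers in btf.c and btf_relocate.c (t is any struct btf_type *; remap() is a hypothetical helper):

struct btf_field_iter it;
__u32 *id;
int err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);

if (err)
	return err;
while ((id = btf_field_iter_next(&it)))
	*id = remap(*id);	/* visits every type-id field of 't' exactly once */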
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
__u32 kind);
typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
const char *sym_name, void *ctx);
int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg);
/* handle direct returned errors */
static inline int libbpf_err(int ret)
{
@@ -622,9 +602,13 @@ static inline int ensure_good_fd(int fd)
return fd;
}
static inline int sys_dup3(int oldfd, int newfd, int flags)
static inline int sys_dup2(int oldfd, int newfd)
{
return syscall(__NR_dup3, oldfd, newfd, flags);
#ifdef __NR_dup2
return syscall(__NR_dup2, oldfd, newfd);
#else
return syscall(__NR_dup3, oldfd, newfd, 0);
#endif
}
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
@@ -635,7 +619,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
int err;
err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
err = sys_dup2(tmp_fd, fixed_fd);
err = err < 0 ? -errno : 0;
close(tmp_fd); /* clean up temporary FD */
return err;

View File

@@ -451,8 +451,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
* at all, it will emit something like "invalid func unknown#181".
* If BPF verifier recognizes BPF helper but it's not supported for
* given BPF program type, it will emit "unknown func bpf_sys_bpf#166"
* or "program of this type cannot use helper bpf_sys_bpf#166".
* given BPF program type, it will emit "unknown func bpf_sys_bpf#166".
* In both cases, provided combination of BPF program type and BPF
* helper is not supported by the kernel.
* In all other cases, probe_prog_load() above will either succeed (e.g.,
@@ -461,8 +460,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
* that), or we'll get some more specific BPF verifier error about
* some unsatisfied conditions.
*/
if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ") ||
strstr(buf, "program of this type cannot use helper ")))
if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
return 0;
return 1; /* assume supported */
}
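A hedged usage sketch of this probe, checking whether bpf_ktime_get_ns() is usable from an XDP program on the running kernel (the program-type/helper pairing is an illustrative choice):

#include <stdio.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>

int main(void)
{
	int ret = libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP,
					  BPF_FUNC_ktime_get_ns, NULL);

	if (ret < 0)
		fprintf(stderr, "probe failed: %d\n", ret);
	else
		printf("bpf_ktime_get_ns() %s supported for XDP\n",
		       ret ? "is" : "is not");
	return 0;
}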

View File

@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
#define LIBBPF_MINOR_VERSION 5
#define LIBBPF_MINOR_VERSION 4
#endif /* __LIBBPF_VERSION_H */

View File

@@ -957,33 +957,19 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
static int linker_sanity_check_btf(struct src_obj *obj)
{
struct btf_type *t;
int i, n, err;
int i, n, err = 0;
if (!obj->btf)
return 0;
n = btf__type_cnt(obj->btf);
for (i = 1; i < n; i++) {
struct btf_field_iter it;
__u32 *type_id, *str_off;
t = btf_type_by_id(obj->btf, i);
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
if (err)
return err;
while ((type_id = btf_field_iter_next(&it))) {
if (*type_id >= n)
return -EINVAL;
}
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
if (err)
return err;
while ((str_off = btf_field_iter_next(&it))) {
if (!btf__str_by_offset(obj->btf, *str_off))
return -EINVAL;
}
}
return 0;
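The visitor callbacks restored above (check_btf_type_id, check_btf_str_off) follow the type_id_visit_fn shape; a hedged sketch of such a callback, with a hypothetical name:

#include <errno.h>
#include <bpf/btf.h>

/* hypothetical type_id_visit_fn: reject references to missing types */
static int check_type_id_bounds(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (*type_id >= btf__type_cnt(btf))
		return -EINVAL;	/* dangling type reference */
	return 0;
}

/* would be invoked per type as:
 * btf_type_visit_type_ids(t, check_type_id_bounds, btf);
 */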
@@ -2227,17 +2213,10 @@ static int linker_fixup_btf(struct src_obj *obj)
vi = btf_var_secinfos(t);
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
const char *var_name;
int var_linkage;
const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
int var_linkage = btf_var(vt)->linkage;
Elf64_Sym *sym;
/* could be a variable or function */
if (!btf_is_var(vt))
continue;
var_name = btf__str_by_offset(obj->btf, vt->name_off);
var_linkage = btf_var(vt)->linkage;
/* no need to patch up static or extern vars */
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
continue;
@@ -2255,10 +2234,26 @@ static int linker_fixup_btf(struct src_obj *obj)
return 0;
}
static int remap_type_id(__u32 *type_id, void *ctx)
{
int *id_map = ctx;
int new_id = id_map[*type_id];
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
if (new_id == 0 && *type_id != 0) {
pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
return -EINVAL;
}
*type_id = id_map[*type_id];
return 0;
}
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
{
const struct btf_type *t;
int i, j, n, start_id, id, err;
int i, j, n, start_id, id;
const char *name;
if (!obj->btf)
@@ -2329,25 +2324,9 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
n = btf__type_cnt(linker->btf);
for (i = start_id; i < n; i++) {
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
struct btf_field_iter it;
__u32 *type_id;
err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
if (err)
return err;
while ((type_id = btf_field_iter_next(&it))) {
int new_id = obj->btf_type_map[*type_id];
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
if (new_id == 0 && *type_id != 0) {
pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
*type_id);
return -EINVAL;
}
*type_id = obj->btf_type_map[*type_id];
}
if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
return -EINVAL;
}
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's

View File

@@ -231,7 +231,7 @@ static inline int roundup_len(__u32 len)
return (len + 7) / 8 * 8;
}
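As a quick aside, the rounding in roundup_len() matches the kernel ring buffer's 8-byte record alignment; an equivalent bit-twiddling form, shown as a sketch that is not part of the source:

#include <linux/types.h>

/* sketch: same result as (len + 7) / 8 * 8 for any __u32 len */
static inline __u32 roundup_len_alt(__u32 len)
{
	return (len + 7) & ~7u;
}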
static int64_t ringbuf_process_ring(struct ring *r, size_t n)
static int64_t ringbuf_process_ring(struct ring *r)
{
int *len_ptr, len, err;
/* 64-bit to avoid overflow in case of extreme application behavior */
@@ -268,42 +268,12 @@ static int64_t ringbuf_process_ring(struct ring *r, size_t n)
}
smp_store_release(r->consumer_pos, cons_pos);
if (cnt >= n)
goto done;
}
} while (got_new_data);
done:
return cnt;
}
/* Consume available ring buffer(s) data without event polling, up to n
* records.
*
* Returns the number of records consumed across all registered ring buffers
* (or n, whichever is less), or a negative number if any of the callbacks
* returns an error.
*/
int ring_buffer__consume_n(struct ring_buffer *rb, size_t n)
{
int64_t err, res = 0;
int i;
for (i = 0; i < rb->ring_cnt; i++) {
struct ring *ring = rb->rings[i];
err = ringbuf_process_ring(ring, n);
if (err < 0)
return libbpf_err(err);
res += err;
n -= err;
if (n == 0)
break;
}
return res > INT_MAX ? INT_MAX : res;
}
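Per-ring variant, as a hedged sketch: ring_buffer__ring() fetches one registered ring so ring__consume_n() can bound consumption on just that ring (the helper name is hypothetical):

#include <bpf/libbpf.h>

/* hypothetical helper: drain at most max records from the ring at idx */
static int drain_one_ring(struct ring_buffer *rb, unsigned int idx, size_t max)
{
	struct ring *r = ring_buffer__ring(rb, idx);

	if (!r)
		return -1;	/* idx out of range */
	return ring__consume_n(r, max);
}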
/* Consume available ring buffer(s) data without event polling.
* Returns the number of records consumed across all registered ring buffers
* (or INT_MAX, whichever is less), or a negative number if any of the callbacks
@@ -317,15 +287,13 @@ int ring_buffer__consume(struct ring_buffer *rb)
for (i = 0; i < rb->ring_cnt; i++) {
struct ring *ring = rb->rings[i];
err = ringbuf_process_ring(ring, INT_MAX);
err = ringbuf_process_ring(ring);
if (err < 0)
return libbpf_err(err);
res += err;
if (res > INT_MAX) {
res = INT_MAX;
break;
}
}
if (res > INT_MAX)
return INT_MAX;
return res;
}
@@ -346,13 +314,13 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
__u32 ring_id = rb->events[i].data.fd;
struct ring *ring = rb->rings[ring_id];
err = ringbuf_process_ring(ring, INT_MAX);
err = ringbuf_process_ring(ring);
if (err < 0)
return libbpf_err(err);
res += err;
}
if (res > INT_MAX)
res = INT_MAX;
return INT_MAX;
return res;
}
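Typical call pattern for ring_buffer__poll(), as an illustrative sketch; rb and the exiting flag are assumptions standing in for application state:

#include <errno.h>
#include <signal.h>
#include <bpf/libbpf.h>

static volatile sig_atomic_t exiting;	/* set by a signal handler (assumed) */

static int poll_loop(struct ring_buffer *rb)
{
	while (!exiting) {
		int n = ring_buffer__poll(rb, 100 /* timeout, ms */);

		if (n == -EINTR)	/* interrupted by a signal; retry */
			continue;
		if (n < 0)
			return n;	/* real error from epoll or a callback */
		/* n records were delivered to the registered sample callback */
	}
	return 0;
}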
@@ -403,22 +371,17 @@ int ring__map_fd(const struct ring *r)
return r->map_fd;
}
int ring__consume_n(struct ring *r, size_t n)
int ring__consume(struct ring *r)
{
int64_t res;
res = ringbuf_process_ring(r, n);
res = ringbuf_process_ring(r);
if (res < 0)
return libbpf_err(res);
return res > INT_MAX ? INT_MAX : res;
}
int ring__consume(struct ring *r)
{
return ring__consume_n(r, INT_MAX);
}
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
{
if (rb->consumer_pos) {

View File

@@ -2,7 +2,6 @@
#undef _GNU_SOURCE
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include "str_error.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
@@ -16,18 +15,7 @@
char *libbpf_strerror_r(int err, char *dst, int len)
{
int ret = strerror_r(err < 0 ? -err : err, dst, len);
/* on glibc <2.13, if strerror_r() can't handle the error, ret == -1
* and errno is set; on glibc >=2.13, a *positive* (errno-like) error
* code is returned directly
*/
if (ret == -1)
ret = errno;
if (ret) {
if (ret == EINVAL)
/* strerror_r() doesn't recognize this specific error */
snprintf(dst, len, "unknown error (%d)", err < 0 ? err : -err);
else
snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
}
if (ret)
snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
return dst;
}
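For background, a hedged sketch of the portability wrinkle the removed branch handled: with _GNU_SOURCE undefined this file gets the XSI strerror_r(), which on modern glibc returns a positive errno-style code, while pre-2.13 glibc signalled failure with -1 plus errno. The helper name below is hypothetical:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static void describe_err(int err, char *dst, int len)
{
	int ret = strerror_r(err < 0 ? -err : err, dst, len);

	if (ret == -1)		/* old-glibc convention: -1 + errno */
		ret = errno;
	if (ret == EINVAL)	/* unrecognized error number */
		snprintf(dst, len, "unknown error (%d)", err);
	else if (ret)
		snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
}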

View File

@@ -214,18 +214,18 @@ long bpf_usdt_cookie(struct pt_regs *ctx)
/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
#define ___bpf_usdt_args0() ctx
#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); _x; })
#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); _x; })
#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); _x; })
#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); _x; })
#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); _x; })
#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); _x; })
#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); _x; })
#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); _x; })
#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); _x; })
#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); _x; })
#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); _x; })
#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); _x; })
#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); (void *)_x; })
#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); (void *)_x; })
#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); (void *)_x; })
#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); (void *)_x; })
#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); (void *)_x; })
#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); (void *)_x; })
#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); (void *)_x; })
#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); (void *)_x; })
#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); (void *)_x; })
#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); (void *)_x; })
#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); (void *)_x; })
#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); (void *)_x; })
#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
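These forwarding macros back BPF_USDT(); a hedged usage sketch attaching to a hypothetical USDT probe (the binary, provider, probe names, and argument types are all assumptions):

#include "vmlinux.h"	/* assumption: generated by bpftool for this build */
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>

SEC("usdt/./myapp:myprovider:myprobe")
int BPF_USDT(handle_myprobe, void *arg0, void *arg1)
{
	/* each declared parameter is fetched via bpf_usdt_arg() above */
	bpf_printk("myprobe: arg0=%p arg1=%p", arg0, arg1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";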
/*