Merge branch 'libbpf:master' into master

This commit is contained in:
thiagoftsm
2022-08-22 19:29:03 +00:00
committed by GitHub
65 changed files with 75630 additions and 133503 deletions

View File

@@ -6,9 +6,9 @@ THISDIR="$(cd $(dirname $0) && pwd)"
source ${THISDIR}/helpers.sh source ${THISDIR}/helpers.sh
travis_fold start prepare_selftests "Building selftests" foldable start prepare_selftests "Building selftests"
LLVM_VER=15 LLVM_VER=16
LIBBPF_PATH="${REPO_ROOT}" LIBBPF_PATH="${REPO_ROOT}"
PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh
@@ -39,4 +39,4 @@ cd ${LIBBPF_PATH}
rm selftests/bpf/.gitignore rm selftests/bpf/.gitignore
git add selftests git add selftests
travis_fold end prepare_selftests foldable end prepare_selftests

View File

@@ -1,26 +1,18 @@
# $1 - start or end # $1 - start or end
# $2 - fold identifier, no spaces # $2 - fold identifier, no spaces
# $3 - fold section description # $3 - fold section description
travis_fold() { foldable() {
local YELLOW='\033[1;33m' local YELLOW='\033[1;33m'
local NOCOLOR='\033[0m' local NOCOLOR='\033[0m'
if [ -z ${GITHUB_WORKFLOW+x} ]; then if [ $1 = "start" ]; then
echo travis_fold:$1:$2 line="::group::$2"
if [ ! -z "${3:-}" ]; then if [ ! -z "${3:-}" ]; then
echo -e "${YELLOW}$3${NOCOLOR}" line="$line - ${YELLOW}$3${NOCOLOR}"
fi fi
echo
else else
if [ $1 = "start" ]; then line="::endgroup::"
line="::group::$2"
if [ ! -z "${3:-}" ]; then
line="$line - ${YELLOW}$3${NOCOLOR}"
fi
else
line="::endgroup::"
fi
echo -e "$line"
fi fi
echo -e "$line"
} }
__print() { __print() {

File diff suppressed because it is too large Load Diff

View File

@@ -16,7 +16,7 @@ inputs:
runs: runs:
using: "composite" using: "composite"
steps: steps:
# setup envinronment # setup environment
- name: Setup environment - name: Setup environment
uses: libbpf/ci/setup-build-env@master uses: libbpf/ci/setup-build-env@master
with: with:
@@ -41,9 +41,10 @@ runs:
- name: Prepare to build BPF selftests - name: Prepare to build BPF selftests
shell: bash shell: bash
run: | run: |
echo "::group::Prepare buidling selftest" echo "::group::Prepare building selftest"
cd .kernel cd .kernel
cp ${{ github.workspace }}/travis-ci/vmtest/configs/config-latest.${{ inputs.arch }} .config cat tools/testing/selftests/bpf/config \
tools/testing/selftests/bpf/config.${{ inputs.arch }} > .config
make olddefconfig && make prepare make olddefconfig && make prepare
cd - cd -
echo "::endgroup::" echo "::endgroup::"
@@ -78,6 +79,7 @@ runs:
kernel: ${{ inputs.kernel }} kernel: ${{ inputs.kernel }}
project-name: 'libbpf' project-name: 'libbpf'
arch: ${{ inputs.arch }} arch: ${{ inputs.arch }}
image-output: '/tmp/root.img'
# 5. run selftest in QEMU # 5. run selftest in QEMU
- name: Run selftests - name: Run selftests
uses: libbpf/ci/run-qemu@master uses: libbpf/ci/run-qemu@master

View File

@@ -32,7 +32,7 @@ jobs:
- name: gcc-10 ASan+UBSan - name: gcc-10 ASan+UBSan
target: RUN_GCC10_ASAN target: RUN_GCC10_ASAN
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
name: Checkout name: Checkout
- uses: ./.github/actions/setup - uses: ./.github/actions/setup
name: Setup name: Setup
@@ -53,7 +53,7 @@ jobs:
- arch: s390x - arch: s390x
- arch: x86 - arch: x86
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
name: Checkout name: Checkout
- uses: ./.github/actions/setup - uses: ./.github/actions/setup
name: Pre-Setup name: Pre-Setup

View File

@@ -11,7 +11,7 @@ jobs:
if: github.repository == 'libbpf/libbpf' if: github.repository == 'libbpf/libbpf'
name: Coverity name: Coverity
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: ./.github/actions/setup - uses: ./.github/actions/setup
- name: Run coverity - name: Run coverity
run: | run: |

View File

@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: vmtest with customized pahole/Kernel name: vmtest with customized pahole/Kernel
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: ./.github/actions/setup - uses: ./.github/actions/setup
- uses: ./.github/actions/vmtest - uses: ./.github/actions/vmtest
with: with:

View File

@@ -12,7 +12,7 @@ jobs:
env: env:
STAGING: tmp.master STAGING: tmp.master
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: ./.github/actions/setup - uses: ./.github/actions/setup
- uses: ./.github/actions/vmtest - uses: ./.github/actions/vmtest
with: with:

View File

@@ -31,7 +31,7 @@ jobs:
runs_on: z15 runs_on: z15
arch: 's390x' arch: 's390x'
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
name: Checkout name: Checkout
- uses: ./.github/actions/setup - uses: ./.github/actions/setup
name: Setup name: Setup

View File

@@ -1 +1 @@
d28b25a62a47a8c8aa19bd543863aab6717e68c9 14b20b784f59bdd95f6f1cfb112c9818bcec4d84

View File

@@ -1 +1 @@
b0d93b44641a83c28014ca38001e85bf6dc8501e e34cfee65ec891a319ce79797dda18083af33a76

View File

@@ -1,17 +1,29 @@
This is a mirror of [bpf-next Linux source <img src="https://user-images.githubusercontent.com/508075/185997470-2f427d3d-f040-4eef-afc5-ae4f766615b2.png" width="40%" >
tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next)'s
`tools/lib/bpf` directory plus its supporting header files.
All the gory details of syncing can be found in `scripts/sync-kernel.sh` libbpf
script. [![Github Actions Builds & Tests](https://github.com/libbpf/libbpf/actions/workflows/test.yml/badge.svg)](https://github.com/libbpf/libbpf/actions/workflows/test.yml)
[![Total alerts](https://img.shields.io/lgtm/alerts/g/libbpf/libbpf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/libbpf/libbpf/alerts/)
[![Coverity](https://img.shields.io/coverity/scan/18195.svg)](https://scan.coverity.com/projects/libbpf)
[![OSS-Fuzz Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/libbpf.svg)](https://oss-fuzz-build-logs.storage.googleapis.com/index.html#libbpf)
======
Some header files in this repo (`include/linux/*.h`) are reduced versions of **This is the official home of the libbpf library.**
their counterpart files at
[bpf-next](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/)'s
`tools/include/linux/*.h` to make compilation successful.
BPF/libbpf usage and questions *Please use this Github repository for building and packaging libbpf
============================== and when using it in your projects through Git submodule.*
Libbpf *authoritative source code* is developed as part of [bpf-next Linux source
tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next) under
`tools/lib/bpf` subdirectory and is periodically synced to Github. As such, all the
libbpf changes should be sent to [BPF mailing list](http://vger.kernel.org/vger-lists.html#bpf),
please don't open PRs here unless you are changing Github-specific parts of libbpf
(e.g., Github-specific Makefile).
Libbpf and general BPF usage questions
======================================
Libbpf documentation can be found [here](https://libbpf.readthedocs.io/en/latest/api.html).
It's an ongoing effort and has ways to go, but please take a look and consider contributing as well.
Please check out [libbpf-bootstrap](https://github.com/libbpf/libbpf-bootstrap) Please check out [libbpf-bootstrap](https://github.com/libbpf/libbpf-bootstrap)
and [the companion blog post](https://nakryiko.com/posts/libbpf-bootstrap/) for and [the companion blog post](https://nakryiko.com/posts/libbpf-bootstrap/) for
@@ -36,12 +48,8 @@ to help you with whatever issue you have. This repository's PRs and issues
should be opened only for dealing with issues pertaining to specific way this should be opened only for dealing with issues pertaining to specific way this
libbpf mirror repo is set up and organized. libbpf mirror repo is set up and organized.
Build Building libbpf
[![Github Actions Builds & Tests](https://github.com/libbpf/libbpf/actions/workflows/test.yml/badge.svg)](https://github.com/libbpf/libbpf/actions/workflows/test.yml) ===============
[![Total alerts](https://img.shields.io/lgtm/alerts/g/libbpf/libbpf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/libbpf/libbpf/alerts/)
[![Coverity](https://img.shields.io/coverity/scan/18195.svg)](https://scan.coverity.com/projects/libbpf)
[![OSS-Fuzz Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/libbpf.svg)](https://oss-fuzz-build-logs.storage.googleapis.com/index.html#libbpf)
=====
libelf is an internal dependency of libbpf and thus it is required to link libelf is an internal dependency of libbpf and thus it is required to link
against and must be installed on the system for applications to work. against and must be installed on the system for applications to work.
pkg-config is used by default to find libelf, and the program called can be pkg-config is used by default to find libelf, and the program called can be
@@ -156,6 +164,18 @@ Package dependencies of libbpf, package names may vary across distros:
[![libbpf distro packaging status](https://repology.org/badge/vertical-allrepos/libbpf.svg)](https://repology.org/project/libbpf/versions) [![libbpf distro packaging status](https://repology.org/badge/vertical-allrepos/libbpf.svg)](https://repology.org/project/libbpf/versions)
bpf-next to Github sync
=======================
All the gory details of syncing can be found in `scripts/sync-kernel.sh`
script.
Some header files in this repo (`include/linux/*.h`) are reduced versions of
their counterpart files at
[bpf-next](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/)'s
`tools/include/linux/*.h` to make compilation successful.
License License
======= =======

View File

@@ -76,7 +76,7 @@ for phase in "${PHASES[@]}"; do
info "install" info "install"
docker_exec make -j$((4*$(nproc))) -C src OBJDIR=../build DESTDIR=../install install docker_exec make -j$((4*$(nproc))) -C src OBJDIR=../build DESTDIR=../install install
info "link binary" info "link binary"
docker_exec bash -c "EXTRA_CFLAGS=\"${EXTRA_CFLAGS}\" EXTRA_LDFLAGS=\"${EXTRA_LDFLAGS}\" ./travis-ci/managers/test_compile.sh" docker_exec bash -c "EXTRA_CFLAGS=\"${EXTRA_CFLAGS}\" EXTRA_LDFLAGS=\"${EXTRA_LDFLAGS}\" ./ci/managers/test_compile.sh"
;; ;;
CLEANUP) CLEANUP)
info "Cleanup phase" info "Cleanup phase"

View File

@@ -71,6 +71,7 @@ sk_lookup # v5.9+
sk_storage_tracing # missing bpf_sk_storage_get() helper sk_storage_tracing # missing bpf_sk_storage_get() helper
skb_ctx # ctx_{size, }_{in, out} in BPF_PROG_TEST_RUN is missing skb_ctx # ctx_{size, }_{in, out} in BPF_PROG_TEST_RUN is missing
skb_helpers # helpers added in 5.8+ skb_helpers # helpers added in 5.8+
skeleton # creates too big ARRAY map
snprintf # v5.13+ snprintf # v5.13+
snprintf_btf # v5.10+ snprintf_btf # v5.10+
sock_fields # v5.10+ sock_fields # v5.10+

View File

@@ -65,3 +65,4 @@ send_signal # intermittently fails to receive signa
select_reuseport # intermittently fails on new s390x setup select_reuseport # intermittently fails on new s390x setup
xdp_synproxy # JIT does not support calling kernel function (kfunc) xdp_synproxy # JIT does not support calling kernel function (kfunc)
unpriv_bpf_disabled # fentry unpriv_bpf_disabled # fentry
lru_bug

View File

@@ -1,26 +1,18 @@
# $1 - start or end # $1 - start or end
# $2 - fold identifier, no spaces # $2 - fold identifier, no spaces
# $3 - fold section description # $3 - fold section description
travis_fold() { foldable() {
local YELLOW='\033[1;33m' local YELLOW='\033[1;33m'
local NOCOLOR='\033[0m' local NOCOLOR='\033[0m'
if [ -z ${GITHUB_WORKFLOW+x} ]; then if [ $1 = "start" ]; then
echo travis_fold:$1:$2 line="::group::$2"
if [ ! -z "${3:-}" ]; then if [ ! -z "${3:-}" ]; then
echo -e "${YELLOW}$3${NOCOLOR}" line="$line - ${YELLOW}$3${NOCOLOR}"
fi fi
echo
else else
if [ $1 = "start" ]; then line="::endgroup::"
line="::group::$2"
if [ ! -z "${3:-}" ]; then
line="$line - ${YELLOW}$3${NOCOLOR}"
fi
else
line="::endgroup::"
fi
echo -e "$line"
fi fi
echo -e "$line"
} }
__print() { __print() {

76
ci/vmtest/run_selftests.sh Executable file
View File

@@ -0,0 +1,76 @@
#!/bin/bash
set -euo pipefail

# Load shared CI helpers (foldable, __print, ...) from this script's own
# directory. Quote the substitutions so a workspace path containing spaces
# does not break the source line.
source "$(cd "$(dirname "$0")" && pwd)/helpers.sh"

ARCH=$(uname -m)

# Each test suite appends "name:exitcode" here; the harness inspects it
# after the VM shuts down to decide overall pass/fail.
STATUS_FILE=/exitstatus
# Concatenate every non-empty file given as an argument, drop '#' comments,
# and emit the remaining tokens as a single comma-separated list (suitable
# for test_progs -d/-a options). Missing or empty files are silently skipped.
read_lists() {
	local path
	{
		for path in "$@"; do
			if [[ -s "$path" ]]; then
				cat "$path"
			fi
		done
	} | cut -d'#' -f1 | tr -s ' \t\n' ','
}
# Run the main BPF selftest binary and record its exit code in STATUS_FILE.
# Skipped entirely on the 4.9.0 kernel, which lacks the required features.
test_progs() {
	if [[ "${KERNEL}" == '4.9.0' ]]; then
		return 0
	fi
	foldable start test_progs "Testing test_progs"
	# Capture the exit code without tripping "set -e", so the remaining
	# suites still get a chance to run and report their own status.
	local rc=0
	./test_progs ${DENYLIST:+-d$DENYLIST} ${ALLOWLIST:+-a$ALLOWLIST} || rc=$?
	echo "test_progs:$rc" >> "${STATUS_FILE}"
	foldable end test_progs
}
# Run the no-ALU32 variant of the selftests and record its exit code.
test_progs_noalu() {
	foldable start test_progs-no_alu32 "Testing test_progs-no_alu32"
	# Swallow the failure status into rc so "set -e" does not abort the run.
	local rc=0
	./test_progs-no_alu32 ${DENYLIST:+-d$DENYLIST} ${ALLOWLIST:+-a$ALLOWLIST} || rc=$?
	echo "test_progs-no_alu32:$rc" >> "${STATUS_FILE}"
	foldable end test_progs-no_alu32
}
# Run test_maps and record its exit code; only expected to work on the
# latest kernel, so it is a no-op for every other KERNEL value.
test_maps() {
	[[ "${KERNEL}" == 'latest' ]] || return 0
	foldable start test_maps "Testing test_maps"
	local rc=0
	./test_maps || rc=$?
	echo "test_maps:$rc" >> "${STATUS_FILE}"
	foldable end test_maps
}
# Run the verifier selftests and record the exit code; like test_maps,
# this suite is only run against the latest kernel.
test_verifier() {
	[[ "${KERNEL}" == 'latest' ]] || return 0
	foldable start test_verifier "Testing test_verifier"
	local rc=0
	./test_verifier || rc=$?
	echo "test_verifier:$rc" >> "${STATUS_FILE}"
	foldable end test_verifier
}
# Close the "vm_init" fold opened by the VM boot stage before tests begin.
foldable end vm_init

# Build the deny/allow lists from the generic per-kernel file plus the
# optional arch-specific one; either file may be missing or empty.
configs_path="${PROJECT_NAME}/vmtest/configs"
DENYLIST=$(read_lists "$configs_path/DENYLIST-${KERNEL}" "$configs_path/DENYLIST-${KERNEL}.${ARCH}")
ALLOWLIST=$(read_lists "$configs_path/ALLOWLIST-${KERNEL}" "$configs_path/ALLOWLIST-${KERNEL}.${ARCH}")

echo "DENYLIST: ${DENYLIST}"
echo "ALLOWLIST: ${ALLOWLIST}"

# Quote the path so an unusual PROJECT_NAME cannot split or glob.
cd "${PROJECT_NAME}/selftests/bpf"

if [ $# -eq 0 ]; then
	# No arguments: run the full default suite in order.
	test_progs
	test_progs_noalu
	test_maps
	test_verifier
else
	# Arguments name specific test_* functions above; run only those.
	for test_name in "$@"; do
		"${test_name}"
	done
fi

View File

@@ -9,8 +9,8 @@ described here. It's recommended to follow these conventions whenever a
new function or type is added to keep libbpf API clean and consistent. new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the All types and functions provided by libbpf API should have one of the
following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``, following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``btf_dump_``,
``btf_dump_``, ``ring_buffer_``, ``perf_buffer_``. ``ring_buffer_``, ``perf_buffer_``.
System call wrappers System call wrappers
-------------------- --------------------
@@ -59,15 +59,6 @@ Auxiliary functions and types that don't fit well in any of categories
described above should have ``libbpf_`` prefix, e.g. described above should have ``libbpf_`` prefix, e.g.
``libbpf_get_error`` or ``libbpf_prog_type_by_name``. ``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
AF_XDP functions
-------------------
AF_XDP functions should have an ``xsk_`` prefix, e.g.
``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
of both low-level ring access functions and high-level configuration
functions. These can be mixed and matched. Note that these functions
are not reentrant for performance reasons.
ABI ABI
--- ---

View File

@@ -2361,7 +2361,8 @@ union bpf_attr {
* Pull in non-linear data in case the *skb* is non-linear and not * Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes * all of *len* are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for * from *skb* readable and writable. If a zero value is passed for
* *len*, then the whole length of the *skb* is pulled. * *len*, then all bytes in the linear part of *skb* will be made
* readable and writable.
* *
* This helper is only needed for reading and writing with direct * This helper is only needed for reading and writing with direct
* packet access. * packet access.
@@ -2572,10 +2573,12 @@ union bpf_attr {
* There are two supported modes at this time: * There are two supported modes at this time:
* *
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
* (room space is added or removed below the layer 2 header). * (room space is added or removed between the layer 2 and
* layer 3 headers).
* *
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
* (room space is added or removed below the layer 3 header). * (room space is added or removed between the layer 3 and
* layer 4 headers).
* *
* The following flags are supported at this time: * The following flags are supported at this time:
* *
@@ -3007,8 +3010,18 @@ union bpf_attr {
* **BPF_F_USER_STACK** * **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack. * Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID** * **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack, * Collect (build_id, file_offset) instead of ips for user
* only valid if **BPF_F_USER_STACK** is also specified. * stack, only valid if **BPF_F_USER_STACK** is also
* specified.
*
* *file_offset* is an offset relative to the beginning
* of the executable or shared object file backing the vma
* which the *ip* falls in. It is *not* an offset relative
* to that object's base address. Accordingly, it must be
* adjusted by adding (sh_addr - sh_offset), where
* sh_{addr,offset} correspond to the executable section
* containing *file_offset* in the object, for comparisons
* to symbols' st_value to be valid.
* *
* **bpf_get_stack**\ () can collect up to * **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
@@ -5226,22 +5239,25 @@ union bpf_attr {
* Return * Return
* Nothing. Always succeeds. * Nothing. Always succeeds.
* *
* long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
* Description * Description
* Read *len* bytes from *src* into *dst*, starting from *offset* * Read *len* bytes from *src* into *dst*, starting from *offset*
* into *src*. * into *src*.
* *flags* is currently unused.
* Return * Return
* 0 on success, -E2BIG if *offset* + *len* exceeds the length * 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *src*'s data, -EINVAL if *src* is an invalid dynptr. * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
* *flags* is not 0.
* *
* long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
* Description * Description
* Write *len* bytes from *src* into *dst*, starting from *offset* * Write *len* bytes from *src* into *dst*, starting from *offset*
* into *dst*. * into *dst*.
* *flags* is currently unused.
* Return * Return
* 0 on success, -E2BIG if *offset* + *len* exceeds the length * 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
* is a read-only dynptr. * is a read-only dynptr or if *flags* is not 0.
* *
* void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
* Description * Description
@@ -5327,6 +5343,18 @@ union bpf_attr {
* **-EACCES** if the SYN cookie is not valid. * **-EACCES** if the SYN cookie is not valid.
* *
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
*
* u64 bpf_ktime_get_tai_ns(void)
* Description
* A nonsettable system-wide clock derived from wall-clock time but
* ignoring leap seconds. This clock does not experience
* discontinuities and backwards jumps caused by NTP inserting leap
* seconds as CLOCK_REALTIME does.
*
* See: **clock_gettime**\ (**CLOCK_TAI**)
* Return
* Current *ktime*.
*
*/ */
#define __BPF_FUNC_MAPPER(FN) \ #define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \ FN(unspec), \
@@ -5537,6 +5565,7 @@ union bpf_attr {
FN(tcp_raw_gen_syncookie_ipv6), \ FN(tcp_raw_gen_syncookie_ipv6), \
FN(tcp_raw_check_syncookie_ipv4), \ FN(tcp_raw_check_syncookie_ipv4), \
FN(tcp_raw_check_syncookie_ipv6), \ FN(tcp_raw_check_syncookie_ipv6), \
FN(ktime_get_tai_ns), \
/* */ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper /* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -6786,6 +6815,7 @@ enum bpf_core_relo_kind {
BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
}; };
/* /*

View File

@@ -890,6 +890,7 @@ enum {
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
IFLA_BOND_SLAVE_PRIO,
__IFLA_BOND_SLAVE_MAX, __IFLA_BOND_SLAVE_MAX,
}; };

View File

@@ -301,6 +301,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID * { u64 id; } && PERF_FORMAT_ID
* { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP * } && !PERF_FORMAT_GROUP
* *
* { u64 nr; * { u64 nr;
@@ -308,6 +309,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value; * { u64 value;
* { u64 id; } && PERF_FORMAT_ID * { u64 id; } && PERF_FORMAT_ID
* { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr]; * } cntr[nr];
* } && PERF_FORMAT_GROUP * } && PERF_FORMAT_GROUP
* }; * };
@@ -317,8 +319,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2, PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3, PERF_FORMAT_GROUP = 1U << 3,
PERF_FORMAT_LOST = 1U << 4,
PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
}; };
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -491,7 +494,7 @@ struct perf_event_query_bpf {
/* /*
* User provided buffer to store program ids * User provided buffer to store program ids
*/ */
__u32 ids[0]; __u32 ids[];
}; };
/* /*

View File

@@ -180,7 +180,7 @@ struct tc_u32_sel {
short hoff; short hoff;
__be32 hmask; __be32 hmask;
struct tc_u32_key keys[0]; struct tc_u32_key keys[];
}; };
struct tc_u32_mark { struct tc_u32_mark {
@@ -192,7 +192,7 @@ struct tc_u32_mark {
struct tc_u32_pcnt { struct tc_u32_pcnt {
__u64 rcnt; __u64 rcnt;
__u64 rhit; __u64 rhit;
__u64 kcnts[0]; __u64 kcnts[];
}; };
/* Flags */ /* Flags */

197
src/bpf.c
View File

@@ -84,9 +84,7 @@ static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
return ensure_good_fd(fd); return ensure_good_fd(fd);
} }
#define PROG_LOAD_ATTEMPTS 5 int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{ {
int fd; int fd;
@@ -107,7 +105,7 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int
*/ */
int probe_memcg_account(void) int probe_memcg_account(void)
{ {
const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd); const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
struct bpf_insn insns[] = { struct bpf_insn insns[] = {
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
@@ -117,13 +115,13 @@ int probe_memcg_account(void)
int prog_fd; int prog_fd;
/* attempt loading freplace trying to use custom BTF */ /* attempt loading freplace trying to use custom BTF */
memset(&attr, 0, prog_load_attr_sz); memset(&attr, 0, attr_sz);
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insns = ptr_to_u64(insns); attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt; attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL"); attr.license = ptr_to_u64("GPL");
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz); prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
if (prog_fd >= 0) { if (prog_fd >= 0) {
close(prog_fd); close(prog_fd);
return 1; return 1;
@@ -183,7 +181,7 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
attr.map_type = map_type; attr.map_type = map_type;
if (map_name) if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size; attr.key_size = key_size;
attr.value_size = value_size; attr.value_size = value_size;
@@ -234,6 +232,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt, const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts) const struct bpf_prog_load_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
void *finfo = NULL, *linfo = NULL; void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info; const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -253,7 +252,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
if (attempts == 0) if (attempts == 0)
attempts = PROG_LOAD_ATTEMPTS; attempts = PROG_LOAD_ATTEMPTS;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.prog_type = prog_type; attr.prog_type = prog_type;
attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
@@ -263,7 +262,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0); attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
attr.kern_version = OPTS_GET(opts, kern_version, 0); attr.kern_version = OPTS_GET(opts, kern_version, 0);
if (prog_name) if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license); attr.license = ptr_to_u64(license);
@@ -316,7 +315,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.log_level = log_level; attr.log_level = log_level;
} }
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
if (fd >= 0) if (fd >= 0)
return fd; return fd;
@@ -356,7 +355,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
break; break;
} }
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
if (fd >= 0) if (fd >= 0)
goto done; goto done;
} }
@@ -370,7 +369,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.log_size = log_size; attr.log_size = log_size;
attr.log_level = 1; attr.log_level = 1;
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
} }
done: done:
/* free() doesn't affect errno, so we don't need to restore it */ /* free() doesn't affect errno, so we don't need to restore it */
@@ -382,127 +381,136 @@ done:
int bpf_map_update_elem(int fd, const void *key, const void *value, int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags) __u64 flags)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value); attr.value = ptr_to_u64(value);
attr.flags = flags; attr.flags = flags;
ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_lookup_elem(int fd, const void *key, void *value) int bpf_map_lookup_elem(int fd, const void *key, void *value)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value); attr.value = ptr_to_u64(value);
ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value); attr.value = ptr_to_u64(value);
attr.flags = flags; attr.flags = flags;
ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value); attr.value = ptr_to_u64(value);
ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags) int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value); attr.value = ptr_to_u64(value);
attr.flags = flags; attr.flags = flags;
ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_delete_elem(int fd, const void *key) int bpf_map_delete_elem(int fd, const void *key)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags) int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.flags = flags; attr.flags = flags;
ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_get_next_key(int fd, const void *key, void *next_key) int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, next_key);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
attr.key = ptr_to_u64(key); attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key); attr.next_key = ptr_to_u64(next_key);
ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_map_freeze(int fd) int bpf_map_freeze(int fd)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_fd = fd; attr.map_fd = fd;
ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr)); ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
@@ -511,13 +519,14 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
__u32 *count, __u32 *count,
const struct bpf_map_batch_opts *opts) const struct bpf_map_batch_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, batch);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
if (!OPTS_VALID(opts, bpf_map_batch_opts)) if (!OPTS_VALID(opts, bpf_map_batch_opts))
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.batch.map_fd = fd; attr.batch.map_fd = fd;
attr.batch.in_batch = ptr_to_u64(in_batch); attr.batch.in_batch = ptr_to_u64(in_batch);
attr.batch.out_batch = ptr_to_u64(out_batch); attr.batch.out_batch = ptr_to_u64(out_batch);
@@ -527,7 +536,7 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
attr.batch.flags = OPTS_GET(opts, flags, 0); attr.batch.flags = OPTS_GET(opts, flags, 0);
ret = sys_bpf(cmd, &attr, sizeof(attr)); ret = sys_bpf(cmd, &attr, attr_sz);
*count = attr.batch.count; *count = attr.batch.count;
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
@@ -566,26 +575,37 @@ int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *co
int bpf_obj_pin(int fd, const char *pathname) int bpf_obj_pin(int fd, const char *pathname)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.pathname = ptr_to_u64((void *)pathname); attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd; attr.bpf_fd = fd;
ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr)); ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_obj_get(const char *pathname) int bpf_obj_get(const char *pathname)
{ {
return bpf_obj_get_opts(pathname, NULL);
}
int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); if (!OPTS_VALID(opts, bpf_obj_get_opts))
attr.pathname = ptr_to_u64((void *)pathname); return libbpf_err(-EINVAL);
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.pathname = ptr_to_u64((void *)pathname);
attr.file_flags = OPTS_GET(opts, file_flags, 0);
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
@@ -603,52 +623,50 @@ int bpf_prog_attach_opts(int prog_fd, int target_fd,
enum bpf_attach_type type, enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts) const struct bpf_prog_attach_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
if (!OPTS_VALID(opts, bpf_prog_attach_opts)) if (!OPTS_VALID(opts, bpf_prog_attach_opts))
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.target_fd = target_fd; attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd; attr.attach_bpf_fd = prog_fd;
attr.attach_type = type; attr.attach_type = type;
attr.attach_flags = OPTS_GET(opts, flags, 0); attr.attach_flags = OPTS_GET(opts, flags, 0);
attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0); attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
__attribute__((alias("bpf_prog_attach_opts")))
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts);
int bpf_prog_detach(int target_fd, enum bpf_attach_type type) int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.target_fd = target_fd; attr.target_fd = target_fd;
attr.attach_type = type; attr.attach_type = type;
ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.target_fd = target_fd; attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd; attr.attach_bpf_fd = prog_fd;
attr.attach_type = type; attr.attach_type = type;
ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
@@ -656,6 +674,7 @@ int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type, enum bpf_attach_type attach_type,
const struct bpf_link_create_opts *opts) const struct bpf_link_create_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, link_create);
__u32 target_btf_id, iter_info_len; __u32 target_btf_id, iter_info_len;
union bpf_attr attr; union bpf_attr attr;
int fd, err; int fd, err;
@@ -674,7 +693,7 @@ int bpf_link_create(int prog_fd, int target_fd,
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
} }
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.link_create.prog_fd = prog_fd; attr.link_create.prog_fd = prog_fd;
attr.link_create.target_fd = target_fd; attr.link_create.target_fd = target_fd;
attr.link_create.attach_type = attach_type; attr.link_create.attach_type = attach_type;
@@ -718,7 +737,7 @@ int bpf_link_create(int prog_fd, int target_fd,
break; break;
} }
proceed: proceed:
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
if (fd >= 0) if (fd >= 0)
return fd; return fd;
/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
@@ -754,44 +773,47 @@ proceed:
int bpf_link_detach(int link_fd) int bpf_link_detach(int link_fd)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.link_detach.link_fd = link_fd; attr.link_detach.link_fd = link_fd;
ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)); ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_link_update(int link_fd, int new_prog_fd, int bpf_link_update(int link_fd, int new_prog_fd,
const struct bpf_link_update_opts *opts) const struct bpf_link_update_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, link_update);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
if (!OPTS_VALID(opts, bpf_link_update_opts)) if (!OPTS_VALID(opts, bpf_link_update_opts))
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.link_update.link_fd = link_fd; attr.link_update.link_fd = link_fd;
attr.link_update.new_prog_fd = new_prog_fd; attr.link_update.new_prog_fd = new_prog_fd;
attr.link_update.flags = OPTS_GET(opts, flags, 0); attr.link_update.flags = OPTS_GET(opts, flags, 0);
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr)); ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }
int bpf_iter_create(int link_fd) int bpf_iter_create(int link_fd)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.iter_create.link_fd = link_fd; attr.iter_create.link_fd = link_fd;
fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
@@ -799,13 +821,14 @@ int bpf_prog_query_opts(int target_fd,
enum bpf_attach_type type, enum bpf_attach_type type,
struct bpf_prog_query_opts *opts) struct bpf_prog_query_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, query);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
if (!OPTS_VALID(opts, bpf_prog_query_opts)) if (!OPTS_VALID(opts, bpf_prog_query_opts))
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.query.target_fd = target_fd; attr.query.target_fd = target_fd;
attr.query.attach_type = type; attr.query.attach_type = type;
@@ -814,7 +837,7 @@ int bpf_prog_query_opts(int target_fd,
attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL)); attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL)); attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr)); ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
OPTS_SET(opts, attach_flags, attr.query.attach_flags); OPTS_SET(opts, attach_flags, attr.query.attach_flags);
OPTS_SET(opts, prog_cnt, attr.query.prog_cnt); OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
@@ -843,13 +866,14 @@ int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, test);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
if (!OPTS_VALID(opts, bpf_test_run_opts)) if (!OPTS_VALID(opts, bpf_test_run_opts))
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.test.prog_fd = prog_fd; attr.test.prog_fd = prog_fd;
attr.test.batch_size = OPTS_GET(opts, batch_size, 0); attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
attr.test.cpu = OPTS_GET(opts, cpu, 0); attr.test.cpu = OPTS_GET(opts, cpu, 0);
@@ -865,7 +889,7 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL)); attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);
OPTS_SET(opts, data_size_out, attr.test.data_size_out); OPTS_SET(opts, data_size_out, attr.test.data_size_out);
OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
@@ -877,13 +901,14 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr; union bpf_attr attr;
int err; int err;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.start_id = start_id; attr.start_id = start_id;
err = sys_bpf(cmd, &attr, sizeof(attr)); err = sys_bpf(cmd, &attr, attr_sz);
if (!err) if (!err)
*next_id = attr.next_id; *next_id = attr.next_id;
@@ -912,80 +937,84 @@ int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
int bpf_prog_get_fd_by_id(__u32 id) int bpf_prog_get_fd_by_id(__u32 id)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.prog_id = id; attr.prog_id = id;
fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
int bpf_map_get_fd_by_id(__u32 id) int bpf_map_get_fd_by_id(__u32 id)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.map_id = id; attr.map_id = id;
fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
int bpf_btf_get_fd_by_id(__u32 id) int bpf_btf_get_fd_by_id(__u32 id)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.btf_id = id; attr.btf_id = id;
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
int bpf_link_get_fd_by_id(__u32 id) int bpf_link_get_fd_by_id(__u32 id)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.link_id = id; attr.link_id = id;
fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, info);
union bpf_attr attr; union bpf_attr attr;
int err; int err;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.info.bpf_fd = bpf_fd; attr.info.bpf_fd = bpf_fd;
attr.info.info_len = *info_len; attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info); attr.info.info = ptr_to_u64(info);
err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
if (!err) if (!err)
*info_len = attr.info.info_len; *info_len = attr.info.info_len;
return libbpf_err_errno(err); return libbpf_err_errno(err);
} }
int bpf_raw_tracepoint_open(const char *name, int prog_fd) int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.raw_tracepoint.name = ptr_to_u64(name); attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd; attr.raw_tracepoint.prog_fd = prog_fd;
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
@@ -1041,16 +1070,18 @@ int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
__u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
__u64 *probe_addr) __u64 *probe_addr)
{ {
union bpf_attr attr = {}; const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
union bpf_attr attr;
int err; int err;
memset(&attr, 0, attr_sz);
attr.task_fd_query.pid = pid; attr.task_fd_query.pid = pid;
attr.task_fd_query.fd = fd; attr.task_fd_query.fd = fd;
attr.task_fd_query.flags = flags; attr.task_fd_query.flags = flags;
attr.task_fd_query.buf = ptr_to_u64(buf); attr.task_fd_query.buf = ptr_to_u64(buf);
attr.task_fd_query.buf_len = *buf_len; attr.task_fd_query.buf_len = *buf_len;
err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr)); err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);
*buf_len = attr.task_fd_query.buf_len; *buf_len = attr.task_fd_query.buf_len;
*prog_id = attr.task_fd_query.prog_id; *prog_id = attr.task_fd_query.prog_id;
@@ -1063,30 +1094,32 @@ int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
int bpf_enable_stats(enum bpf_stats_type type) int bpf_enable_stats(enum bpf_stats_type type)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
union bpf_attr attr; union bpf_attr attr;
int fd; int fd;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.enable_stats.type = type; attr.enable_stats.type = type;
fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr)); fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
return libbpf_err_errno(fd); return libbpf_err_errno(fd);
} }
int bpf_prog_bind_map(int prog_fd, int map_fd, int bpf_prog_bind_map(int prog_fd, int map_fd,
const struct bpf_prog_bind_opts *opts) const struct bpf_prog_bind_opts *opts)
{ {
const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
union bpf_attr attr; union bpf_attr attr;
int ret; int ret;
if (!OPTS_VALID(opts, bpf_prog_bind_opts)) if (!OPTS_VALID(opts, bpf_prog_bind_opts))
return libbpf_err(-EINVAL); return libbpf_err(-EINVAL);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, attr_sz);
attr.prog_bind_map.prog_fd = prog_fd; attr.prog_bind_map.prog_fd = prog_fd;
attr.prog_bind_map.map_fd = map_fd; attr.prog_bind_map.map_fd = map_fd;
attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr)); ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
return libbpf_err_errno(ret); return libbpf_err_errno(ret);
} }

View File

@@ -270,8 +270,19 @@ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values
__u32 *count, __u32 *count,
const struct bpf_map_batch_opts *opts); const struct bpf_map_batch_opts *opts);
struct bpf_obj_get_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 file_flags;
size_t :0;
};
#define bpf_obj_get_opts__last_field file_flags
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname); LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname); LIBBPF_API int bpf_obj_get(const char *pathname);
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
const struct bpf_obj_get_opts *opts);
struct bpf_prog_attach_opts { struct bpf_prog_attach_opts {
size_t sz; /* size of this struct for forward/backward compatibility */ size_t sz; /* size of this struct for forward/backward compatibility */

View File

@@ -29,6 +29,7 @@ enum bpf_type_id_kind {
enum bpf_type_info_kind { enum bpf_type_info_kind {
BPF_TYPE_EXISTS = 0, /* type existence in target kernel */ BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
BPF_TYPE_SIZE = 1, /* type size in target kernel */ BPF_TYPE_SIZE = 1, /* type size in target kernel */
BPF_TYPE_MATCHES = 2, /* type match in target kernel */
}; };
/* second argument to __builtin_preserve_enum_value() built-in */ /* second argument to __builtin_preserve_enum_value() built-in */
@@ -183,6 +184,16 @@ enum bpf_enum_value_kind {
#define bpf_core_type_exists(type) \ #define bpf_core_type_exists(type) \
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS) __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
/*
* Convenience macro to check that provided named type
* (struct/union/enum/typedef) "matches" that in a target kernel.
* Returns:
* 1, if the type matches in the target kernel's BTF;
* 0, if the type does not match any in the target kernel
*/
#define bpf_core_type_matches(type) \
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
/* /*
* Convenience macro to get the byte size of a provided named type * Convenience macro to get the byte size of a provided named type
* (struct/union/enum/typedef) in a target kernel. * (struct/union/enum/typedef) in a target kernel.

View File

@@ -1005,7 +1005,8 @@ static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags
* Pull in non-linear data in case the *skb* is non-linear and not * Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes * all of *len* are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for * from *skb* readable and writable. If a zero value is passed for
* *len*, then the whole length of the *skb* is pulled. * *len*, then all bytes in the linear part of *skb* will be made
* readable and writable.
* *
* This helper is only needed for reading and writing with direct * This helper is only needed for reading and writing with direct
* packet access. * packet access.
@@ -1238,10 +1239,12 @@ static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *op
* There are two supported modes at this time: * There are two supported modes at this time:
* *
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
* (room space is added or removed below the layer 2 header). * (room space is added or removed between the layer 2 and
* layer 3 headers).
* *
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
* (room space is added or removed below the layer 3 header). * (room space is added or removed between the layer 3 and
* layer 4 headers).
* *
* The following flags are supported at this time: * The following flags are supported at this time:
* *
@@ -1741,8 +1744,18 @@ static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct
* **BPF_F_USER_STACK** * **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack. * Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID** * **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack, * Collect (build_id, file_offset) instead of ips for user
* only valid if **BPF_F_USER_STACK** is also specified. * stack, only valid if **BPF_F_USER_STACK** is also
* specified.
*
* *file_offset* is an offset relative to the beginning
* of the executable or shared object file backing the vma
* which the *ip* falls in. It is *not* an offset relative
* to that object's base address. Accordingly, it must be
* adjusted by adding (sh_addr - sh_offset), where
* sh_{addr,offset} correspond to the executable section
* containing *file_offset* in the object, for comparisons
* to symbols' st_value to be valid.
* *
* **bpf_get_stack**\ () can collect up to * **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
@@ -4450,25 +4463,28 @@ static void (*bpf_ringbuf_discard_dynptr)(struct bpf_dynptr *ptr, __u64 flags) =
* *
* Read *len* bytes from *src* into *dst*, starting from *offset* * Read *len* bytes from *src* into *dst*, starting from *offset*
* into *src*. * into *src*.
* *flags* is currently unused.
* *
* Returns * Returns
* 0 on success, -E2BIG if *offset* + *len* exceeds the length * 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *src*'s data, -EINVAL if *src* is an invalid dynptr. * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
* *flags* is not 0.
*/ */
static long (*bpf_dynptr_read)(void *dst, __u32 len, struct bpf_dynptr *src, __u32 offset) = (void *) 201; static long (*bpf_dynptr_read)(void *dst, __u32 len, struct bpf_dynptr *src, __u32 offset, __u64 flags) = (void *) 201;
/* /*
* bpf_dynptr_write * bpf_dynptr_write
* *
* Write *len* bytes from *src* into *dst*, starting from *offset* * Write *len* bytes from *src* into *dst*, starting from *offset*
* into *dst*. * into *dst*.
* *flags* is currently unused.
* *
* Returns * Returns
* 0 on success, -E2BIG if *offset* + *len* exceeds the length * 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
* is a read-only dynptr. * is a read-only dynptr or if *flags* is not 0.
*/ */
static long (*bpf_dynptr_write)(struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len) = (void *) 202; static long (*bpf_dynptr_write)(struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len, __u64 flags) = (void *) 202;
/* /*
* bpf_dynptr_data * bpf_dynptr_data
@@ -4575,4 +4591,19 @@ static long (*bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *iph, struct tcphdr
*/ */
static long (*bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th) = (void *) 207; static long (*bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th) = (void *) 207;
/*
* bpf_ktime_get_tai_ns
*
* A nonsettable system-wide clock derived from wall-clock time but
* ignoring leap seconds. This clock does not experience
* discontinuities and backwards jumps caused by NTP inserting leap
* seconds as CLOCK_REALTIME does.
*
* See: **clock_gettime**\ (**CLOCK_TAI**)
*
* Returns
* Current *ktime*.
*/
static __u64 (*bpf_ktime_get_tai_ns)(void) = (void *) 208;

View File

@@ -22,12 +22,25 @@
* To allow use of SEC() with externs (e.g., for extern .maps declarations), * To allow use of SEC() with externs (e.g., for extern .maps declarations),
* make sure __attribute__((unused)) doesn't trigger compilation warning. * make sure __attribute__((unused)) doesn't trigger compilation warning.
*/ */
#if __GNUC__ && !__clang__
/*
* Pragma macros are broken on GCC
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
*/
#define SEC(name) __attribute__((section(name), used))
#else
#define SEC(name) \ #define SEC(name) \
_Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \ _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
__attribute__((section(name), used)) \ __attribute__((section(name), used)) \
_Pragma("GCC diagnostic pop") \ _Pragma("GCC diagnostic pop") \
#endif
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */ /* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline #undef __always_inline
#define __always_inline inline __attribute__((always_inline)) #define __always_inline inline __attribute__((always_inline))

View File

@@ -2,6 +2,8 @@
#ifndef __BPF_TRACING_H__ #ifndef __BPF_TRACING_H__
#define __BPF_TRACING_H__ #define __BPF_TRACING_H__
#include <bpf/bpf_helpers.h>
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ /* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86) #if defined(__TARGET_ARCH_x86)
#define bpf_target_x86 #define bpf_target_x86
@@ -140,7 +142,7 @@ struct pt_regs___s390 {
#define __PT_RC_REG gprs[2] #define __PT_RC_REG gprs[2]
#define __PT_SP_REG gprs[15] #define __PT_SP_REG gprs[15]
#define __PT_IP_REG psw.addr #define __PT_IP_REG psw.addr
#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; }) #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2) #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
#elif defined(bpf_target_arm) #elif defined(bpf_target_arm)
@@ -174,7 +176,7 @@ struct pt_regs___arm64 {
#define __PT_RC_REG regs[0] #define __PT_RC_REG regs[0]
#define __PT_SP_REG sp #define __PT_SP_REG sp
#define __PT_IP_REG pc #define __PT_IP_REG pc
#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; }) #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0) #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
#elif defined(bpf_target_mips) #elif defined(bpf_target_mips)
@@ -233,7 +235,7 @@ struct pt_regs___arm64 {
#define __PT_PARM5_REG a4 #define __PT_PARM5_REG a4
#define __PT_RET_REG ra #define __PT_RET_REG ra
#define __PT_FP_REG s0 #define __PT_FP_REG s0
#define __PT_RC_REG a5 #define __PT_RC_REG a0
#define __PT_SP_REG sp #define __PT_SP_REG sp
#define __PT_IP_REG pc #define __PT_IP_REG pc
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */ /* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
@@ -424,7 +426,7 @@ struct pt_regs;
*/ */
#define BPF_PROG(name, args...) \ #define BPF_PROG(name, args...) \
name(unsigned long long *ctx); \ name(unsigned long long *ctx); \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args); \ ____##name(unsigned long long *ctx, ##args); \
typeof(name(0)) name(unsigned long long *ctx) \ typeof(name(0)) name(unsigned long long *ctx) \
{ \ { \
@@ -433,7 +435,7 @@ typeof(name(0)) name(unsigned long long *ctx) \
return ____##name(___bpf_ctx_cast(args)); \ return ____##name(___bpf_ctx_cast(args)); \
_Pragma("GCC diagnostic pop") \ _Pragma("GCC diagnostic pop") \
} \ } \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args) ____##name(unsigned long long *ctx, ##args)
struct pt_regs; struct pt_regs;
@@ -458,7 +460,7 @@ struct pt_regs;
*/ */
#define BPF_KPROBE(name, args...) \ #define BPF_KPROBE(name, args...) \
name(struct pt_regs *ctx); \ name(struct pt_regs *ctx); \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \ ____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \ typeof(name(0)) name(struct pt_regs *ctx) \
{ \ { \
@@ -467,7 +469,7 @@ typeof(name(0)) name(struct pt_regs *ctx) \
return ____##name(___bpf_kprobe_args(args)); \ return ____##name(___bpf_kprobe_args(args)); \
_Pragma("GCC diagnostic pop") \ _Pragma("GCC diagnostic pop") \
} \ } \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args) ____##name(struct pt_regs *ctx, ##args)
#define ___bpf_kretprobe_args0() ctx #define ___bpf_kretprobe_args0() ctx
@@ -482,7 +484,7 @@ ____##name(struct pt_regs *ctx, ##args)
*/ */
#define BPF_KRETPROBE(name, args...) \ #define BPF_KRETPROBE(name, args...) \
name(struct pt_regs *ctx); \ name(struct pt_regs *ctx); \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \ ____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \ typeof(name(0)) name(struct pt_regs *ctx) \
{ \ { \
@@ -493,39 +495,69 @@ typeof(name(0)) name(struct pt_regs *ctx) \
} \ } \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
#define ___bpf_syscall_args0() ctx #define ___bpf_syscall_args0() ctx
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs) #define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs) #define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs) #define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs) #define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs) #define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args) #define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
#define ___bpf_syswrap_args0() ctx
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
/* /*
* BPF_KPROBE_SYSCALL is a variant of BPF_KPROBE, which is intended for * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
* tracing syscall functions, like __x64_sys_close. It hides the underlying * tracing syscall functions, like __x64_sys_close. It hides the underlying
* platform-specific low-level way of getting syscall input arguments from * platform-specific low-level way of getting syscall input arguments from
* struct pt_regs, and provides a familiar typed and named function arguments * struct pt_regs, and provides a familiar typed and named function arguments
* syntax and semantics of accessing syscall input parameters. * syntax and semantics of accessing syscall input parameters.
* *
* Original struct pt_regs* context is preserved as 'ctx' argument. This might * Original struct pt_regs * context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output(). * be necessary when using BPF helpers like bpf_perf_event_output().
* *
* This macro relies on BPF CO-RE support. * At the moment BPF_KSYSCALL does not transparently handle all the calling
* convention quirks for the following syscalls:
*
* - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
* - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
* CONFIG_CLONE_BACKWARDS3.
* - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
* - compat syscalls.
*
* This may or may not change in the future. User needs to take extra measures
* to handle such quirks explicitly, if necessary.
*
* This macro relies on BPF CO-RE support and virtual __kconfig externs.
*/ */
#define BPF_KPROBE_SYSCALL(name, args...) \ #define BPF_KSYSCALL(name, args...) \
name(struct pt_regs *ctx); \ name(struct pt_regs *ctx); \
static __attribute__((always_inline)) typeof(name(0)) \ extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \ ____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \ typeof(name(0)) name(struct pt_regs *ctx) \
{ \ { \
struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx); \ struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
? (struct pt_regs *)PT_REGS_PARM1(ctx) \
: ctx; \
_Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_syscall_args(args)); \ if (LINUX_HAS_SYSCALL_WRAPPER) \
return ____##name(___bpf_syswrap_args(args)); \
else \
return ____##name(___bpf_syscall_args(args)); \
_Pragma("GCC diagnostic pop") \ _Pragma("GCC diagnostic pop") \
} \ } \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args) ____##name(struct pt_regs *ctx, ##args)
#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
#endif #endif

View File

@@ -1225,8 +1225,6 @@ int btf__load_into_kernel(struct btf *btf)
return btf_load_into_kernel(btf, NULL, 0, 0); return btf_load_into_kernel(btf, NULL, 0, 0);
} }
int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
int btf__fd(const struct btf *btf) int btf__fd(const struct btf *btf)
{ {
return btf->fd; return btf->fd;

View File

@@ -116,7 +116,6 @@ LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_b
LIBBPF_API struct btf *btf__load_vmlinux_btf(void); LIBBPF_API struct btf *btf__load_vmlinux_btf(void);
LIBBPF_API struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf); LIBBPF_API struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf);
LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id); LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf); LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);

View File

@@ -2045,7 +2045,7 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
*value = *(__s64 *)data; *value = *(__s64 *)data;
return 0; return 0;
case 4: case 4:
*value = is_signed ? *(__s32 *)data : *(__u32 *)data; *value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data;
return 0; return 0;
case 2: case 2:
*value = is_signed ? *(__s16 *)data : *(__u16 *)data; *value = is_signed ? *(__s16 *)data : *(__u16 *)data;

View File

@@ -533,7 +533,7 @@ void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
gen->attach_kind = kind; gen->attach_kind = kind;
ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s", ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
prefix, attach_name); prefix, attach_name);
if (ret == sizeof(gen->attach_target)) if (ret >= sizeof(gen->attach_target))
gen->error = -ENOSPC; gen->error = -ENOSPC;
} }

File diff suppressed because it is too large Load Diff

View File

@@ -260,6 +260,8 @@ LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog); LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);
struct bpf_insn; struct bpf_insn;
@@ -457,6 +459,52 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
const char *pattern, const char *pattern,
const struct bpf_kprobe_multi_opts *opts); const struct bpf_kprobe_multi_opts *opts);
struct bpf_ksyscall_opts {
/* size of this struct, for forward/backward compatiblity */
size_t sz;
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
/* attach as return probe? */
bool retprobe;
size_t :0;
};
#define bpf_ksyscall_opts__last_field retprobe
/**
* @brief **bpf_program__attach_ksyscall()** attaches a BPF program
* to kernel syscall handler of a specified syscall. Optionally it's possible
* to request to install retprobe that will be triggered at syscall exit. It's
* also possible to associate BPF cookie (though options).
*
* Libbpf automatically will determine correct full kernel function name,
* which depending on system architecture and kernel version/configuration
* could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
* attach specified program using kprobe/kretprobe mechanism.
*
* **bpf_program__attach_ksyscall()** is an API counterpart of declarative
* **SEC("ksyscall/<syscall>")** annotation of BPF programs.
*
* At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
* not handle all the calling convention quirks for mmap(), clone() and compat
* syscalls. It also only attaches to "native" syscall interfaces. If host
* system supports compat syscalls or defines 32-bit syscalls in 64-bit
* kernel, such syscall interfaces won't be attached to by libbpf.
*
* These limitations may or may not change in the future. Therefore it is
* recommended to use SEC("kprobe") for these syscalls or if working with
* compat and 32-bit interfaces is required.
*
* @param prog BPF program to attach
* @param syscall_name Symbolic name of the syscall (e.g., "bpf")
* @param opts Additional options (see **struct bpf_ksyscall_opts**)
* @return Reference to the newly created BPF link; or NULL is returned on
* error, error code is stored in errno
*/
LIBBPF_API struct bpf_link *
bpf_program__attach_ksyscall(const struct bpf_program *prog,
const char *syscall_name,
const struct bpf_ksyscall_opts *opts);
struct bpf_uprobe_opts { struct bpf_uprobe_opts {
/* size of this struct, for forward/backward compatiblity */ /* size of this struct, for forward/backward compatiblity */
size_t sz; size_t sz;
@@ -1053,6 +1101,22 @@ LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx); LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb); LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx); LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);
/**
* @brief **perf_buffer__buffer()** returns the per-cpu raw mmap()'ed underlying
* memory region of the ring buffer.
* This ring buffer can be used to implement a custom events consumer.
* The ring buffer starts with the *struct perf_event_mmap_page*, which
* holds the ring buffer managment fields, when accessing the header
* structure it's important to be SMP aware.
* You can refer to *perf_event_read_simple* for a simple example.
* @param pb the perf buffer structure
* @param buf_idx the buffer index to retreive
* @param buf (out) gets the base pointer of the mmap()'ed memory
* @param buf_size (out) gets the size of the mmap()'ed region
* @return 0 on success, negative error code for failure
*/
LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
size_t *buf_size);
struct bpf_prog_linfo; struct bpf_prog_linfo;
struct bpf_prog_info; struct bpf_prog_info;

View File

@@ -355,11 +355,16 @@ LIBBPF_0.8.0 {
LIBBPF_1.0.0 { LIBBPF_1.0.0 {
global: global:
bpf_obj_get_opts;
bpf_prog_query_opts; bpf_prog_query_opts;
bpf_program__attach_ksyscall;
bpf_program__autoattach;
bpf_program__set_autoattach;
btf__add_enum64; btf__add_enum64;
btf__add_enum64_value; btf__add_enum64_value;
libbpf_bpf_attach_type_str; libbpf_bpf_attach_type_str;
libbpf_bpf_link_type_str; libbpf_bpf_link_type_str;
libbpf_bpf_map_type_str; libbpf_bpf_map_type_str;
libbpf_bpf_prog_type_str; libbpf_bpf_prog_type_str;
perf_buffer__buffer;
}; };

View File

@@ -108,9 +108,9 @@ static inline bool str_has_sfx(const char *str, const char *sfx)
size_t str_len = strlen(str); size_t str_len = strlen(str);
size_t sfx_len = strlen(sfx); size_t sfx_len = strlen(sfx);
if (sfx_len <= str_len) if (sfx_len > str_len)
return strcmp(str + str_len - sfx_len, sfx); return false;
return false; return strcmp(str + str_len - sfx_len, sfx) == 0;
} }
/* Symbol versioning is different between static and shared library. /* Symbol versioning is different between static and shared library.
@@ -352,6 +352,8 @@ enum kern_feature_id {
FEAT_BPF_COOKIE, FEAT_BPF_COOKIE,
/* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */ /* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
FEAT_BTF_ENUM64, FEAT_BTF_ENUM64,
/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
FEAT_SYSCALL_WRAPPER,
__FEAT_CNT, __FEAT_CNT,
}; };
@@ -571,4 +573,7 @@ static inline bool is_pow_of_2(size_t x)
return x && (x & (x - 1)) == 0; return x && (x & (x - 1)) == 0;
} }
#define PROG_LOAD_ATTEMPTS 5
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */ #endif /* __LIBBPF_LIBBPF_INTERNAL_H */

View File

@@ -125,6 +125,8 @@ struct bpf_map;
struct btf; struct btf;
struct btf_ext; struct btf_ext;
LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);

View File

@@ -193,7 +193,7 @@ static int probe_map_create(enum bpf_map_type map_type)
LIBBPF_OPTS(bpf_map_create_opts, opts); LIBBPF_OPTS(bpf_map_create_opts, opts);
int key_size, value_size, max_entries; int key_size, value_size, max_entries;
__u32 btf_key_type_id = 0, btf_value_type_id = 0; __u32 btf_key_type_id = 0, btf_value_type_id = 0;
int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err; int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err = 0;
key_size = sizeof(__u32); key_size = sizeof(__u32);
value_size = sizeof(__u32); value_size = sizeof(__u32);

View File

@@ -587,11 +587,12 @@ static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd) static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
{ {
struct bpf_prog_info info = {}; struct bpf_prog_info info;
__u32 info_len = sizeof(info); __u32 info_len = sizeof(info);
char name[256]; char name[256];
int len, ret; int len, ret;
memset(&info, 0, info_len);
ret = bpf_obj_get_info_by_fd(fd, &info, &info_len); ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (ret < 0) if (ret < 0)
return ret; return ret;

View File

@@ -95,6 +95,7 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id"; case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
case BPF_CORE_TYPE_ID_TARGET: return "target_type_id"; case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
case BPF_CORE_TYPE_EXISTS: return "type_exists"; case BPF_CORE_TYPE_EXISTS: return "type_exists";
case BPF_CORE_TYPE_MATCHES: return "type_matches";
case BPF_CORE_TYPE_SIZE: return "type_size"; case BPF_CORE_TYPE_SIZE: return "type_size";
case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists"; case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
case BPF_CORE_ENUMVAL_VALUE: return "enumval_value"; case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
@@ -123,6 +124,7 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
case BPF_CORE_TYPE_ID_LOCAL: case BPF_CORE_TYPE_ID_LOCAL:
case BPF_CORE_TYPE_ID_TARGET: case BPF_CORE_TYPE_ID_TARGET:
case BPF_CORE_TYPE_EXISTS: case BPF_CORE_TYPE_EXISTS:
case BPF_CORE_TYPE_MATCHES:
case BPF_CORE_TYPE_SIZE: case BPF_CORE_TYPE_SIZE:
return true; return true;
default: default:
@@ -251,7 +253,7 @@ recur:
* - field 'a' access (corresponds to '2' in low-level spec); * - field 'a' access (corresponds to '2' in low-level spec);
* - array element #3 access (corresponds to '3' in low-level spec). * - array element #3 access (corresponds to '3' in low-level spec).
* *
* Type-based relocations (TYPE_EXISTS/TYPE_SIZE, * Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE,
* TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
* spec and raw_spec are kept empty. * spec and raw_spec are kept empty.
* *
@@ -568,9 +570,14 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
targ_spec->relo_kind = local_spec->relo_kind; targ_spec->relo_kind = local_spec->relo_kind;
if (core_relo_is_type_based(local_spec->relo_kind)) { if (core_relo_is_type_based(local_spec->relo_kind)) {
return bpf_core_types_are_compat(local_spec->btf, if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
local_spec->root_type_id, return bpf_core_types_match(local_spec->btf,
targ_btf, targ_id); local_spec->root_type_id,
targ_btf, targ_id);
else
return bpf_core_types_are_compat(local_spec->btf,
local_spec->root_type_id,
targ_btf, targ_id);
} }
local_acc = &local_spec->spec[0]; local_acc = &local_spec->spec[0];
@@ -819,6 +826,7 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
*validate = false; *validate = false;
break; break;
case BPF_CORE_TYPE_EXISTS: case BPF_CORE_TYPE_EXISTS:
case BPF_CORE_TYPE_MATCHES:
*val = 1; *val = 1;
break; break;
case BPF_CORE_TYPE_SIZE: case BPF_CORE_TYPE_SIZE:
@@ -1410,3 +1418,273 @@ int bpf_core_calc_relo_insn(const char *prog_name,
return 0; return 0;
} }
static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off,
const struct btf *targ_btf, size_t targ_name_off)
{
const char *local_n, *targ_n;
size_t local_len, targ_len;
local_n = btf__name_by_offset(local_btf, local_name_off);
targ_n = btf__name_by_offset(targ_btf, targ_name_off);
if (str_is_empty(targ_n))
return str_is_empty(local_n);
targ_len = bpf_core_essential_name_len(targ_n);
local_len = bpf_core_essential_name_len(local_n);
return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0;
}
static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t,
const struct btf *targ_btf, const struct btf_type *targ_t)
{
__u16 local_vlen = btf_vlen(local_t);
__u16 targ_vlen = btf_vlen(targ_t);
int i, j;
if (local_t->size != targ_t->size)
return 0;
if (local_vlen > targ_vlen)
return 0;
/* iterate over the local enum's variants and make sure each has
* a symbolic name correspondent in the target
*/
for (i = 0; i < local_vlen; i++) {
bool matched = false;
__u32 local_n_off, targ_n_off;
local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
btf_enum64(local_t)[i].name_off;
for (j = 0; j < targ_vlen; j++) {
targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
btf_enum64(targ_t)[j].name_off;
if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
matched = true;
break;
}
}
if (!matched)
return 0;
}
return 1;
}
static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t,
const struct btf *targ_btf, const struct btf_type *targ_t,
bool behind_ptr, int level)
{
const struct btf_member *local_m = btf_members(local_t);
__u16 local_vlen = btf_vlen(local_t);
__u16 targ_vlen = btf_vlen(targ_t);
int i, j, err;
if (local_vlen > targ_vlen)
return 0;
/* check that all local members have a match in the target */
for (i = 0; i < local_vlen; i++, local_m++) {
const struct btf_member *targ_m = btf_members(targ_t);
bool matched = false;
for (j = 0; j < targ_vlen; j++, targ_m++) {
if (!bpf_core_names_match(local_btf, local_m->name_off,
targ_btf, targ_m->name_off))
continue;
err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
targ_m->type, behind_ptr, level - 1);
if (err < 0)
return err;
if (err > 0) {
matched = true;
break;
}
}
if (!matched)
return 0;
}
return 1;
}
/* Check that two types "match". This function assumes that root types were
* already checked for name match.
*
* The matching relation is defined as follows:
* - modifiers and typedefs are stripped (and, hence, effectively ignored)
* - generally speaking types need to be of same kind (struct vs. struct, union
* vs. union, etc.)
* - exceptions are struct/union behind a pointer which could also match a
* forward declaration of a struct or union, respectively, and enum vs.
* enum64 (see below)
* Then, depending on type:
* - integers:
* - match if size and signedness match
* - arrays & pointers:
* - target types are recursively matched
* - structs & unions:
* - local members need to exist in target with the same name
* - for each member we recursively check match unless it is already behind a
* pointer, in which case we only check matching names and compatible kind
* - enums:
* - local variants have to have a match in target by symbolic name (but not
* numeric value)
* - size has to match (but enum may match enum64 and vice versa)
* - function pointers:
* - number and position of arguments in local type has to match target
* - for each argument and the return value we recursively check match
*/
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
__u32 targ_id, bool behind_ptr, int level)
{
const struct btf_type *local_t, *targ_t;
int depth = 32; /* max recursion depth */
__u16 local_k, targ_k;
if (level <= 0)
return -EINVAL;
local_t = btf_type_by_id(local_btf, local_id);
targ_t = btf_type_by_id(targ_btf, targ_id);
recur:
depth--;
if (depth < 0)
return -EINVAL;
local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
if (!local_t || !targ_t)
return -EINVAL;
/* While the name check happens after typedefs are skipped, root-level
* typedefs would still be name-matched as that's the contract with
* callers.
*/
if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
return 0;
local_k = btf_kind(local_t);
targ_k = btf_kind(targ_t);
switch (local_k) {
case BTF_KIND_UNKN:
return local_k == targ_k;
case BTF_KIND_FWD: {
bool local_f = BTF_INFO_KFLAG(local_t->info);
if (behind_ptr) {
if (local_k == targ_k)
return local_f == BTF_INFO_KFLAG(targ_t->info);
/* for forward declarations kflag dictates whether the
* target is a struct (0) or union (1)
*/
return (targ_k == BTF_KIND_STRUCT && !local_f) ||
(targ_k == BTF_KIND_UNION && local_f);
} else {
if (local_k != targ_k)
return 0;
/* match if the forward declaration is for the same kind */
return local_f == BTF_INFO_KFLAG(targ_t->info);
}
}
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
if (!btf_is_any_enum(targ_t))
return 0;
return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
if (behind_ptr) {
bool targ_f = BTF_INFO_KFLAG(targ_t->info);
if (local_k == targ_k)
return 1;
if (targ_k != BTF_KIND_FWD)
return 0;
return (local_k == BTF_KIND_UNION) == targ_f;
} else {
if (local_k != targ_k)
return 0;
return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
behind_ptr, level);
}
case BTF_KIND_INT: {
__u8 local_sgn;
__u8 targ_sgn;
if (local_k != targ_k)
return 0;
local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;
return local_t->size == targ_t->size && local_sgn == targ_sgn;
}
case BTF_KIND_PTR:
if (local_k != targ_k)
return 0;
behind_ptr = true;
local_id = local_t->type;
targ_id = targ_t->type;
goto recur;
case BTF_KIND_ARRAY: {
const struct btf_array *local_array = btf_array(local_t);
const struct btf_array *targ_array = btf_array(targ_t);
if (local_k != targ_k)
return 0;
if (local_array->nelems != targ_array->nelems)
return 0;
local_id = local_array->type;
targ_id = targ_array->type;
goto recur;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *local_p = btf_params(local_t);
struct btf_param *targ_p = btf_params(targ_t);
__u16 local_vlen = btf_vlen(local_t);
__u16 targ_vlen = btf_vlen(targ_t);
int i, err;
if (local_k != targ_k)
return 0;
if (local_vlen != targ_vlen)
return 0;
for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
targ_p->type, behind_ptr, level - 1);
if (err <= 0)
return err;
}
/* tail recurse for return type check */
local_id = local_t->type;
targ_id = targ_t->type;
goto recur;
}
default:
pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
btf_kind_str(local_t), local_id, targ_id);
return 0;
}
}

View File

@@ -72,6 +72,10 @@ int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
const struct btf *targ_btf, __u32 targ_id, int level); const struct btf *targ_btf, __u32 targ_id, int level);
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
const struct btf *targ_btf, __u32 targ_id); const struct btf *targ_btf, __u32 targ_id);
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
__u32 targ_id, bool behind_ptr, int level);
int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
__u32 targ_id);
size_t bpf_core_essential_name_len(const char *name); size_t bpf_core_essential_name_len(const char *name);

View File

@@ -66,13 +66,13 @@ struct bpf_load_and_run_opts {
const char *errstr; const char *errstr;
}; };
long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size); long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
unsigned int size) unsigned int size)
{ {
#ifdef __KERNEL__ #ifdef __KERNEL__
return bpf_sys_bpf(cmd, attr, size); return kern_sys_bpf(cmd, attr, size);
#else #else
return syscall(__NR_bpf, cmd, attr, size); return syscall(__NR_bpf, cmd, attr, size);
#endif #endif
@@ -285,6 +285,8 @@ static inline int skel_link_create(int prog_fd, int target_fd,
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{ {
const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
int map_fd = -1, prog_fd = -1, key = 0, err; int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr; union bpf_attr attr;
@@ -302,7 +304,7 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
goto out; goto out;
} }
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, prog_load_attr_sz);
attr.prog_type = BPF_PROG_TYPE_SYSCALL; attr.prog_type = BPF_PROG_TYPE_SYSCALL;
attr.insns = (long) opts->insns; attr.insns = (long) opts->insns;
attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
@@ -313,18 +315,18 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
attr.log_size = opts->ctx->log_size; attr.log_size = opts->ctx->log_size;
attr.log_buf = opts->ctx->log_buf; attr.log_buf = opts->ctx->log_buf;
attr.prog_flags = BPF_F_SLEEPABLE; attr.prog_flags = BPF_F_SLEEPABLE;
err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
if (prog_fd < 0) { if (prog_fd < 0) {
opts->errstr = "failed to load loader prog"; opts->errstr = "failed to load loader prog";
set_err; set_err;
goto out; goto out;
} }
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, test_run_attr_sz);
attr.test.prog_fd = prog_fd; attr.test.prog_fd = prog_fd;
attr.test.ctx_in = (long) opts->ctx; attr.test.ctx_in = (long) opts->ctx;
attr.test.ctx_size_in = opts->ctx->sz; attr.test.ctx_size_in = opts->ctx->sz;
err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr)); err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
if (err < 0 || (int)attr.test.retval < 0) { if (err < 0 || (int)attr.test.retval < 0) {
opts->errstr = "failed to execute loader prog"; opts->errstr = "failed to execute loader prog";
if (err < 0) { if (err < 0) {

View File

@@ -6,7 +6,6 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h> #include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
/* Below types and maps are internal implementation details of libbpf's USDT /* Below types and maps are internal implementation details of libbpf's USDT
* support and are subjects to change. Also, bpf_usdt_xxx() API helpers should * support and are subjects to change. Also, bpf_usdt_xxx() API helpers should
@@ -30,14 +29,6 @@
#ifndef BPF_USDT_MAX_IP_CNT #ifndef BPF_USDT_MAX_IP_CNT
#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT) #define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
#endif #endif
/* We use BPF CO-RE to detect support for BPF cookie from BPF side. This is
* the only dependency on CO-RE, so if it's undesirable, user can override
* BPF_USDT_HAS_BPF_COOKIE to specify whether to BPF cookie is supported or not.
*/
#ifndef BPF_USDT_HAS_BPF_COOKIE
#define BPF_USDT_HAS_BPF_COOKIE \
bpf_core_enum_value_exists(enum bpf_func_id___usdt, BPF_FUNC_get_attach_cookie___usdt)
#endif
enum __bpf_usdt_arg_type { enum __bpf_usdt_arg_type {
BPF_USDT_ARG_CONST, BPF_USDT_ARG_CONST,
@@ -83,15 +74,12 @@ struct {
__type(value, __u32); __type(value, __u32);
} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak; } __bpf_usdt_ip_to_spec_id SEC(".maps") __weak;
/* don't rely on user's BPF code to have latest definition of bpf_func_id */ extern const _Bool LINUX_HAS_BPF_COOKIE __kconfig;
enum bpf_func_id___usdt {
BPF_FUNC_get_attach_cookie___usdt = 0xBAD, /* value doesn't matter */
};
static __always_inline static __always_inline
int __bpf_usdt_spec_id(struct pt_regs *ctx) int __bpf_usdt_spec_id(struct pt_regs *ctx)
{ {
if (!BPF_USDT_HAS_BPF_COOKIE) { if (!LINUX_HAS_BPF_COOKIE) {
long ip = PT_REGS_IP(ctx); long ip = PT_REGS_IP(ctx);
int *spec_id_ptr; int *spec_id_ptr;
@@ -244,7 +232,7 @@ long bpf_usdt_cookie(struct pt_regs *ctx)
*/ */
#define BPF_USDT(name, args...) \ #define BPF_USDT(name, args...) \
name(struct pt_regs *ctx); \ name(struct pt_regs *ctx); \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \ ____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \ typeof(name(0)) name(struct pt_regs *ctx) \
{ \ { \
@@ -253,7 +241,7 @@ typeof(name(0)) name(struct pt_regs *ctx) \
return ____##name(___bpf_usdt_args(args)); \ return ____##name(___bpf_usdt_args(args)); \
_Pragma("GCC diagnostic pop") \ _Pragma("GCC diagnostic pop") \
} \ } \
static __attribute__((always_inline)) typeof(name(0)) \ static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args) ____##name(struct pt_regs *ctx, ##args)
#endif /* __USDT_BPF_H__ */ #endif /* __USDT_BPF_H__ */

View File

@@ -652,11 +652,9 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
* *
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
*/ */
usdt_rel_ip = usdt_abs_ip = note.loc_addr; usdt_abs_ip = note.loc_addr;
if (base_addr) { if (base_addr)
usdt_abs_ip += base_addr - note.base_addr; usdt_abs_ip += base_addr - note.base_addr;
usdt_rel_ip += base_addr - note.base_addr;
}
/* When attaching uprobes (which is what USDTs basically are) /* When attaching uprobes (which is what USDTs basically are)
* kernel expects file offset to be specified, not a relative * kernel expects file offset to be specified, not a relative

1
travis-ci Symbolic link
View File

@@ -0,0 +1 @@
ci

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,63 +0,0 @@
#!/bin/bash
# Runs the BPF selftest suites inside the test VM and records each
# suite's exit status so the host side can report pass/fail per suite.

# Abort on errors, unset variables, and failures anywhere in a pipeline.
set -euo pipefail

# helpers.sh is expected to define the log-folding helper used below
# (foldable/travis_fold) — TODO confirm against the checked-out CI scripts.
source $(cd $(dirname $0) && pwd)/helpers.sh

# Machine architecture; selects the per-arch deny/allow list variants.
ARCH=$(uname -m)

# Each suite appends a "<name>:<exit code>" line here.
STATUS_FILE=/exitstatus
# Concatenate every non-empty file given as an argument, strip '#'
# comments, and squash all runs of whitespace into commas, producing a
# single comma-separated list (suitable for test_progs -a/-d options).
# Missing or empty files are silently skipped.
read_lists() {
	local path
	for path in "$@"; do
		[[ -s "$path" ]] && cat "$path"
	done | cut -d'#' -f1 | tr -s ' \t\n' ','
}
# Run the test_progs and test_progs-no_alu32 suites, appending
# "<suite>:<exit code>" to STATUS_FILE for each.
# Globals (read): KERNEL, BLACKLIST, WHITELIST, STATUS_FILE
# test_progs itself is skipped on 4.9.0 kernels (not supported there).
test_progs() {
	if [[ "${KERNEL}" != '4.9.0' ]]; then
		# helpers.sh now defines "foldable" (travis_fold was renamed).
		foldable start test_progs "Testing test_progs"
		# "&& true" does not change the return code (it is not executed
		# if test_progs fails), but it prevents exiting on a
		# failure due to the "set -e".
		./test_progs ${BLACKLIST:+-d$BLACKLIST} ${WHITELIST:+-a$WHITELIST} && true
		echo "test_progs:$?" >> "${STATUS_FILE}"
		foldable end test_progs
	fi

	foldable start test_progs-no_alu32 "Testing test_progs-no_alu32"
	./test_progs-no_alu32 ${BLACKLIST:+-d$BLACKLIST} ${WHITELIST:+-a$WHITELIST} && true
	echo "test_progs-no_alu32:$?" >> "${STATUS_FILE}"
	foldable end test_progs-no_alu32
}
# Run the test_maps suite, appending "test_maps:<exit code>" to
# STATUS_FILE. "&& true" keeps a failing run from aborting the script
# under "set -e" while preserving $? for the status line.
# Globals (read): STATUS_FILE
test_maps() {
	# helpers.sh now defines "foldable" (travis_fold was renamed).
	foldable start test_maps "Testing test_maps"
	./test_maps && true
	echo "test_maps:$?" >> "${STATUS_FILE}"
	foldable end test_maps
}
# Run the test_verifier suite, appending "test_verifier:<exit code>" to
# STATUS_FILE. "&& true" keeps a failing run from aborting the script
# under "set -e" while preserving $? for the status line.
# Globals (read): STATUS_FILE
test_verifier() {
	# helpers.sh now defines "foldable" (travis_fold was renamed).
	foldable start test_verifier "Testing test_verifier"
	./test_verifier && true
	echo "test_verifier:$?" >> "${STATUS_FILE}"
	foldable end test_verifier
}
# Close the "vm_init" fold opened earlier in the VM boot-up output
# (helpers.sh now defines "foldable"; travis_fold was renamed).
foldable end vm_init

# Build comma-separated deny/allow lists from the generic and
# per-architecture config files; missing files are simply skipped.
configs_path=${PROJECT_NAME}/vmtest/configs
BLACKLIST=$(read_lists "$configs_path/blacklist/BLACKLIST-${KERNEL}" "$configs_path/blacklist/BLACKLIST-${KERNEL}.${ARCH}")
WHITELIST=$(read_lists "$configs_path/whitelist/WHITELIST-${KERNEL}" "$configs_path/whitelist/WHITELIST-${KERNEL}.${ARCH}")

# Quote the path: PROJECT_NAME comes from the environment.
cd "${PROJECT_NAME}/selftests/bpf"

test_progs

if [[ "${KERNEL}" == 'latest' ]]; then
	# test_maps
	test_verifier
fi