Compare commits
193 Commits
v0.8.0_net...v1.0.1_net
dac1c4b6a8, 1714037104, d598cb20c7, ce321d6fd4, 0f5b3a10ae, 5859c59e50, 85f8b7c4dc, 9da0dcb621, 82c4054376, b3a117773d,
fc2577c54c, 0420f75dbc, aa25f218b4, 9e9bf46c92, 28903eb40e, 8138aa78bd, 8ac9773f52, b63791cbde, 0ff6d28aec, 861364fa45,
21ec5ca723, 255690da57, b1753eaf3b, eeb2bc4061, a11587cc01, 7fb6138fae, c918b3e724, 981001bf46, ee7d295f83, 94d69cc07f,
12a41a80c5, 10a32130e7, fad270918d, c091b07808, efd33720cd, 9aedff8d03, 51e63f7229, c53af98d1a, 2c44349e09, 58361243ec,
c32e1cf948, c4f44c7c11, a7a525d47a, cfbd763ef8, 862b60f205, a0325403af, 7436656dbf, 7984737fbf, a0d1e22c77, e58c615210,
aec0b1cd7d, a202bd7433, ba81a5b778, f7cee4152f, 06c4624c8c, c8f4b9c878, 079bc8536d, 8be13ee80b, 3db7585378, 69938da6d7,
bfdf7653e0, d700dcf162, c03b9f6d0b, 66b788c1a4, e3c2b8a48d, 13a26d78f3, 6b92311c3a, 6fdbfb00f1, 45dca19bd2, 2fe1958ec8,
cbd9b7e5d8, 0cc6bfab39, 41c612167e, 69d537ba0b, bd1e5cff31, 3d484ca473, c25544735b, 179c7940eb, f6692dc4e8, 693de729d0,
0667206913, a2ebd9ceff, 0e43565ad8, 5b795f7b30, 3fa2c28d2c, 0fa013e705, d8e2c9d965, b2d7228d7c, 427f2a0c83, 8663289b51,
77e514d626, b44b214118, 610707057a, 7e567b8761, 1fe0248c61, 0862e4e54d, fd6c9d906a, d56d93baff, 1648fa16b5, 9b6f4eb157,
b3fe4be0b3, 6d5026e434, ca60209447, b31ca3fa0e, 295a4aae35, 8498996f9f, aa13a6ff58, bace4782cd, ab2221de84, d8a50bfe35,
95971ddd48, 7410ddc0f4, 1b80b97a30, 434b56c497, d060a88aa5, 9340d9b650, 70599f3a1e, b78c75fcb3, f42d136c1c, 812a95fdf7,
f9f7f2d30a, 25ba007681, 9bdb296ec6, f009af7889, 62e8af46d2, fcd1b668c6, 0eb12dca7e, fedeba74b7, bf51e3c336, d8454ba8ad,
ec3bbc05c0, d32e7ea952, 6abeb4203d, e28a540c59, e8802d6319, 9476dce6fe, 8ee1202ff4, 7013b92fef, 20f0330235, 29869d6ef0,
72dbaf2ac3, bc3673cdd5, 78909b8caf, ec718073b0, 9c73b6d422, 0c84902331, 4cb682229d, 0304a3c027, a459010926, e5ff285a44,
2d91c46d1a, d3e41fc1aa, 645500dd7d, 5497411f48, 74b22b6c8a, 416351822c, 3f9d041e19, a945df2439, f429a582bf, 25238de149,
c3f8eecb16, 25fd7a1cf5, 0167a88355, 23e3d8cf31, 9a976c6b98, e93b1010f3, 76fc1ad6d5, 33c5f2bec3, d4998cbb6c, eb1d1ad83f,
8aa946389d, ad0783c430, 55638904af, a5d75daa8c, 37218f49fa, bdbce77631, 242c116f04, 4d9cd51e7e, f035838503, 7ed5bf8f4c,
1a0f5d1c87, c68a2738fd, 97009215cb, 4c39a3e1aa, cb11988cf4, 7e8d4234ac, 00f40c01fb, 881eba7ef5, 4eb6485c08, eaf9123419,
cc904c1a74, f3b96c873d, 47595c2f08
5
.github/actions/build-selftests/action.yml
vendored
@@ -18,9 +18,10 @@ runs:
steps:
- shell: bash
run: |
echo "::group::Setup Env"
source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh
foldable start "Setup Env"
sudo apt-get install -y qemu-kvm zstd binutils-dev elfutils libcap-dev libelf-dev libdw-dev python3-docutils
echo "::endgroup::"
foldable end
- shell: bash
run: |
export KERNEL=${{ inputs.kernel }}
@@ -6,9 +6,9 @@ THISDIR="$(cd $(dirname $0) && pwd)"
source ${THISDIR}/helpers.sh
travis_fold start prepare_selftests "Building selftests"
foldable start prepare_selftests "Building selftests"
LLVM_VER=15
LLVM_VER=16
LIBBPF_PATH="${REPO_ROOT}"
PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh
@@ -39,4 +39,4 @@ cd ${LIBBPF_PATH}
rm selftests/bpf/.gitignore
git add selftests
travis_fold end prepare_selftests
foldable end prepare_selftests
20
.github/actions/build-selftests/helpers.sh
vendored
@@ -1,26 +1,18 @@
# $1 - start or end
# $2 - fold identifier, no spaces
# $3 - fold section description
travis_fold() {
foldable() {
local YELLOW='\033[1;33m'
local NOCOLOR='\033[0m'
if [ -z ${GITHUB_WORKFLOW+x} ]; then
echo travis_fold:$1:$2
if [ $1 = "start" ]; then
line="::group::$2"
if [ ! -z "${3:-}" ]; then
echo -e "${YELLOW}$3${NOCOLOR}"
line="$line - ${YELLOW}$3${NOCOLOR}"
fi
echo
else
if [ $1 = "start" ]; then
line="::group::$2"
if [ ! -z "${3:-}" ]; then
line="$line - ${YELLOW}$3${NOCOLOR}"
fi
else
line="::endgroup::"
fi
echo -e "$line"
line="::endgroup::"
fi
echo -e "$line"
}
__print() {
143535
.github/actions/build-selftests/vmlinux.h
vendored
25
.github/actions/vmtest/action.yml
vendored
@@ -16,7 +16,7 @@ inputs:
runs:
using: "composite"
steps:
# setup envinronment
# setup environment
- name: Setup environment
uses: libbpf/ci/setup-build-env@master
with:
@@ -41,23 +41,26 @@ runs:
- name: Prepare to build BPF selftests
shell: bash
run: |
echo "::group::Prepare buidling selftest"
source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh
foldable start "Prepare building selftest"
cd .kernel
cp ${{ github.workspace }}/travis-ci/vmtest/configs/config-latest.${{ inputs.arch }} .config
cat tools/testing/selftests/bpf/config \
tools/testing/selftests/bpf/config.${{ inputs.arch }} > .config
make olddefconfig && make prepare
cd -
echo "::endgroup::"
foldable end
# 2. if kernel == LATEST, build kernel image from tree
- name: Build kernel image
if: ${{ inputs.kernel == 'LATEST' }}
shell: bash
run: |
echo "::group::Build Kernel Image"
source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh
foldable start "Build Kernel Image"
cd .kernel
make -j $((4*$(nproc))) all > /dev/null
cp vmlinux ${{ github.workspace }}
cd -
echo "::endgroup::"
foldable end
# else, just download prebuilt kernel image
- name: Download prebuilt kernel
if: ${{ inputs.kernel != 'LATEST' }}
@@ -75,13 +78,19 @@ runs:
- name: prepare rootfs
uses: libbpf/ci/prepare-rootfs@master
with:
kernel: ${{ inputs.kernel }}
project-name: 'libbpf'
arch: ${{ inputs.arch }}
kernel: ${{ inputs.kernel }}
kernel-root: '.kernel'
image-output: '/tmp/root.img'
# 5. run selftest in QEMU
- name: Run selftests
env:
KERNEL: ${{ inputs.kernel }}
REPO_ROOT: ${{ github.workspace }}
uses: libbpf/ci/run-qemu@master
with:
arch: ${{ inputs.arch }}
img: '/tmp/root.img'
vmlinuz: 'vmlinuz'
arch: ${{ inputs.arch }}
kernel-root: '.kernel'
4
.github/workflows/build.yml
vendored
@@ -32,7 +32,7 @@ jobs:
- name: gcc-10 ASan+UBSan
target: RUN_GCC10_ASAN
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
name: Checkout
- uses: ./.github/actions/setup
name: Setup
@@ -53,7 +53,7 @@ jobs:
- arch: s390x
- arch: x86
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
name: Checkout
- uses: ./.github/actions/setup
name: Pre-Setup
7
.github/workflows/coverity.yml
vendored
@@ -11,16 +11,17 @@ jobs:
if: github.repository == 'libbpf/libbpf'
name: Coverity
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: ./.github/actions/setup
- name: Run coverity
run: |
echo ::group::Setup CI env
source "${GITHUB_WORKSPACE}"/ci/vmtest/helpers.sh
foldable start "Setup CI env"
source /tmp/ci_setup
export COVERITY_SCAN_NOTIFICATION_EMAIL="${AUTHOR_EMAIL}"
export COVERITY_SCAN_BRANCH_PATTERN=${GITHUB_REF##refs/*/}
export TRAVIS_BRANCH=${COVERITY_SCAN_BRANCH_PATTERN}
echo ::endgroup::
foldable end
scripts/coverity.sh
env:
COVERITY_SCAN_TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }}
2
.github/workflows/ondemand.yml
vendored
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
name: vmtest with customized pahole/Kernel
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: ./.github/actions/setup
- uses: ./.github/actions/vmtest
with:
2
.github/workflows/pahole.yml
vendored
@@ -12,7 +12,7 @@ jobs:
env:
STAGING: tmp.master
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: ./.github/actions/setup
- uses: ./.github/actions/vmtest
with:
2
.github/workflows/test.yml
vendored
@@ -31,7 +31,7 @@ jobs:
runs_on: z15
arch: 's390x'
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
name: Checkout
- uses: ./.github/actions/setup
name: Setup
130
.travis.yml
@@ -1,130 +0,0 @@
sudo: required
language: bash
dist: focal
services:
- docker
env:
global:
- PROJECT_NAME='libbpf'
- AUTHOR_EMAIL="$(git log -1 --pretty=\"%aE\")"
- REPO_ROOT="$TRAVIS_BUILD_DIR"
- CI_ROOT="$REPO_ROOT/travis-ci"
- VMTEST_ROOT="$CI_ROOT/vmtest"
addons:
apt:
packages:
- qemu-kvm
- zstd
- binutils-dev
- elfutils
- libcap-dev
- libelf-dev
- libdw-dev
stages:
# Run Coverity periodically instead of for each PR for following reasons:
# 1) Coverity jobs are heavily rate-limited
# 2) Due to security restrictions of encrypted environment variables
# in Travis CI, pull requests made from forks can't access encrypted
# env variables, making Coverity unusable
# See: https://docs.travis-ci.com/user/pull-requests#pull-requests-and-security-restrictions
- name: Coverity
if: type = cron
jobs:
include:
- stage: Builds & Tests
name: Kernel 5.5.0 + selftests
language: bash
env: KERNEL=5.5.0
script: $CI_ROOT/vmtest/run_vmtest.sh || travis_terminate 1
- name: Kernel LATEST + selftests
language: bash
env: KERNEL=LATEST
script: $CI_ROOT/vmtest/run_vmtest.sh || travis_terminate 1
- name: Kernel 4.9.0 + selftests
language: bash
env: KERNEL=4.9.0
script: $CI_ROOT/vmtest/run_vmtest.sh || travis_terminate 1
- name: Debian Build
language: bash
install: $CI_ROOT/managers/debian.sh SETUP
script: $CI_ROOT/managers/debian.sh RUN || travis_terminate 1
after_script: $CI_ROOT/managers/debian.sh CLEANUP
- name: Debian Build (ASan+UBSan)
language: bash
install: $CI_ROOT/managers/debian.sh SETUP
script: $CI_ROOT/managers/debian.sh RUN_ASAN || travis_terminate 1
after_script: $CI_ROOT/managers/debian.sh CLEANUP
- name: Debian Build (clang)
language: bash
install: $CI_ROOT/managers/debian.sh SETUP
script: $CI_ROOT/managers/debian.sh RUN_CLANG || travis_terminate 1
after_script: $CI_ROOT/managers/debian.sh CLEANUP
- name: Debian Build (clang ASan+UBSan)
language: bash
install: $CI_ROOT/managers/debian.sh SETUP
script: $CI_ROOT/managers/debian.sh RUN_CLANG_ASAN || travis_terminate 1
after_script: $CI_ROOT/managers/debian.sh CLEANUP
- name: Debian Build (gcc-10)
language: bash
install: $CI_ROOT/managers/debian.sh SETUP
script: $CI_ROOT/managers/debian.sh RUN_GCC10 || travis_terminate 1
after_script: $CI_ROOT/managers/debian.sh CLEANUP
- name: Debian Build (gcc-10 ASan+UBSan)
language: bash
install: $CI_ROOT/managers/debian.sh SETUP
script: $CI_ROOT/managers/debian.sh RUN_GCC10_ASAN || travis_terminate 1
after_script: $CI_ROOT/managers/debian.sh CLEANUP
- name: Ubuntu Focal Build
language: bash
script: sudo $CI_ROOT/managers/ubuntu.sh || travis_terminate 1
- name: Ubuntu Focal Build (arm)
arch: arm64
language: bash
script: sudo $CI_ROOT/managers/ubuntu.sh || travis_terminate 1
- name: Ubuntu Focal Build (s390x)
arch: s390x
language: bash
script: sudo $CI_ROOT/managers/ubuntu.sh || travis_terminate 1
- name: Ubuntu Focal Build (ppc64le)
arch: ppc64le
language: bash
script: sudo $CI_ROOT/managers/ubuntu.sh || travis_terminate 1
- stage: Coverity
language: bash
env:
# Coverity configuration
# COVERITY_SCAN_TOKEN=xxx
# Encrypted using `travis encrypt --repo libbpf/libbpf COVERITY_SCAN_TOKEN=xxx`
- secure: "I9OsMRHbb82IUivDp+I+w/jEQFOJgBDAqYqf1ollqCM1QhocxMcS9bwIAgfPhdXi2hohV7sRrVMZstahY67FAvJLGxNopi4tAPDIAaIFxgO0yDxMhaTMx5xDfMwlIm2FOP/9gB9BQsd6M7CmoQZgXYwBIv7xd1ooxoQrh2rOK1YrRl7UQu3+c3zPTjDfIYZzR3bFttMqZ9/c4U0v8Ry5IFXrel3hCshndHA1TtttJrUSrILlZcmVc1ch7JIy6zCbCU/2lGv0B/7rWXfF8MT7O9jPtFOhJ1DEcd2zhw2n4j9YT3a8OhtnM61LA6ask632mwCOsxpFLTun7AzuR1Cb5mdPHsxhxnCHcXXARa2mJjem0QG1NhwxwJE8sbRDapojexxCvweYlEN40ofwMDSnj/qNt95XIcrk0tiIhGFx0gVNWvAdmZwx+N4mwGPMTAN0AEOFjpgI+ZdB89m+tL/CbEgE1flc8QxUxJhcp5OhH6yR0z9qYOp0nXIbHsIaCiRvt/7LqFRQfheifztWVz4mdQlCdKS9gcOQ09oKicPevKO1L0Ue3cb7Ug7jOpMs+cdh3XokJtUeYEr1NijMHT9+CTAhhO5RToWXIZRon719z3fwoUBNDREATwVFMlVxqSO/pbYgaKminigYbl785S89YYaZ6E5UvaKRHM6KHKMDszs="
- COVERITY_SCAN_PROJECT_NAME="libbpf"
- COVERITY_SCAN_NOTIFICATION_EMAIL="${AUTHOR_EMAIL}"
- COVERITY_SCAN_BRANCH_PATTERN="$TRAVIS_BRANCH"
# Note: `make -C src/` as a BUILD_COMMAND will not work here
- COVERITY_SCAN_BUILD_COMMAND_PREPEND="cd src/"
- COVERITY_SCAN_BUILD_COMMAND="make"
install:
- sudo echo 'deb-src http://archive.ubuntu.com/ubuntu/ focal main restricted universe multiverse' >>/etc/apt/sources.list
- sudo apt-get update
- sudo apt-get -y build-dep libelf-dev
- sudo apt-get install -y libelf-dev pkg-config
script:
- scripts/coverity.sh || travis_terminate 1
allow_failures:
- env: KERNEL=x.x.x
@@ -1 +1 @@
f3f19f939c11925dadd3f4776f99f8c278a7017b
60240bc26114543fcbfcd8a28466e67e77b20388
@@ -1 +1 @@
ac6a65868a5a45db49d5ee8524df3b701110d844
87dbdc230d162bf9ee1ac77c8ade178b6b1e199e
59
README.md
@@ -1,17 +1,32 @@
This is a mirror of [bpf-next Linux source
tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next)'s
`tools/lib/bpf` directory plus its supporting header files.
<picture>
<source media="(prefers-color-scheme: dark)" srcset="assets/libbpf-logo-sideways-darkbg.png" width="40%">
<img src="assets/libbpf-logo-sideways.png" width="40%">
</picture>
All the gory details of syncing can be found in `scripts/sync-kernel.sh`
script.
libbpf
[](https://github.com/libbpf/libbpf/actions/workflows/test.yml)
[](https://lgtm.com/projects/g/libbpf/libbpf/alerts/)
[](https://scan.coverity.com/projects/libbpf)
[](https://oss-fuzz-build-logs.storage.googleapis.com/index.html#libbpf)
======
Some header files in this repo (`include/linux/*.h`) are reduced versions of
their counterpart files at
[bpf-next](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/)'s
`tools/include/linux/*.h` to make compilation successful.
**This is the official home of the libbpf library.**
BPF/libbpf usage and questions
==============================
*Please use this Github repository for building and packaging libbpf
and when using it in your projects through Git submodule.*
Libbpf *authoritative source code* is developed as part of [bpf-next Linux source
tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next) under
`tools/lib/bpf` subdirectory and is periodically synced to Github. As such, all the
libbpf changes should be sent to [BPF mailing list](http://vger.kernel.org/vger-lists.html#bpf),
please don't open PRs here unless you are changing Github-specific parts of libbpf
(e.g., Github-specific Makefile).
Libbpf and general BPF usage questions
======================================
Libbpf documentation can be found [here](https://libbpf.readthedocs.io/en/latest/api.html).
It's an ongoing effort and has ways to go, but please take a look and consider contributing as well.
Please check out [libbpf-bootstrap](https://github.com/libbpf/libbpf-bootstrap)
and [the companion blog post](https://nakryiko.com/posts/libbpf-bootstrap/) for
@@ -36,12 +51,8 @@ to help you with whatever issue you have. This repository's PRs and issues
should be opened only for dealing with issues pertaining to specific way this
libbpf mirror repo is set up and organized.
Build
[](https://github.com/libbpf/libbpf/actions/workflows/test.yml)
[](https://lgtm.com/projects/g/libbpf/libbpf/alerts/)
[](https://scan.coverity.com/projects/libbpf)
[](https://oss-fuzz-build-logs.storage.googleapis.com/index.html#libbpf)
=====
Building libbpf
===============
libelf is an internal dependency of libbpf and thus it is required to link
against and must be installed on the system for applications to work.
pkg-config is used by default to find libelf, and the program called can be
@@ -146,7 +157,7 @@ Benefits of packaging from the mirror over packaging from kernel sources:
gracefully degrading functionality. Thus libbpf is not tied to a specific
kernel version and can/should be packaged and versioned independently.
- Continuous integration testing via
[TravisCI](https://travis-ci.org/libbpf/libbpf).
[GitHub Actions](https://github.com/libbpf/libbpf/actions).
- Static code analysis via [LGTM](https://lgtm.com/projects/g/libbpf/libbpf)
and [Coverity](https://scan.coverity.com/projects/libbpf).
@@ -156,6 +167,18 @@ Package dependencies of libbpf, package names may vary across distros:
[](https://repology.org/project/libbpf/versions)
bpf-next to Github sync
=======================
All the gory details of syncing can be found in `scripts/sync-kernel.sh`
script.
Some header files in this repo (`include/linux/*.h`) are reduced versions of
their counterpart files at
[bpf-next](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/)'s
`tools/include/linux/*.h` to make compilation successful.
License
=======
BIN  assets/libbpf-logo-compact-darkbg.png  (new file, 262 KiB)
BIN  assets/libbpf-logo-compact-mono.png  (new file, 128 KiB)
BIN  assets/libbpf-logo-compact.png  (new file, 116 KiB)
BIN  assets/libbpf-logo-sideways-darkbg.png  (new file, 284 KiB)
BIN  assets/libbpf-logo-sideways-mono.png  (new file, 142 KiB)
BIN  assets/libbpf-logo-sideways.png  (new file, 140 KiB)
BIN  assets/libbpf-logo-sparse-darkbg.png  (new file, 352 KiB)
BIN  assets/libbpf-logo-sparse-mono.png  (new file, 206 KiB)
BIN  assets/libbpf-logo-sparse.png  (new file, 236 KiB)
@@ -7,7 +7,8 @@ ENV_VARS="${ENV_VARS:-}"
DOCKER_RUN="${DOCKER_RUN:-docker run}"
REPO_ROOT="${REPO_ROOT:-$PWD}"
ADDITIONAL_DEPS=(clang pkg-config gcc-10)
CFLAGS="-g -O2 -Werror -Wall"
EXTRA_CFLAGS=""
EXTRA_LDFLAGS=""
function info() {
echo -e "\033[33;1m$1\033[0m"
@@ -55,17 +56,17 @@ for phase in "${PHASES[@]}"; do
elif [[ "$phase" = *"GCC10"* ]]; then
ENV_VARS="-e CC=gcc-10 -e CXX=g++-10"
CC="gcc-10"
CFLAGS="${CFLAGS} -Wno-stringop-truncation"
else
CFLAGS="${CFLAGS} -Wno-stringop-truncation"
EXTRA_CFLAGS="${EXTRA_CFLAGS} -Wno-stringop-truncation"
fi
if [[ "$phase" = *"ASAN"* ]]; then
CFLAGS="${CFLAGS} -fsanitize=address,undefined"
EXTRA_CFLAGS="${EXTRA_CFLAGS} -fsanitize=address,undefined"
EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -fsanitize=address,undefined"
fi
docker_exec mkdir build install
docker_exec ${CC} --version
info "build"
docker_exec make -j$((4*$(nproc))) CFLAGS="${CFLAGS}" -C ./src -B OBJDIR=../build
docker_exec make -j$((4*$(nproc))) EXTRA_CFLAGS="${EXTRA_CFLAGS}" EXTRA_LDFLAGS="${EXTRA_LDFLAGS}" -C ./src -B OBJDIR=../build
info "ldd build/libbpf.so:"
docker_exec ldd build/libbpf.so
if ! docker_exec ldd build/libbpf.so | grep -q libelf; then
@@ -75,7 +76,7 @@ for phase in "${PHASES[@]}"; do
info "install"
docker_exec make -j$((4*$(nproc))) -C src OBJDIR=../build DESTDIR=../install install
info "link binary"
docker_exec bash -c "CFLAGS=\"${CFLAGS}\" ./travis-ci/managers/test_compile.sh"
docker_exec bash -c "EXTRA_CFLAGS=\"${EXTRA_CFLAGS}\" EXTRA_LDFLAGS=\"${EXTRA_LDFLAGS}\" ./ci/managers/test_compile.sh"
;;
CLEANUP)
info "Cleanup phase"
15
ci/managers/test_compile.sh
Executable file
@@ -0,0 +1,15 @@
#!/bin/bash
set -euox pipefail
EXTRA_CFLAGS=${EXTRA_CFLAGS:-}
EXTRA_LDFLAGS=${EXTRA_LDFLAGS:-}
cat << EOF > main.c
#include <bpf/libbpf.h>
int main() {
return bpf_object__open(0) < 0;
}
EOF
# static linking
${CC:-cc} ${EXTRA_CFLAGS} ${EXTRA_LDFLAGS} -o main -I./include/uapi -I./install/usr/include main.c ./build/libbpf.a -lelf -lz
@@ -10,14 +10,15 @@ source "$(dirname $0)/travis_wait.bash"
cd $REPO_ROOT
CFLAGS="-g -O2 -Werror -Wall -fsanitize=address,undefined -Wno-stringop-truncation"
EXTRA_CFLAGS="-Werror -Wall -fsanitize=address,undefined"
EXTRA_LDFLAGS="-Werror -Wall -fsanitize=address,undefined"
mkdir build install
cc --version
make -j$((4*$(nproc))) CFLAGS="${CFLAGS}" -C ./src -B OBJDIR=../build
make -j$((4*$(nproc))) EXTRA_CFLAGS="${EXTRA_CFLAGS}" EXTRA_LDFLAGS="${EXTRA_LDFLAGS}" -C ./src -B OBJDIR=../build
ldd build/libbpf.so
if ! ldd build/libbpf.so | grep -q libelf; then
echo "FAIL: No reference to libelf.so in libbpf.so!"
exit 1
fi
make -j$((4*$(nproc))) -C src OBJDIR=../build DESTDIR=../install install
CFLAGS=${CFLAGS} $(dirname $0)/test_compile.sh
EXTRA_CFLAGS=${EXTRA_CFLAGS} EXTRA_LDFLAGS=${EXTRA_LDFLAGS} $(dirname $0)/test_compile.sh
@@ -20,7 +20,19 @@ root=$(mktemp -d -p "$PWD")
trap 'rm -r "$root"' EXIT
# Install packages.
packages=binutils,busybox,elfutils,ethtool,iproute2,libcap2,libelf1,strace,zlib1g
packages=(
binutils
busybox
elfutils
ethtool
iproute2
iptables
libcap2
libelf1
strace
zlib1g
)
packages=$(IFS=, && echo "${packages[*]}")
debootstrap --include="$packages" --variant=minbase "$@" bookworm "$root"
# Remove the init scripts (tests use their own). Also remove various
@@ -31,11 +43,6 @@ rm -rf \
"$root"/var/cache/apt/archives/* \
"$root"/var/lib/apt/lists/*
# Save some more space by removing coreutils - the tests use busybox. Before
# doing that, delete the buggy postrm script, which uses the rm command.
rm -f "$root/var/lib/dpkg/info/coreutils.postrm"
chroot "$root" dpkg --remove --force-remove-essential coreutils
# Apply common tweaks.
"$(dirname "$0")"/mkrootfs_tweak.sh "$root"
@@ -36,6 +36,7 @@ $ sudo systemctl daemon-reload
$ sudo tee /etc/actions-runner-libbpf
repo=<owner>/<name>
access_token=<ghp_***>
runner_name=<hostname>
```
Access token should have the repo scope, consult
@@ -24,7 +24,9 @@ RUN apt-get update && apt-get -y install \
rsync \
software-properties-common \
sudo \
tree
tree \
iproute2 \
iputils-ping
# amd64 dependencies.
COPY --from=ld-prefix / /usr/x86_64-linux-gnu/
@@ -33,7 +35,7 @@ RUN ln -fs /etc/resolv.conf /usr/x86_64-linux-gnu/etc/
ENV QEMU_LD_PREFIX=/usr/x86_64-linux-gnu
# amd64 Github Actions Runner.
ARG version=2.285.0
ARG version=2.296.0
RUN useradd -m actions-runner
RUN echo "actions-runner ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
RUN echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >>/etc/sudoers
@@ -7,7 +7,7 @@
#
# - repo=<owner>/<name>
# - access_token=<ghp_***>
#
# - runner_name=<hostname>
set -e -u
@@ -34,6 +34,7 @@ registration_token=$(jq --raw-output .token "$token_file")
--url "https://github.com/$repo" \
--token "$registration_token" \
--labels z15 \
--name "$runner_name" \
--ephemeral
# Run one job.
@@ -1,4 +1,4 @@
attach_probe
# attach_probe
autoload
bpf_verif_scale
cgroup_attach_autodetach
@@ -10,7 +10,6 @@ core_reloc
core_retro
cpu_mask
endian
fexit_stress
get_branch_snapshot
get_stackid_cannot_attach
global_data
@@ -1,5 +1,5 @@
# This file is not used and is there for historic purposes only.
# See WHITELIST-5.5.0 instead.
# See ALLOWLIST-5.5.0 instead.
# PERMANENTLY DISABLED
align # verifier output format changed
@@ -71,6 +71,7 @@ sk_lookup # v5.9+
sk_storage_tracing # missing bpf_sk_storage_get() helper
skb_ctx # ctx_{size, }_{in, out} in BPF_PROG_TEST_RUN is missing
skb_helpers # helpers added in 5.8+
skeleton # creates too big ARRAY map
snprintf # v5.13+
snprintf_btf # v5.10+
sock_fields # v5.10+
0
ci/vmtest/configs/DENYLIST-latest
Normal file
6
ci/vmtest/configs/DENYLIST-latest.s390x
Normal file
@@ -0,0 +1,6 @@
# TEMPORARY until bpf-next -> bpf merge
lru_bug # prog 'printk': failed to auto-attach: -524
# TEMPORARY
usdt/basic # failing verifier due to bounds check after LLVM update
usdt/multispec # same as above
@@ -1,26 +1,18 @@
# $1 - start or end
# $2 - fold identifier, no spaces
# $3 - fold section description
travis_fold() {
foldable() {
local YELLOW='\033[1;33m'
local NOCOLOR='\033[0m'
if [ -z ${GITHUB_WORKFLOW+x} ]; then
echo travis_fold:$1:$2
if [ $1 = "start" ]; then
line="::group::$2"
if [ ! -z "${3:-}" ]; then
echo -e "${YELLOW}$3${NOCOLOR}"
line="$line - ${YELLOW}$3${NOCOLOR}"
fi
echo
else
if [ $1 = "start" ]; then
line="::group::$2"
if [ ! -z "${3:-}" ]; then
line="$line - ${YELLOW}$3${NOCOLOR}"
fi
else
line="::endgroup::"
fi
echo -e "$line"
line="::endgroup::"
fi
echo -e "$line"
}
__print() {
87
ci/vmtest/run_selftests.sh
Executable file
@@ -0,0 +1,87 @@
#!/bin/bash
set -euo pipefail
source $(cd $(dirname $0) && pwd)/helpers.sh
ARCH=$(uname -m)
STATUS_FILE=/exitstatus
read_lists() {
(for path in "$@"; do
if [[ -s "$path" ]]; then
cat "$path"
fi;
done) | cut -d'#' -f1 | tr -s ' \t\n' ','
}
test_progs() {
if [[ "${KERNEL}" != '4.9.0' ]]; then
foldable start test_progs "Testing test_progs"
# "&& true" does not change the return code (it is not executed
# if the Python script fails), but it prevents exiting on a
# failure due to the "set -e".
./test_progs ${DENYLIST:+-d$DENYLIST} ${ALLOWLIST:+-a$ALLOWLIST} && true
echo "test_progs:$?" >> "${STATUS_FILE}"
foldable end test_progs
fi
}
test_progs_no_alu32() {
foldable start test_progs-no_alu32 "Testing test_progs-no_alu32"
./test_progs-no_alu32 ${DENYLIST:+-d$DENYLIST} ${ALLOWLIST:+-a$ALLOWLIST} && true
echo "test_progs-no_alu32:$?" >> "${STATUS_FILE}"
foldable end test_progs-no_alu32
}
test_maps() {
if [[ "${KERNEL}" == 'latest' ]]; then
foldable start test_maps "Testing test_maps"
./test_maps && true
echo "test_maps:$?" >> "${STATUS_FILE}"
foldable end test_maps
fi
}
test_verifier() {
if [[ "${KERNEL}" == 'latest' ]]; then
foldable start test_verifier "Testing test_verifier"
./test_verifier && true
echo "test_verifier:$?" >> "${STATUS_FILE}"
foldable end test_verifier
fi
}
foldable end vm_init
configs_path=/${PROJECT_NAME}/selftests/bpf
local_configs_path=${PROJECT_NAME}/vmtest/configs
DENYLIST=$(read_lists \
"$configs_path/DENYLIST" \
"$configs_path/DENYLIST.${ARCH}" \
"$local_configs_path/DENYLIST-${KERNEL}" \
"$local_configs_path/DENYLIST-${KERNEL}.${ARCH}" \
)
ALLOWLIST=$(read_lists \
"$configs_path/ALLOWLIST" \
"$configs_path/ALLOWLIST.${ARCH}" \
"$local_configs_path/ALLOWLIST-${KERNEL}" \
"$local_configs_path/ALLOWLIST-${KERNEL}.${ARCH}" \
)
echo "DENYLIST: ${DENYLIST}"
echo "ALLOWLIST: ${ALLOWLIST}"
cd ${PROJECT_NAME}/selftests/bpf
if [ $# -eq 0 ]; then
test_progs
test_progs_no_alu32
test_maps
test_verifier
else
for test_name in "$@"; do
"${test_name}"
done
fi
@@ -9,8 +9,8 @@ described here. It's recommended to follow these conventions whenever a
new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the
following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``,
``btf_dump_``, ``ring_buffer_``, ``perf_buffer_``.
following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``btf_dump_``,
``ring_buffer_``, ``perf_buffer_``.
System call wrappers
--------------------
@@ -59,15 +59,6 @@ Auxiliary functions and types that don't fit well in any of categories
described above should have ``libbpf_`` prefix, e.g.
``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
AF_XDP functions
-------------------
AF_XDP functions should have an ``xsk_`` prefix, e.g.
``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
of both low-level ring access functions and high-level configuration
functions. These can be mixed and matched. Note that these functions
are not reentrant for performance reasons.
ABI
---
@@ -87,10 +87,29 @@ struct bpf_cgroup_storage_key {
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
};
enum bpf_cgroup_iter_order {
BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */
BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
};
union bpf_iter_link_info {
struct {
__u32 map_fd;
} map;
struct {
enum bpf_cgroup_iter_order order;
/* At most one of cgroup_fd and cgroup_id can be non-zero. If
* both are zero, the walk starts from the default cgroup v2
* root. For walking v1 hierarchy, one should always explicitly
* specify cgroup_fd.
*/
__u32 cgroup_fd;
__u64 cgroup_id;
} cgroup;
};
/* BPF syscall commands, see bpf(2) man-page for more details. */
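For context, the new `cgroup` member of `union bpf_iter_link_info` is what user space fills in when creating a cgroup-iterator link. A minimal sketch using libbpf's `bpf_program__attach_iter()` — the helper function and cgroup fd below are illustrative, not part of this diff:

```c
#include <string.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: attach an iter/cgroup program so it walks the descendants of
 * the cgroup behind cgroup_fd in pre-order. Error handling trimmed. */
static struct bpf_link *attach_cgroup_iter(struct bpf_program *prog, int cgroup_fd)
{
	union bpf_iter_link_info linfo;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgroup_fd;	/* alternatively set cgroup_id */
	linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;

	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	return bpf_program__attach_iter(prog, &opts);
}
```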
@@ -909,6 +928,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
};
/* Note that tracing related programs such as
@@ -998,6 +1018,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
BPF_TRACE_KPROBE_MULTI,
BPF_LSM_CGROUP,
__MAX_BPF_ATTACH_TYPE
};
@@ -1232,7 +1253,7 @@ enum {
/* Query effective (directly attached + inherited from ancestor cgroups)
* programs that will be executed for events within a cgroup.
* attach_flags with this flag are returned only for directly attached programs.
* attach_flags with this flag are always returned 0.
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
@@ -1431,6 +1452,10 @@ union bpf_attr {
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
__aligned_u64 prog_attach_flags;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -2359,7 +2384,8 @@ union bpf_attr {
* Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for
* *len*, then the whole length of the *skb* is pulled.
* *len*, then all bytes in the linear part of *skb* will be made
* readable and writable.
*
* This helper is only needed for reading and writing with direct
* packet access.
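The reworded `bpf_skb_pull_data()` description amounts to the usual pull-then-revalidate pattern for direct packet access. A hedged sketch of a TC program (the 64-byte window and section name are arbitrary illustration):

```c
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pull_then_parse(struct __sk_buff *skb)
{
	/* Make the first 64 bytes linear and writable; with len == 0 the
	 * whole linear part would be made readable/writable instead. */
	if (bpf_skb_pull_data(skb, 64))
		return TC_ACT_OK;

	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	/* data/data_end must be re-checked after every pull. */
	if (data + 64 > data_end)
		return TC_ACT_OK;

	/* ... direct packet access on the first 64 bytes ... */
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";
```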
@@ -2570,10 +2596,12 @@ union bpf_attr {
* There are two supported modes at this time:
*
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
* (room space is added or removed below the layer 2 header).
* (room space is added or removed between the layer 2 and
* layer 3 headers).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
* (room space is added or removed below the layer 3 header).
* (room space is added or removed between the layer 3 and
* layer 4 headers).
*
* The following flags are supported at this time:
*
@@ -3005,8 +3033,18 @@ union bpf_attr {
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
* Collect (build_id, file_offset) instead of ips for user
* stack, only valid if **BPF_F_USER_STACK** is also
* specified.
*
* *file_offset* is an offset relative to the beginning
* of the executable or shared object file backing the vma
* which the *ip* falls in. It is *not* an offset relative
* to that object's base address. Accordingly, it must be
* adjusted by adding (sh_addr - sh_offset), where
* sh_{addr,offset} correspond to the executable section
* containing *file_offset* in the object, for comparisons
* to symbols' st_value to be valid.
*
* **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
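The `(sh_addr - sh_offset)` adjustment described for **BPF_F_USER_BUILD_ID** is plain arithmetic once the containing executable section is known; a small user-space sketch (looking up `sh_addr`/`sh_offset`, e.g. via libelf, is assumed to happen elsewhere):

```c
#include <stdint.h>

/* Convert the file_offset reported by bpf_get_stack() with
 * BPF_F_USER_STACK | BPF_F_USER_BUILD_ID into a value comparable with
 * ELF symbol st_value, given sh_addr/sh_offset of the executable
 * section containing file_offset. */
static uint64_t file_offset_to_st_value(uint64_t file_offset,
					uint64_t sh_addr, uint64_t sh_offset)
{
	return file_offset + (sh_addr - sh_offset);
}
```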
@@ -3597,10 +3635,11 @@ union bpf_attr {
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
* **sizeof**\ (**struct ip6hdr**).
* **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
* contains **sizeof**\ (**struct tcphdr**).
* contains the length of the TCP header (at least
* **sizeof**\ (**struct tcphdr**)).
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* error otherwise.
@@ -3783,10 +3822,11 @@ union bpf_attr {
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
* **sizeof**\ (**struct ip6hdr**).
* **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
* contains the length of the TCP header.
* contains the length of the TCP header with options (at least
* **sizeof**\ (**struct tcphdr**)).
* Return
* On success, lower 32 bits hold the generated SYN cookie in
* followed by 16 bits which hold the MSS value for that cookie,
@@ -4420,7 +4460,7 @@ union bpf_attr {
*
* **-EEXIST** if the option already exists.
*
* **-EFAULT** on failrue to parse the existing header options.
* **-EFAULT** on failure to parse the existing header options.
*
* **-EPERM** if the helper cannot be used under the current
* *skops*\ **->op**.
@@ -4629,7 +4669,7 @@ union bpf_attr {
* a *map* with *task* as the **key**. From this
* perspective, the usage is not much different from
* **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
* helper enforces the key must be an task_struct and the map must also
* helper enforces the key must be a task_struct and the map must also
* be a **BPF_MAP_TYPE_TASK_STORAGE**.
*
* Underneath, the value is stored locally at *task* instead of
@@ -4687,7 +4727,7 @@ union bpf_attr {
*
* long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
* Description
* Returns the stored IMA hash of the *inode* (if it's avaialable).
* Returns the stored IMA hash of the *inode* (if it's available).
* If the hash is larger than *size*, then only *size*
* bytes will be copied to *dst*
* Return
@@ -4711,12 +4751,12 @@ union bpf_attr {
*
* The argument *len_diff* can be used for querying with a planned
* size change. This allows to check MTU prior to changing packet
* ctx. Providing an *len_diff* adjustment that is larger than the
* ctx. Providing a *len_diff* adjustment that is larger than the
* actual packet size (resulting in negative packet size) will in
* principle not exceed the MTU, why it is not considered a
* failure. Other BPF-helpers are needed for performing the
* planned size change, why the responsability for catch a negative
* packet size belong in those helpers.
* principle not exceed the MTU, which is why it is not considered
* a failure. Other BPF helpers are needed for performing the
* planned size change; therefore the responsibility for catching
* a negative packet size belongs in those helpers.
*
* Specifying *ifindex* zero means the MTU check is performed
* against the current net device. This is practical if this isn't
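As a rough illustration of the `len_diff` query described here, a program that plans to grow a packet can ask whether the result would still fit the current device's MTU before doing the resize. A hedged XDP sketch (the 100-byte growth and program name are made up):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int mtu_guard(struct xdp_md *ctx)
{
	const __s32 len_diff = 100;	/* planned growth, illustrative */
	__u32 mtu_len = 0;

	/* ifindex 0: check against the current net device. A non-zero
	 * return means the adjusted size would not fit (or the check failed). */
	if (bpf_check_mtu(ctx, 0, &mtu_len, len_diff, 0))
		return XDP_PASS;

	/* ... safe to perform the planned size change here ... */
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
```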
@@ -4914,6 +4954,7 @@ union bpf_attr {
* Get address of the traced function (for tracing and kprobe programs).
* Return
* Address of the traced function.
* 0 for kprobes placed within the function (not at the entry).
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
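A hedged sketch of how a tracing program might use the argument-register helpers documented in this file (the traced function `vfs_unlink` and the generated `vmlinux.h` are assumptions, not part of the diff):

```c
#include "vmlinux.h"		/* assumed: generated via bpftool btf dump */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fexit/vfs_unlink")
int BPF_PROG(trace_vfs_unlink)
{
	__u64 nr_args = bpf_get_func_arg_cnt(ctx);
	__u64 first_arg = 0, retval = 0;

	/* ctx is the hidden context of BPF_PROG(); index 0 is the first
	 * argument register of the traced function. */
	if (nr_args > 0)
		bpf_get_func_arg(ctx, 0, &first_arg);
	bpf_get_func_ret(ctx, &retval);

	bpf_printk("vfs_unlink: args=%llu ret=%lld", nr_args, (long long)retval);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```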
@@ -5061,24 +5102,37 @@ union bpf_attr {
*
* long bpf_get_func_arg_cnt(void *ctx)
* Description
* Get number of arguments of the traced function (for tracing programs).
* Get number of registers of the traced function (for tracing programs) where
* function arguments are stored in these registers.
*
* Return
* The number of arguments of the traced function.
* The number of argument registers of the traced function.
*
* int bpf_get_retval(void)
* Description
* Get the syscall's return value that will be returned to userspace.
* Get the BPF program's return value that will be returned to the upper layers.
*
* This helper is currently supported by cgroup programs only.
* This helper is currently supported by cgroup programs and only by the hooks
* where BPF program's return value is returned to the userspace via errno.
* Return
* The syscall's return value.
* The BPF program's return value.
*
* int bpf_set_retval(int retval)
* Description
* Set the syscall's return value that will be returned to userspace.
* Set the BPF program's return value that will be returned to the upper layers.
*
* This helper is currently supported by cgroup programs and only by the hooks
* where BPF program's return value is returned to the userspace via errno.
*
* Note that there is the following corner case where the program exports an error
* via bpf_set_retval but signals success via 'return 1':
*
* bpf_set_retval(-EPERM);
* return 1;
*
* In this case, the BPF program's return value will use helper's -EPERM. This
* still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case.
*
* This helper is currently supported by cgroup programs only.
* Return
* 0 on success, or a negative error in case of failure.
*
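The corner case spelled out above can be made concrete with a tiny cgroup program sketch (hypothetical; any errno-returning cgroup hook would do): the hook is told "success", yet the caller in user space sees `-EPERM`.

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <errno.h>

SEC("cgroup/bind4")
int bind_reject_with_eperm(struct bpf_sock_addr *ctx)
{
	/* Export -EPERM to user space via errno... */
	bpf_set_retval(-EPERM);
	/* ...while still returning the hook's "allow" code; per the corner
	 * case documented above, the helper's value is what the caller sees. */
	return 1;
}

char LICENSE[] SEC("license") = "GPL";
```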
@@ -5172,6 +5226,209 @@ union bpf_attr {
* Return
* Map value associated to *key* on *cpu*, or **NULL** if no entry
* was found or *cpu* is invalid.
*
* struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
* Return
* *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
* Description
* Get a dynptr to local memory *data*.
*
* *data* must be a ptr to a map value.
* The maximum *size* supported is DYNPTR_MAX_SIZE.
* *flags* is currently unused.
* Return
* 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
* -EINVAL if flags is not 0.
*
* long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
* Description
* Reserve *size* bytes of payload in a ring buffer *ringbuf*
* through the dynptr interface. *flags* must be 0.
*
* Please note that a corresponding bpf_ringbuf_submit_dynptr or
* bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
* reservation fails. This is enforced by the verifier.
* Return
* 0 on success, or a negative error in case of failure.
*
* void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
* Description
* Submit reserved ring buffer sample, pointed to by *data*,
* through the dynptr interface. This is a no-op if the dynptr is
* invalid/null.
*
* For more information on *flags*, please see
* 'bpf_ringbuf_submit'.
* Return
* Nothing. Always succeeds.
*
* void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
* Description
* Discard reserved ring buffer sample through the dynptr
* interface. This is a no-op if the dynptr is invalid/null.
*
* For more information on *flags*, please see
* 'bpf_ringbuf_discard'.
* Return
* Nothing. Always succeeds.
*
* long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
* Description
* Read *len* bytes from *src* into *dst*, starting from *offset*
* into *src*.
* *flags* is currently unused.
* Return
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
* *flags* is not 0.
*
* long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
* Description
* Write *len* bytes from *src* into *dst*, starting from *offset*
* into *dst*.
* *flags* is currently unused.
* Return
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
* is a read-only dynptr or if *flags* is not 0.
*
* void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
* Description
* Get a pointer to the underlying dynptr data.
*
* *len* must be a statically known value. The returned data slice
* is invalidated whenever the dynptr is invalidated.
* Return
* Pointer to the underlying dynptr data, NULL if the dynptr is
* read-only, if the dynptr is invalid, or if the offset and length
* is out of bounds.
*
* s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IPv4/TCP headers, *iph* and *th*, without depending on a
* listening socket.
*
* *iph* points to the IPv4 header.
*
* *th* points to the start of the TCP header, while *th_len*
* contains the length of the TCP header (at least
* **sizeof**\ (**struct tcphdr**)).
* Return
* On success, lower 32 bits hold the generated SYN cookie in
* followed by 16 bits which hold the MSS value for that cookie,
* and the top 16 bits are unused.
*
* On failure, the returned value is one of the following:
*
* **-EINVAL** if *th_len* is invalid.
*
* s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IPv6/TCP headers, *iph* and *th*, without depending on a
* listening socket.
*
* *iph* points to the IPv6 header.
*
* *th* points to the start of the TCP header, while *th_len*
* contains the length of the TCP header (at least
* **sizeof**\ (**struct tcphdr**)).
* Return
* On success, lower 32 bits hold the generated SYN cookie in
* followed by 16 bits which hold the MSS value for that cookie,
* and the top 16 bits are unused.
*
* On failure, the returned value is one of the following:
*
* **-EINVAL** if *th_len* is invalid.
*
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
*
* long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK
* without depending on a listening socket.
*
* *iph* points to the IPv4 header.
*
* *th* points to the TCP header.
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK.
*
* On failure, the returned value is one of the following:
*
* **-EACCES** if the SYN cookie is not valid.
*
* long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK
* without depending on a listening socket.
*
* *iph* points to the IPv6 header.
*
* *th* points to the TCP header.
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK.
*
* On failure, the returned value is one of the following:
*
* **-EACCES** if the SYN cookie is not valid.
*
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
*
* u64 bpf_ktime_get_tai_ns(void)
* Description
* A nonsettable system-wide clock derived from wall-clock time but
* ignoring leap seconds. This clock does not experience
* discontinuities and backwards jumps caused by NTP inserting leap
* seconds as CLOCK_REALTIME does.
*
* See: **clock_gettime**\ (**CLOCK_TAI**)
* Return
* Current *ktime*.
*
* long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
* Description
* Drain samples from the specified user ring buffer, and invoke
* the provided callback for each such sample:
*
* long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx);
*
* If **callback_fn** returns 0, the helper will continue to try
* and drain the next sample, up to a maximum of
* BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
* the helper will skip the rest of the samples and return. Other
* return values are not used now, and will be rejected by the
* verifier.
* Return
* The number of drained samples if no error was encountered while
* draining samples, or 0 if no samples were present in the ring
* buffer. If a user-space producer was epoll-waiting on this map,
* and at least one sample was drained, they will receive an event
* notification notifying them of available space in the ring
* buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
* function, no wakeup notification will be sent. If the
* BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
* be sent even if no sample was drained.
*
* On failure, the returned value is one of the following:
*
* **-EBUSY** if the ring buffer is contended, and another calling
* context was concurrently draining the ring buffer.
*
* **-EINVAL** if user-space is not properly tracking the ring
* buffer due to the producer position not being aligned to 8
* bytes, a sample not being aligned to 8 bytes, or the producer
* position not matching the advertised length of a sample.
*
* **-E2BIG** if user-space has tried to publish a sample which is
* larger than the size of the ring buffer, or which cannot fit
* within a struct bpf_dynptr.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
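Taken together, the dynptr helpers added in this hunk let the ring-buffer produce path be written through the dynptr interface. A hedged sketch (map name, attach point and sample layout are illustrative):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int produce_sample(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 val = 42;

	/* A matching submit or discard is mandatory even if the
	 * reservation fails; the verifier enforces the pairing. */
	bpf_ringbuf_reserve_dynptr(&rb, sizeof(val), 0, &ptr);
	/* Write is simply rejected if the dynptr is invalid/null. */
	bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```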
@@ -5370,6 +5627,20 @@ union bpf_attr {
FN(ima_file_hash), \
FN(kptr_xchg), \
FN(map_lookup_percpu_elem), \
FN(skc_to_mptcp_sock), \
FN(dynptr_from_mem), \
FN(ringbuf_reserve_dynptr), \
FN(ringbuf_submit_dynptr), \
FN(ringbuf_discard_dynptr), \
FN(dynptr_read), \
FN(dynptr_write), \
FN(dynptr_data), \
FN(tcp_raw_gen_syncookie_ipv4), \
FN(tcp_raw_gen_syncookie_ipv6), \
FN(tcp_raw_check_syncookie_ipv4), \
FN(tcp_raw_check_syncookie_ipv6), \
FN(ktime_get_tai_ns), \
FN(user_ringbuf_drain), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
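`user_ringbuf_drain`, the last entry added above, is the kernel-side consumer of the new **BPF_MAP_TYPE_USER_RINGBUF**. A hedged sketch of the callback-driven drain loop (map name, attach point and sample layout are made up for illustration):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 256 * 1024);
} user_rb SEC(".maps");

struct sample { __u64 value; };	/* hypothetical user-space sample layout */

static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
{
	struct sample s;

	if (bpf_dynptr_read(&s, sizeof(s), dynptr, 0, 0))
		return 1;	/* stop draining on a malformed sample */
	/* ... consume s.value ... */
	return 0;		/* keep draining */
}

SEC("fentry/__x64_sys_getpgid")	/* arch-specific name, purely illustrative */
int drain_user_samples(void *ctx)
{
	long n = bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);

	bpf_printk("drained %ld samples", n);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```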
@@ -5432,6 +5703,11 @@ enum {
BPF_F_SEQ_NUMBER = (1ULL << 3),
};
/* BPF_FUNC_skb_get_tunnel_key flags. */
enum {
BPF_F_TUNINFO_FLAGS = (1ULL << 4),
};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
@@ -5621,7 +5897,10 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
__u16 tunnel_ext; /* Padding, future use. */
union {
__u16 tunnel_ext; /* compat */
__be16 tunnel_flags;
};
__u32 tunnel_label;
union {
__u32 local_ipv4;
@@ -5665,6 +5944,11 @@ enum bpf_ret_code {
* represented by BPF_REDIRECT above).
*/
BPF_LWT_REROUTE = 128,
/* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
* to indicate that no custom dissection was performed, and
* fallback to standard dissector is requested.
*/
BPF_FLOW_DISSECTOR_CONTINUE = 129,
};
struct bpf_sock {
@@ -5910,6 +6194,8 @@ struct bpf_prog_info {
__u64 run_cnt;
__u64 recursion_misses;
__u32 verified_insns;
__u32 attach_btf_obj_id;
__u32 attach_btf_id;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -5961,11 +6247,22 @@ struct bpf_link_info {
struct {
__aligned_u64 target_name; /* in/out: target_name buffer ptr */
__u32 target_name_len; /* in/out: target_name buffer len */
/* If the iter specific field is 32 bits, it can be put
* in the first or second union. Otherwise it should be
* put in the second union.
*/
union {
struct {
__u32 map_id;
} map;
};
union {
struct {
__u64 cgroup_id;
__u32 order;
} cgroup;
};
} iter;
struct {
__u32 netns_ino;
@@ -6521,6 +6818,11 @@ struct bpf_timer {
__u64 :64;
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 :64;
__u64 :64;
} __attribute__((aligned(8)));
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@@ -6612,6 +6914,7 @@ enum bpf_core_relo_kind {
BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
};
/*
@@ -36,10 +36,10 @@ struct btf_type {
|
||||
* bits 24-28: kind (e.g. int, ptr, array...etc)
|
||||
* bits 29-30: unused
|
||||
* bit 31: kind_flag, currently used by
|
||||
* struct, union and fwd
|
||||
* struct, union, enum, fwd and enum64
|
||||
*/
|
||||
__u32 info;
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
|
||||
/* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
@@ -63,7 +63,7 @@ enum {
|
||||
BTF_KIND_ARRAY = 3, /* Array */
|
||||
BTF_KIND_STRUCT = 4, /* Struct */
|
||||
BTF_KIND_UNION = 5, /* Union */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
|
||||
BTF_KIND_FWD = 7, /* Forward */
|
||||
BTF_KIND_TYPEDEF = 8, /* Typedef */
|
||||
BTF_KIND_VOLATILE = 9, /* Volatile */
|
||||
@@ -76,6 +76,7 @@ enum {
|
||||
BTF_KIND_FLOAT = 16, /* Floating point */
|
||||
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
|
||||
BTF_KIND_TYPE_TAG = 18, /* Type Tag */
|
||||
BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
|
||||
|
||||
NR_BTF_KINDS,
|
||||
BTF_KIND_MAX = NR_BTF_KINDS - 1,
|
||||
@@ -186,4 +187,14 @@ struct btf_decl_tag {
|
||||
__s32 component_idx;
|
||||
};
|
||||
|
||||
/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
 * The exact number of btf_enum64 is stored in the vlen (of the
 * info in "struct btf_type").
 */
struct btf_enum64 {
	__u32 name_off;
	__u32 val_lo32;
	__u32 val_hi32;
};
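Because the 64-bit enumerator value is split across two 32-bit fields, consumers reassemble it themselves; libbpf's btf.h ships an equivalent accessor. A minimal sketch:

static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
{
	/* val_hi32 holds the upper half, val_lo32 the lower half */
	return ((__u64)e->val_hi32 << 32) | e->val_lo32;
}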
|
||||
|
||||
#endif /* _UAPI__LINUX_BTF_H__ */
|
||||
|
||||
@@ -348,6 +348,8 @@ enum {
|
||||
IFLA_PARENT_DEV_NAME,
|
||||
IFLA_PARENT_DEV_BUS_NAME,
|
||||
IFLA_GRO_MAX_SIZE,
|
||||
IFLA_TSO_MAX_SIZE,
|
||||
IFLA_TSO_MAX_SEGS,
|
||||
|
||||
__IFLA_MAX
|
||||
};
|
||||
@@ -888,6 +890,7 @@ enum {
|
||||
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
|
||||
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
|
||||
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
|
||||
IFLA_BOND_SLAVE_PRIO,
|
||||
__IFLA_BOND_SLAVE_MAX,
|
||||
};
|
||||
|
||||
|
||||
@@ -301,6 +301,7 @@ enum {
|
||||
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
|
||||
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
|
||||
* { u64 id; } && PERF_FORMAT_ID
|
||||
* { u64 lost; } && PERF_FORMAT_LOST
|
||||
* } && !PERF_FORMAT_GROUP
|
||||
*
|
||||
* { u64 nr;
|
||||
@@ -308,6 +309,7 @@ enum {
|
||||
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
|
||||
* { u64 value;
|
||||
* { u64 id; } && PERF_FORMAT_ID
|
||||
* { u64 lost; } && PERF_FORMAT_LOST
|
||||
* } cntr[nr];
|
||||
* } && PERF_FORMAT_GROUP
|
||||
* };
|
||||
@@ -317,8 +319,9 @@ enum perf_event_read_format {
|
||||
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
|
||||
PERF_FORMAT_ID = 1U << 2,
|
||||
PERF_FORMAT_GROUP = 1U << 3,
|
||||
PERF_FORMAT_LOST = 1U << 4,
|
||||
|
||||
PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
|
||||
PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
|
||||
};
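With PERF_FORMAT_LOST set, read() on the event fd appends one extra u64 per counter. For the non-group case described in the comment block above, the buffer layout is roughly the following (field order follows that comment; the struct name is illustrative):

struct single_read_format {
	__u64 value;
	__u64 id;   /* present when PERF_FORMAT_ID is set */
	__u64 lost; /* present when PERF_FORMAT_LOST is set */
};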
|
||||
|
||||
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
|
||||
@@ -491,7 +494,7 @@ struct perf_event_query_bpf {
|
||||
/*
|
||||
* User provided buffer to store program ids
|
||||
*/
|
||||
__u32 ids[0];
|
||||
__u32 ids[];
|
||||
};
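The [0] to [] change is a C99 flexible-array cleanup; allocation on the caller side is unchanged. A hedged sketch:

#include <stdlib.h>
#include <linux/perf_event.h>

static struct perf_event_query_bpf *alloc_query(__u32 want)
{
	struct perf_event_query_bpf *q;

	/* the flexible array member contributes nothing to sizeof(*q) */
	q = calloc(1, sizeof(*q) + want * sizeof(q->ids[0]));
	if (q)
		q->ids_len = want;
	return q;
}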
|
||||
|
||||
/*
|
||||
|
||||
@@ -180,7 +180,7 @@ struct tc_u32_sel {
|
||||
|
||||
short hoff;
|
||||
__be32 hmask;
|
||||
struct tc_u32_key keys[0];
|
||||
struct tc_u32_key keys[];
|
||||
};
|
||||
|
||||
struct tc_u32_mark {
|
||||
@@ -192,7 +192,7 @@ struct tc_u32_mark {
|
||||
struct tc_u32_pcnt {
|
||||
__u64 rcnt;
|
||||
__u64 rhit;
|
||||
__u64 kcnts[0];
|
||||
__u64 kcnts[];
|
||||
};
|
||||
|
||||
/* Flags */
|
||||
|
||||
src/Makefile (22 changed lines)
@@ -8,8 +8,8 @@ else
|
||||
msg = @printf ' %-8s %s%s\n' "$(1)" "$(2)" "$(if $(3), $(3))";
|
||||
endif
|
||||
|
||||
LIBBPF_MAJOR_VERSION := 0
|
||||
LIBBPF_MINOR_VERSION := 8
|
||||
LIBBPF_MAJOR_VERSION := 1
|
||||
LIBBPF_MINOR_VERSION := 1
|
||||
LIBBPF_PATCH_VERSION := 0
|
||||
LIBBPF_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).$(LIBBPF_PATCH_VERSION)
|
||||
LIBBPF_MAJMIN_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).0
|
||||
@@ -18,6 +18,15 @@ ifneq ($(LIBBPF_MAJMIN_VERSION), $(LIBBPF_MAP_VERSION))
|
||||
$(error Libbpf release ($(LIBBPF_VERSION)) and map ($(LIBBPF_MAP_VERSION)) versions are out of sync!)
|
||||
endif
|
||||
|
||||
define allow-override
|
||||
$(if $(or $(findstring environment,$(origin $(1))),\
|
||||
$(findstring command line,$(origin $(1)))),,\
|
||||
$(eval $(1) = $(2)))
|
||||
endef
|
||||
|
||||
$(call allow-override,CC,$(CROSS_COMPILE)cc)
|
||||
$(call allow-override,LD,$(CROSS_COMPILE)ld)
|
||||
|
||||
TOPDIR = ..
|
||||
|
||||
INCLUDES := -I. -I$(TOPDIR)/include -I$(TOPDIR)/include/uapi
|
||||
@@ -26,8 +35,9 @@ ALL_CFLAGS := $(INCLUDES)
|
||||
SHARED_CFLAGS += -fPIC -fvisibility=hidden -DSHARED
|
||||
|
||||
CFLAGS ?= -g -O2 -Werror -Wall -std=gnu89
|
||||
ALL_CFLAGS += $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
|
||||
ALL_LDFLAGS += $(LDFLAGS)
|
||||
ALL_CFLAGS += $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 $(EXTRA_CFLAGS)
|
||||
ALL_LDFLAGS += $(LDFLAGS) $(EXTRA_LDFLAGS)
|
||||
|
||||
ifdef NO_PKG_CONFIG
|
||||
ALL_LDFLAGS += -lelf -lz
|
||||
else
|
||||
@@ -40,7 +50,7 @@ OBJDIR ?= .
|
||||
SHARED_OBJDIR := $(OBJDIR)/sharedobjs
|
||||
STATIC_OBJDIR := $(OBJDIR)/staticobjs
|
||||
OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \
|
||||
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o xsk.o \
|
||||
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \
|
||||
btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \
|
||||
relo_core.o usdt.o
|
||||
SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS))
|
||||
@@ -54,7 +64,7 @@ ifndef BUILD_STATIC_ONLY
|
||||
VERSION_SCRIPT := libbpf.map
|
||||
endif
|
||||
|
||||
HEADERS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
|
||||
HEADERS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h \
|
||||
bpf_helpers.h bpf_helper_defs.h bpf_tracing.h \
|
||||
bpf_endian.h bpf_core_read.h skel_internal.h libbpf_version.h \
|
||||
usdt.bpf.h
|
||||
|
||||
src/bpf.c (495 changed lines)
@@ -84,9 +84,7 @@ static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
|
||||
return ensure_good_fd(fd);
|
||||
}
|
||||
|
||||
#define PROG_LOAD_ATTEMPTS 5
|
||||
|
||||
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
|
||||
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
|
||||
{
|
||||
int fd;
|
||||
|
||||
@@ -107,7 +105,7 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int
|
||||
*/
|
||||
int probe_memcg_account(void)
|
||||
{
|
||||
const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
|
||||
BPF_EXIT_INSN(),
|
||||
@@ -117,13 +115,13 @@ int probe_memcg_account(void)
|
||||
int prog_fd;
|
||||
|
||||
/* attempt loading freplace trying to use custom BTF */
|
||||
memset(&attr, 0, prog_load_attr_sz);
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
|
||||
attr.insns = ptr_to_u64(insns);
|
||||
attr.insn_cnt = insn_cnt;
|
||||
attr.license = ptr_to_u64("GPL");
|
||||
|
||||
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
|
||||
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
|
||||
if (prog_fd >= 0) {
|
||||
close(prog_fd);
|
||||
return 1;
|
||||
@@ -147,10 +145,6 @@ int bump_rlimit_memlock(void)
|
||||
{
|
||||
struct rlimit rlim;
|
||||
|
||||
/* this the default in libbpf 1.0, but for now user has to opt-in explicitly */
|
||||
if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK))
|
||||
return 0;
|
||||
|
||||
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
|
||||
if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
|
||||
return 0;
|
||||
@@ -187,7 +181,7 @@ int bpf_map_create(enum bpf_map_type map_type,
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
attr.map_type = map_type;
|
||||
if (map_name)
|
||||
if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
|
||||
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
||||
attr.key_size = key_size;
|
||||
attr.value_size = value_size;
|
||||
@@ -208,86 +202,6 @@ int bpf_map_create(enum bpf_map_type map_type,
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, p);
|
||||
|
||||
p.map_flags = create_attr->map_flags;
|
||||
p.numa_node = create_attr->numa_node;
|
||||
p.btf_fd = create_attr->btf_fd;
|
||||
p.btf_key_type_id = create_attr->btf_key_type_id;
|
||||
p.btf_value_type_id = create_attr->btf_value_type_id;
|
||||
p.map_ifindex = create_attr->map_ifindex;
|
||||
if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS)
|
||||
p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id;
|
||||
else
|
||||
p.inner_map_fd = create_attr->inner_map_fd;
|
||||
|
||||
return bpf_map_create(create_attr->map_type, create_attr->name,
|
||||
create_attr->key_size, create_attr->value_size,
|
||||
create_attr->max_entries, &p);
|
||||
}
|
||||
|
||||
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
|
||||
int key_size, int value_size, int max_entries,
|
||||
__u32 map_flags, int node)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts);
|
||||
|
||||
opts.map_flags = map_flags;
|
||||
if (node >= 0) {
|
||||
opts.numa_node = node;
|
||||
opts.map_flags |= BPF_F_NUMA_NODE;
|
||||
}
|
||||
|
||||
return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
|
||||
}
|
||||
|
||||
int bpf_create_map(enum bpf_map_type map_type, int key_size,
|
||||
int value_size, int max_entries, __u32 map_flags)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
|
||||
|
||||
return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
|
||||
}
|
||||
|
||||
int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
|
||||
int key_size, int value_size, int max_entries,
|
||||
__u32 map_flags)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
|
||||
|
||||
return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
|
||||
}
|
||||
|
||||
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
|
||||
int key_size, int inner_map_fd, int max_entries,
|
||||
__u32 map_flags, int node)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts);
|
||||
|
||||
opts.inner_map_fd = inner_map_fd;
|
||||
opts.map_flags = map_flags;
|
||||
if (node >= 0) {
|
||||
opts.map_flags |= BPF_F_NUMA_NODE;
|
||||
opts.numa_node = node;
|
||||
}
|
||||
|
||||
return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
|
||||
}
|
||||
|
||||
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
|
||||
int key_size, int inner_map_fd, int max_entries,
|
||||
__u32 map_flags)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts,
|
||||
.inner_map_fd = inner_map_fd,
|
||||
.map_flags = map_flags,
|
||||
);
|
||||
|
||||
return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
|
||||
}
|
||||
|
||||
static void *
|
||||
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
|
||||
__u32 actual_rec_size, __u32 expected_rec_size)
|
||||
@@ -313,12 +227,12 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
|
||||
return info;
|
||||
}
|
||||
|
||||
DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
|
||||
int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts)
|
||||
int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
|
||||
void *finfo = NULL, *linfo = NULL;
|
||||
const char *func_info, *line_info;
|
||||
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
|
||||
@@ -338,7 +252,7 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
if (attempts == 0)
|
||||
attempts = PROG_LOAD_ATTEMPTS;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
|
||||
attr.prog_type = prog_type;
|
||||
attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
|
||||
@@ -348,7 +262,7 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
|
||||
attr.kern_version = OPTS_GET(opts, kern_version, 0);
|
||||
|
||||
if (prog_name)
|
||||
if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
|
||||
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
||||
attr.license = ptr_to_u64(license);
|
||||
|
||||
@@ -401,7 +315,7 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
attr.log_level = log_level;
|
||||
}
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
if (fd >= 0)
|
||||
return fd;
|
||||
|
||||
@@ -441,7 +355,7 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
break;
|
||||
}
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
if (fd >= 0)
|
||||
goto done;
|
||||
}
|
||||
@@ -455,7 +369,7 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
attr.log_size = log_size;
|
||||
attr.log_level = 1;
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
}
|
||||
done:
|
||||
/* free() doesn't affect errno, so we don't need to restore it */
|
||||
@@ -464,218 +378,139 @@ done:
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
__attribute__((alias("bpf_load_program_xattr2")))
|
||||
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
|
||||
char *log_buf, size_t log_buf_sz);
|
||||
|
||||
static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
|
||||
char *log_buf, size_t log_buf_sz)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, p);
|
||||
|
||||
if (!load_attr || !log_buf != !log_buf_sz)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
p.expected_attach_type = load_attr->expected_attach_type;
|
||||
switch (load_attr->prog_type) {
|
||||
case BPF_PROG_TYPE_STRUCT_OPS:
|
||||
case BPF_PROG_TYPE_LSM:
|
||||
p.attach_btf_id = load_attr->attach_btf_id;
|
||||
break;
|
||||
case BPF_PROG_TYPE_TRACING:
|
||||
case BPF_PROG_TYPE_EXT:
|
||||
p.attach_btf_id = load_attr->attach_btf_id;
|
||||
p.attach_prog_fd = load_attr->attach_prog_fd;
|
||||
break;
|
||||
default:
|
||||
p.prog_ifindex = load_attr->prog_ifindex;
|
||||
p.kern_version = load_attr->kern_version;
|
||||
}
|
||||
p.log_level = load_attr->log_level;
|
||||
p.log_buf = log_buf;
|
||||
p.log_size = log_buf_sz;
|
||||
p.prog_btf_fd = load_attr->prog_btf_fd;
|
||||
p.func_info_rec_size = load_attr->func_info_rec_size;
|
||||
p.func_info_cnt = load_attr->func_info_cnt;
|
||||
p.func_info = load_attr->func_info;
|
||||
p.line_info_rec_size = load_attr->line_info_rec_size;
|
||||
p.line_info_cnt = load_attr->line_info_cnt;
|
||||
p.line_info = load_attr->line_info;
|
||||
p.prog_flags = load_attr->prog_flags;
|
||||
|
||||
return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
|
||||
load_attr->insns, load_attr->insns_cnt, &p);
|
||||
}
|
||||
|
||||
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
size_t insns_cnt, const char *license,
|
||||
__u32 kern_version, char *log_buf,
|
||||
size_t log_buf_sz)
|
||||
{
|
||||
struct bpf_load_program_attr load_attr;
|
||||
|
||||
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
|
||||
load_attr.prog_type = type;
|
||||
load_attr.expected_attach_type = 0;
|
||||
load_attr.name = NULL;
|
||||
load_attr.insns = insns;
|
||||
load_attr.insns_cnt = insns_cnt;
|
||||
load_attr.license = license;
|
||||
load_attr.kern_version = kern_version;
|
||||
|
||||
return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
|
||||
}
|
||||
|
||||
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
|
||||
size_t insns_cnt, __u32 prog_flags, const char *license,
|
||||
__u32 kern_version, char *log_buf, size_t log_buf_sz,
|
||||
int log_level)
|
||||
{
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
bump_rlimit_memlock();
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.prog_type = type;
|
||||
attr.insn_cnt = (__u32)insns_cnt;
|
||||
attr.insns = ptr_to_u64(insns);
|
||||
attr.license = ptr_to_u64(license);
|
||||
attr.log_buf = ptr_to_u64(log_buf);
|
||||
attr.log_size = log_buf_sz;
|
||||
attr.log_level = log_level;
|
||||
log_buf[0] = 0;
|
||||
attr.kern_version = kern_version;
|
||||
attr.prog_flags = prog_flags;
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_map_update_elem(int fd, const void *key, const void *value,
|
||||
__u64 flags)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.value = ptr_to_u64(value);
|
||||
attr.flags = flags;
|
||||
|
||||
ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
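The memset(&attr, 0, attr_sz) pattern introduced throughout this file zeroes and passes only the prefix of union bpf_attr that a given command actually consumes, instead of the whole (ever-growing) union. offsetofend() comes from libbpf's internal headers and looks roughly like this:

/* size of TYPE up to and including FIELD */
#define offsetofend(TYPE, FIELD) \
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))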
|
||||
|
||||
int bpf_map_lookup_elem(int fd, const void *key, void *value)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.value = ptr_to_u64(value);
|
||||
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.value = ptr_to_u64(value);
|
||||
attr.flags = flags;
|
||||
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.value = ptr_to_u64(value);
|
||||
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.value = ptr_to_u64(value);
|
||||
attr.flags = flags;
|
||||
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_delete_elem(int fd, const void *key)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
|
||||
ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.flags = flags;
|
||||
|
||||
ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, next_key);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = ptr_to_u64(key);
|
||||
attr.next_key = ptr_to_u64(next_key);
|
||||
|
||||
ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_map_freeze(int fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
|
||||
ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
@@ -684,13 +519,14 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
|
||||
__u32 *count,
|
||||
const struct bpf_map_batch_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, batch);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_map_batch_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.batch.map_fd = fd;
|
||||
attr.batch.in_batch = ptr_to_u64(in_batch);
|
||||
attr.batch.out_batch = ptr_to_u64(out_batch);
|
||||
@@ -700,7 +536,7 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
|
||||
attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
|
||||
attr.batch.flags = OPTS_GET(opts, flags, 0);
|
||||
|
||||
ret = sys_bpf(cmd, &attr, sizeof(attr));
|
||||
ret = sys_bpf(cmd, &attr, attr_sz);
|
||||
*count = attr.batch.count;
|
||||
|
||||
return libbpf_err_errno(ret);
|
||||
@@ -739,26 +575,37 @@ int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *co
|
||||
|
||||
int bpf_obj_pin(int fd, const char *pathname)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.pathname = ptr_to_u64((void *)pathname);
|
||||
attr.bpf_fd = fd;
|
||||
|
||||
ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_obj_get(const char *pathname)
|
||||
{
|
||||
return bpf_obj_get_opts(pathname, NULL);
|
||||
}
|
||||
|
||||
int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.pathname = ptr_to_u64((void *)pathname);
|
||||
if (!OPTS_VALID(opts, bpf_obj_get_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.pathname = ptr_to_u64((void *)pathname);
|
||||
attr.file_flags = OPTS_GET(opts, file_flags, 0);
|
||||
|
||||
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
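A hedged caller-side sketch of the new opts variant, opening a pinned object read-only (the pin path is made up for the example):

#include <bpf/bpf.h>

static int open_pinned_rdonly(void)
{
	LIBBPF_OPTS(bpf_obj_get_opts, opts, .file_flags = BPF_F_RDONLY);

	return bpf_obj_get_opts("/sys/fs/bpf/my_map", &opts);
}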
|
||||
|
||||
@@ -776,52 +623,50 @@ int bpf_prog_attach_opts(int prog_fd, int target_fd,
|
||||
enum bpf_attach_type type,
|
||||
const struct bpf_prog_attach_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.target_fd = target_fd;
|
||||
attr.attach_bpf_fd = prog_fd;
|
||||
attr.attach_type = type;
|
||||
attr.attach_flags = OPTS_GET(opts, flags, 0);
|
||||
attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
|
||||
|
||||
ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
__attribute__((alias("bpf_prog_attach_opts")))
|
||||
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
|
||||
enum bpf_attach_type type,
|
||||
const struct bpf_prog_attach_opts *opts);
|
||||
|
||||
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.target_fd = target_fd;
|
||||
attr.attach_type = type;
|
||||
|
||||
ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.target_fd = target_fd;
|
||||
attr.attach_bpf_fd = prog_fd;
|
||||
attr.attach_type = type;
|
||||
|
||||
ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
@@ -829,6 +674,7 @@ int bpf_link_create(int prog_fd, int target_fd,
|
||||
enum bpf_attach_type attach_type,
|
||||
const struct bpf_link_create_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, link_create);
|
||||
__u32 target_btf_id, iter_info_len;
|
||||
union bpf_attr attr;
|
||||
int fd, err;
|
||||
@@ -847,7 +693,7 @@ int bpf_link_create(int prog_fd, int target_fd,
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.link_create.prog_fd = prog_fd;
|
||||
attr.link_create.target_fd = target_fd;
|
||||
attr.link_create.attach_type = attach_type;
|
||||
@@ -891,7 +737,7 @@ int bpf_link_create(int prog_fd, int target_fd,
|
||||
break;
|
||||
}
|
||||
proceed:
|
||||
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
|
||||
if (fd >= 0)
|
||||
return fd;
|
||||
/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
|
||||
@@ -927,134 +773,107 @@ proceed:
|
||||
|
||||
int bpf_link_detach(int link_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.link_detach.link_fd = link_fd;
|
||||
|
||||
ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_link_update(int link_fd, int new_prog_fd,
|
||||
const struct bpf_link_update_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, link_update);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_link_update_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.link_update.link_fd = link_fd;
|
||||
attr.link_update.new_prog_fd = new_prog_fd;
|
||||
attr.link_update.flags = OPTS_GET(opts, flags, 0);
|
||||
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
|
||||
|
||||
ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_iter_create(int link_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.iter_create.link_fd = link_fd;
|
||||
|
||||
fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_prog_query_opts(int target_fd,
|
||||
enum bpf_attach_type type,
|
||||
struct bpf_prog_query_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, query);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_prog_query_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
|
||||
attr.query.target_fd = target_fd;
|
||||
attr.query.attach_type = type;
|
||||
attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
|
||||
attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
|
||||
attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
|
||||
attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
|
||||
|
||||
ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
|
||||
|
||||
OPTS_SET(opts, attach_flags, attr.query.attach_flags);
|
||||
OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
|
||||
|
||||
return libbpf_err_errno(ret);
|
||||
}
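A hedged caller-side sketch of bpf_prog_query_opts(), including the new prog_attach_flags output array (the cgroup fd and attach type are illustrative):

#include <stdio.h>
#include <bpf/bpf.h>

static int dump_cgroup_progs(int cg_fd)
{
	__u32 ids[16], flags[16];
	LIBBPF_OPTS(bpf_prog_query_opts, opts,
		.prog_ids = ids,
		.prog_cnt = 16,
		.prog_attach_flags = flags,
	);
	__u32 i;
	int err;

	err = bpf_prog_query_opts(cg_fd, BPF_CGROUP_INET_INGRESS, &opts);
	if (err)
		return err;

	for (i = 0; i < opts.prog_cnt; i++)
		printf("prog id %u, attach flags 0x%x\n", ids[i], flags[i]);
	return 0;
}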
|
||||
|
||||
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
|
||||
__u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
|
||||
{
|
||||
union bpf_attr attr;
|
||||
LIBBPF_OPTS(bpf_prog_query_opts, opts);
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.query.target_fd = target_fd;
|
||||
attr.query.attach_type = type;
|
||||
attr.query.query_flags = query_flags;
|
||||
attr.query.prog_cnt = *prog_cnt;
|
||||
attr.query.prog_ids = ptr_to_u64(prog_ids);
|
||||
opts.query_flags = query_flags;
|
||||
opts.prog_ids = prog_ids;
|
||||
opts.prog_cnt = *prog_cnt;
|
||||
|
||||
ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
|
||||
ret = bpf_prog_query_opts(target_fd, type, &opts);
|
||||
|
||||
if (attach_flags)
|
||||
*attach_flags = attr.query.attach_flags;
|
||||
*prog_cnt = attr.query.prog_cnt;
|
||||
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
|
||||
void *data_out, __u32 *size_out, __u32 *retval,
|
||||
__u32 *duration)
|
||||
{
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.test.prog_fd = prog_fd;
|
||||
attr.test.data_in = ptr_to_u64(data);
|
||||
attr.test.data_out = ptr_to_u64(data_out);
|
||||
attr.test.data_size_in = size;
|
||||
attr.test.repeat = repeat;
|
||||
|
||||
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
|
||||
|
||||
if (size_out)
|
||||
*size_out = attr.test.data_size_out;
|
||||
if (retval)
|
||||
*retval = attr.test.retval;
|
||||
if (duration)
|
||||
*duration = attr.test.duration;
|
||||
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
|
||||
{
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!test_attr->data_out && test_attr->data_size_out > 0)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.test.prog_fd = test_attr->prog_fd;
|
||||
attr.test.data_in = ptr_to_u64(test_attr->data_in);
|
||||
attr.test.data_out = ptr_to_u64(test_attr->data_out);
|
||||
attr.test.data_size_in = test_attr->data_size_in;
|
||||
attr.test.data_size_out = test_attr->data_size_out;
|
||||
attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
|
||||
attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
|
||||
attr.test.ctx_size_in = test_attr->ctx_size_in;
|
||||
attr.test.ctx_size_out = test_attr->ctx_size_out;
|
||||
attr.test.repeat = test_attr->repeat;
|
||||
|
||||
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
|
||||
|
||||
test_attr->data_size_out = attr.test.data_size_out;
|
||||
test_attr->ctx_size_out = attr.test.ctx_size_out;
|
||||
test_attr->retval = attr.test.retval;
|
||||
test_attr->duration = attr.test.duration;
|
||||
*attach_flags = opts.attach_flags;
|
||||
*prog_cnt = opts.prog_cnt;
|
||||
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, test);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_test_run_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.test.prog_fd = prog_fd;
|
||||
attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
|
||||
attr.test.cpu = OPTS_GET(opts, cpu, 0);
|
||||
@@ -1070,7 +889,7 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
|
||||
attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
|
||||
attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
|
||||
|
||||
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);
|
||||
|
||||
OPTS_SET(opts, data_size_out, attr.test.data_size_out);
|
||||
OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
|
||||
@@ -1082,13 +901,14 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
|
||||
|
||||
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
|
||||
union bpf_attr attr;
|
||||
int err;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.start_id = start_id;
|
||||
|
||||
err = sys_bpf(cmd, &attr, sizeof(attr));
|
||||
err = sys_bpf(cmd, &attr, attr_sz);
|
||||
if (!err)
|
||||
*next_id = attr.next_id;
|
||||
|
||||
@@ -1117,80 +937,84 @@ int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
|
||||
|
||||
int bpf_prog_get_fd_by_id(__u32 id)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.prog_id = id;
|
||||
|
||||
fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_map_get_fd_by_id(__u32 id)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_id = id;
|
||||
|
||||
fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_btf_get_fd_by_id(__u32 id)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.btf_id = id;
|
||||
|
||||
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_link_get_fd_by_id(__u32 id)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.link_id = id;
|
||||
|
||||
fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, info);
|
||||
union bpf_attr attr;
|
||||
int err;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.info.bpf_fd = bpf_fd;
|
||||
attr.info.info_len = *info_len;
|
||||
attr.info.info = ptr_to_u64(info);
|
||||
|
||||
err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
|
||||
|
||||
err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
|
||||
if (!err)
|
||||
*info_len = attr.info.info_len;
|
||||
|
||||
return libbpf_err_errno(err);
|
||||
}
|
||||
|
||||
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.raw_tracepoint.name = ptr_to_u64(name);
|
||||
attr.raw_tracepoint.prog_fd = prog_fd;
|
||||
|
||||
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
@@ -1242,41 +1066,22 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_btf_load_opts, opts);
|
||||
int fd;
|
||||
|
||||
retry:
|
||||
if (do_log && log_buf && log_buf_size) {
|
||||
opts.log_buf = log_buf;
|
||||
opts.log_size = log_buf_size;
|
||||
opts.log_level = 1;
|
||||
}
|
||||
|
||||
fd = bpf_btf_load(btf, btf_size, &opts);
|
||||
if (fd < 0 && !do_log && log_buf && log_buf_size) {
|
||||
do_log = true;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
|
||||
__u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
|
||||
__u64 *probe_addr)
|
||||
{
|
||||
union bpf_attr attr = {};
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
|
||||
union bpf_attr attr;
|
||||
int err;
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.task_fd_query.pid = pid;
|
||||
attr.task_fd_query.fd = fd;
|
||||
attr.task_fd_query.flags = flags;
|
||||
attr.task_fd_query.buf = ptr_to_u64(buf);
|
||||
attr.task_fd_query.buf_len = *buf_len;
|
||||
|
||||
err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
|
||||
err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);
|
||||
|
||||
*buf_len = attr.task_fd_query.buf_len;
|
||||
*prog_id = attr.task_fd_query.prog_id;
|
||||
@@ -1289,30 +1094,32 @@ int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
|
||||
|
||||
int bpf_enable_stats(enum bpf_stats_type type)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
|
||||
union bpf_attr attr;
|
||||
int fd;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.enable_stats.type = type;
|
||||
|
||||
fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
|
||||
fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_prog_bind_map(int prog_fd, int map_fd,
|
||||
const struct bpf_prog_bind_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
|
||||
union bpf_attr attr;
|
||||
int ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_prog_bind_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.prog_bind_map.prog_fd = prog_fd;
|
||||
attr.prog_bind_map.map_fd = map_fd;
|
||||
attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
|
||||
|
||||
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
|
||||
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
}
|
||||
|
||||
src/bpf.h (151 changed lines)
@@ -61,48 +61,6 @@ LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
|
||||
__u32 max_entries,
|
||||
const struct bpf_map_create_opts *opts);
|
||||
|
||||
struct bpf_create_map_attr {
|
||||
const char *name;
|
||||
enum bpf_map_type map_type;
|
||||
__u32 map_flags;
|
||||
__u32 key_size;
|
||||
__u32 value_size;
|
||||
__u32 max_entries;
|
||||
__u32 numa_node;
|
||||
__u32 btf_fd;
|
||||
__u32 btf_key_type_id;
|
||||
__u32 btf_value_type_id;
|
||||
__u32 map_ifindex;
|
||||
union {
|
||||
__u32 inner_map_fd;
|
||||
__u32 btf_vmlinux_value_type_id;
|
||||
};
|
||||
};
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
|
||||
LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
|
||||
LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
|
||||
int key_size, int value_size,
|
||||
int max_entries, __u32 map_flags, int node);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
|
||||
LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
|
||||
int key_size, int value_size,
|
||||
int max_entries, __u32 map_flags);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
|
||||
LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
|
||||
int value_size, int max_entries, __u32 map_flags);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
|
||||
LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type,
|
||||
const char *name, int key_size,
|
||||
int inner_map_fd, int max_entries,
|
||||
__u32 map_flags, int node);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
|
||||
LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
|
||||
const char *name, int key_size,
|
||||
int inner_map_fd, int max_entries,
|
||||
__u32 map_flags);
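All of the bpf_create_map*() declarations above disappear in 1.0. A hedged migration sketch for callers (map names and sizes are invented); note the inner-map fd now travels through the opts struct:

#include <bpf/bpf.h>

static int create_maps(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, hash_opts, .map_flags = BPF_F_NO_PREALLOC);
	LIBBPF_OPTS(bpf_map_create_opts, outer_opts);
	int hash_fd, inner_fd, outer_fd;

	/* was: bpf_create_map_name(BPF_MAP_TYPE_HASH, "events", 4, 8, 1024, BPF_F_NO_PREALLOC) */
	hash_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "events", 4, 8, 1024, &hash_opts);
	if (hash_fd < 0)
		return hash_fd;

	/* was: bpf_create_map_in_map(BPF_MAP_TYPE_HASH_OF_MAPS, ...) */
	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
	if (inner_fd < 0)
		return inner_fd;
	outer_opts.inner_map_fd = inner_fd;
	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer", 4, 4, 16, &outer_opts);
	return outer_fd;
}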
|
||||
|
||||
struct bpf_prog_load_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
|
||||
@@ -145,54 +103,6 @@ LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts);
|
||||
/* this "specialization" should go away in libbpf 1.0 */
|
||||
LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts);
|
||||
|
||||
/* This is an elaborate way to not conflict with deprecated bpf_prog_load()
|
||||
* API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone.
|
||||
* With this approach, if someone is calling bpf_prog_load() with
|
||||
* 4 arguments, they will use the deprecated API, which keeps backwards
|
||||
* compatibility (both source code and binary). If bpf_prog_load() is called
|
||||
* with 6 arguments, though, it gets redirected to __bpf_prog_load.
|
||||
* So looking forward to libbpf 1.0 when this hack will be gone and
|
||||
* __bpf_prog_load() will be called just bpf_prog_load().
|
||||
*/
|
||||
#ifndef bpf_prog_load
|
||||
#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__)
|
||||
#define ___bpf_prog_load4(file, type, pobj, prog_fd) \
|
||||
bpf_prog_load_deprecated(file, type, pobj, prog_fd)
|
||||
#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \
|
||||
bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts)
|
||||
#endif /* bpf_prog_load */
|
||||
|
||||
struct bpf_load_program_attr {
|
||||
enum bpf_prog_type prog_type;
|
||||
enum bpf_attach_type expected_attach_type;
|
||||
const char *name;
|
||||
const struct bpf_insn *insns;
|
||||
size_t insns_cnt;
|
||||
const char *license;
|
||||
union {
|
||||
__u32 kern_version;
|
||||
__u32 attach_prog_fd;
|
||||
};
|
||||
union {
|
||||
__u32 prog_ifindex;
|
||||
__u32 attach_btf_id;
|
||||
};
|
||||
__u32 prog_btf_fd;
|
||||
__u32 func_info_rec_size;
|
||||
const void *func_info;
|
||||
__u32 func_info_cnt;
|
||||
__u32 line_info_rec_size;
|
||||
const void *line_info;
|
||||
__u32 line_info_cnt;
|
||||
__u32 log_level;
|
||||
__u32 prog_flags;
|
||||
};
|
||||
|
||||
/* Flags to direct loading requirements */
|
||||
#define MAPS_RELAX_COMPAT 0x01
|
||||
@@ -200,22 +110,6 @@ struct bpf_load_program_attr {
|
||||
/* Recommended log buffer size */
|
||||
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
|
||||
LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
|
||||
char *log_buf, size_t log_buf_sz);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
|
||||
LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
|
||||
const struct bpf_insn *insns, size_t insns_cnt,
|
||||
const char *license, __u32 kern_version,
|
||||
char *log_buf, size_t log_buf_sz);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
|
||||
LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
|
||||
const struct bpf_insn *insns,
|
||||
size_t insns_cnt, __u32 prog_flags,
|
||||
const char *license, __u32 kern_version,
|
||||
char *log_buf, size_t log_buf_sz,
|
||||
int log_level);
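bpf_load_program(), bpf_load_program_xattr() and bpf_verify_program() are likewise gone; their users move to bpf_prog_load() plus an opts struct carrying the log buffer. A hedged sketch loading a trivial two-instruction program:

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int load_noop_filter(char *log_buf, size_t log_sz)
{
	const struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_sz,
		.log_level = 1,
	);

	/* was: bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns, 2, "GPL", 0, log_buf, log_sz) */
	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "noop", "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
}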
|
||||
|
||||
struct bpf_btf_load_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
|
||||
@@ -229,10 +123,6 @@ struct bpf_btf_load_opts {
|
||||
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
|
||||
const struct bpf_btf_load_opts *opts);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead")
|
||||
LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
|
||||
__u32 log_buf_size, bool do_log);
|
||||
|
||||
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
|
||||
__u64 flags);
|
||||
|
||||
@@ -380,8 +270,19 @@ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values
|
||||
__u32 *count,
|
||||
const struct bpf_map_batch_opts *opts);
|
||||
|
||||
struct bpf_obj_get_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
|
||||
__u32 file_flags;
|
||||
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_obj_get_opts__last_field file_flags
|
||||
|
||||
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
|
||||
LIBBPF_API int bpf_obj_get(const char *pathname);
|
||||
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
|
||||
const struct bpf_obj_get_opts *opts);
|
||||
|
||||
struct bpf_prog_attach_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
@@ -395,10 +296,6 @@ LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
|
||||
LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd,
|
||||
enum bpf_attach_type type,
|
||||
const struct bpf_prog_attach_opts *opts);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_prog_attach_opts() instead")
|
||||
LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd,
|
||||
enum bpf_attach_type type,
|
||||
const struct bpf_prog_attach_opts *opts);
|
||||
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
|
||||
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
|
||||
enum bpf_attach_type type);
|
||||
@@ -464,17 +361,6 @@ struct bpf_prog_test_run_attr {
|
||||
* out: length of cxt_out */
|
||||
};
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
|
||||
LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
|
||||
|
||||
/*
|
||||
* bpf_prog_test_run does not check that data_out is large enough. Consider
|
||||
* using bpf_prog_test_run_opts instead.
|
||||
*/
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
|
||||
LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
|
||||
__u32 size, void *data_out, __u32 *size_out,
|
||||
__u32 *retval, __u32 *duration);
|
||||
LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
|
||||
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
|
||||
LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
|
||||
@@ -484,9 +370,24 @@ LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
|
||||
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
|
||||
LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
|
||||
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
|
||||
|
||||
struct bpf_prog_query_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
__u32 query_flags;
|
||||
__u32 attach_flags; /* output argument */
|
||||
__u32 *prog_ids;
|
||||
__u32 prog_cnt; /* input+output argument */
|
||||
__u32 *prog_attach_flags;
|
||||
};
|
||||
#define bpf_prog_query_opts__last_field prog_attach_flags
|
||||
|
||||
LIBBPF_API int bpf_prog_query_opts(int target_fd,
|
||||
enum bpf_attach_type type,
|
||||
struct bpf_prog_query_opts *opts);
|
||||
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
|
||||
__u32 query_flags, __u32 *attach_flags,
|
||||
__u32 *prog_ids, __u32 *prog_cnt);
|
||||
|
||||
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
|
||||
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
|
||||
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
|
||||
|
||||
@@ -29,6 +29,7 @@ enum bpf_type_id_kind {
|
||||
enum bpf_type_info_kind {
|
||||
BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
|
||||
BPF_TYPE_SIZE = 1, /* type size in target kernel */
|
||||
BPF_TYPE_MATCHES = 2, /* type match in target kernel */
|
||||
};
|
||||
|
||||
/* second argument to __builtin_preserve_enum_value() built-in */
|
||||
@@ -183,6 +184,16 @@ enum bpf_enum_value_kind {
|
||||
#define bpf_core_type_exists(type) \
|
||||
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
|
||||
|
||||
/*
|
||||
* Convenience macro to check that provided named type
|
||||
* (struct/union/enum/typedef) "matches" that in a target kernel.
|
||||
* Returns:
|
||||
* 1, if the type matches in the target kernel's BTF;
|
||||
* 0, if the type does not match any in the target kernel
|
||||
*/
|
||||
#define bpf_core_type_matches(type) \
|
||||
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
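A hedged BPF-side sketch of the new macro (it needs a compiler that emits the TYPE_MATCHES relocation, e.g. recent clang; vmlinux.h and the attach point are assumptions of the example):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/tcp_retransmit_skb")
int probe_retransmit(void *ctx)
{
	/* resolved at load time by the CO-RE relocation, no runtime cost */
	if (bpf_core_type_matches(struct tcp_sock))
		bpf_printk("local tcp_sock definition matches the running kernel");
	return 0;
}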
|
||||
|
||||
/*
|
||||
* Convenience macro to get the byte size of a provided named type
|
||||
* (struct/union/enum/typedef) in a target kernel.
|
||||
|
||||
@@ -38,6 +38,10 @@ struct inode;
|
||||
struct socket;
|
||||
struct file;
|
||||
struct bpf_timer;
|
||||
struct mptcp_sock;
|
||||
struct bpf_dynptr;
|
||||
struct iphdr;
|
||||
struct ipv6hdr;
|
||||
|
||||
/*
|
||||
* bpf_map_lookup_elem
|
||||
@@ -1001,7 +1005,8 @@ static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags
|
||||
* Pull in non-linear data in case the *skb* is non-linear and not
|
||||
* all of *len* are part of the linear section. Make *len* bytes
|
||||
* from *skb* readable and writable. If a zero value is passed for
|
||||
* *len*, then the whole length of the *skb* is pulled.
|
||||
* *len*, then all bytes in the linear part of *skb* will be made
|
||||
* readable and writable.
|
||||
*
|
||||
* This helper is only needed for reading and writing with direct
|
||||
* packet access.
|
||||
@@ -1234,10 +1239,12 @@ static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *op
|
||||
* There are two supported modes at this time:
|
||||
*
|
||||
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
|
||||
* (room space is added or removed below the layer 2 header).
|
||||
* (room space is added or removed between the layer 2 and
|
||||
* layer 3 headers).
|
||||
*
|
||||
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
|
||||
* (room space is added or removed below the layer 3 header).
|
||||
* (room space is added or removed between the layer 3 and
|
||||
* layer 4 headers).
|
||||
*
|
||||
* The following flags are supported at this time:
|
||||
*
|
||||
@@ -1737,8 +1744,18 @@ static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct
|
||||
* **BPF_F_USER_STACK**
|
||||
* Collect a user space stack instead of a kernel stack.
|
||||
* **BPF_F_USER_BUILD_ID**
|
||||
* Collect buildid+offset instead of ips for user stack,
|
||||
* only valid if **BPF_F_USER_STACK** is also specified.
|
||||
* Collect (build_id, file_offset) instead of ips for user
|
||||
* stack, only valid if **BPF_F_USER_STACK** is also
|
||||
* specified.
|
||||
*
|
||||
* *file_offset* is an offset relative to the beginning
|
||||
* of the executable or shared object file backing the vma
|
||||
* which the *ip* falls in. It is *not* an offset relative
|
||||
* to that object's base address. Accordingly, it must be
|
||||
* adjusted by adding (sh_addr - sh_offset), where
|
||||
* sh_{addr,offset} correspond to the executable section
|
||||
* containing *file_offset* in the object, for comparisons
|
||||
* to symbols' st_value to be valid.
|
||||
*
|
||||
* **bpf_get_stack**\ () can collect up to
|
||||
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
|
||||
@@ -2461,10 +2478,11 @@ static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *
|
||||
*
|
||||
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||
* **sizeof**\ (**struct ip6hdr**).
|
||||
* **sizeof**\ (**struct ipv6hdr**).
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains **sizeof**\ (**struct tcphdr**).
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
*
|
||||
* Returns
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
|
||||
@@ -2687,10 +2705,11 @@ static long (*bpf_send_signal)(__u32 sig) = (void *) 109;
|
||||
*
|
||||
* *iph* points to the start of the IPv4 or IPv6 header, while
|
||||
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
|
||||
* **sizeof**\ (**struct ip6hdr**).
|
||||
* **sizeof**\ (**struct ipv6hdr**).
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header.
|
||||
* contains the length of the TCP header with options (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
*
|
||||
* Returns
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
@@ -3407,7 +3426,7 @@ static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res,
|
||||
*
|
||||
* **-EEXIST** if the option already exists.
|
||||
*
|
||||
* **-EFAULT** on failrue to parse the existing header options.
|
||||
* **-EFAULT** on failure to parse the existing header options.
|
||||
*
|
||||
* **-EPERM** if the helper cannot be used under the current
|
||||
* *skops*\ **->op**.
|
||||
@@ -3667,7 +3686,7 @@ static long (*bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155;
|
||||
* a *map* with *task* as the **key**. From this
|
||||
* perspective, the usage is not much different from
|
||||
* **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
|
||||
* helper enforces the key must be an task_struct and the map must also
|
||||
* helper enforces the key must be a task_struct and the map must also
|
||||
* be a **BPF_MAP_TYPE_TASK_STORAGE**.
|
||||
*
|
||||
* Underneath, the value is stored locally at *task* instead of
|
||||
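For context, a minimal sketch of how the helper is typically called from a tracing program; the map name, value struct, and attach point are illustrative, and kernel type definitions (e.g. vmlinux.h) plus bpf_helpers.h/bpf_tracing.h are assumed:

```c
struct counter { __u64 calls; };

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct counter);
} task_counters SEC(".maps");

SEC("fentry/do_sys_openat2")
int BPF_PROG(count_opens)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct counter *c;

	/* The task_struct itself acts as the key, as described above. */
	c = bpf_task_storage_get(&task_counters, task, 0,
				 BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (c)
		__sync_fetch_and_add(&c->calls, 1);
	return 0;
}
```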
@@ -3745,7 +3764,7 @@ static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *) 160;
|
||||
/*
|
||||
* bpf_ima_inode_hash
|
||||
*
|
||||
* Returns the stored IMA hash of the *inode* (if it's avaialable).
|
||||
* Returns the stored IMA hash of the *inode* (if it's available).
|
||||
* If the hash is larger than *size*, then only *size*
|
||||
* bytes will be copied to *dst*
|
||||
*
|
||||
@@ -3777,12 +3796,12 @@ static struct socket *(*bpf_sock_from_file)(struct file *file) = (void *) 162;
|
||||
*
|
||||
* The argument *len_diff* can be used for querying with a planned
|
||||
* size change. This allows to check MTU prior to changing packet
|
||||
* ctx. Providing an *len_diff* adjustment that is larger than the
|
||||
* ctx. Providing a *len_diff* adjustment that is larger than the
|
||||
* actual packet size (resulting in negative packet size) will in
|
||||
* principle not exceed the MTU, why it is not considered a
|
||||
* failure. Other BPF-helpers are needed for performing the
|
||||
* planned size change, why the responsability for catch a negative
|
||||
* packet size belong in those helpers.
|
||||
* principle not exceed the MTU, which is why it is not considered
|
||||
* a failure. Other BPF helpers are needed for performing the
|
||||
* planned size change; therefore the responsibility for catching
|
||||
* a negative packet size belongs in those helpers.
|
||||
*
|
||||
* Specifying *ifindex* zero means the MTU check is performed
|
||||
* against the current net device. This is practical if this isn't
|
||||
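A short sketch of the query-before-resize pattern described above, from a hypothetical tc program (the 100-byte growth is an arbitrary example):

```c
/* Returns non-zero if growing the packet by 100 bytes would still fit the
 * MTU of the current net device (ifindex 0). */
static __always_inline int room_for_growth(struct __sk_buff *skb)
{
	__u32 mtu_len = 0;

	return bpf_check_mtu(skb, 0, &mtu_len, 100, 0) == 0;
}
```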
@@ -4021,6 +4040,7 @@ static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *) 172;
|
||||
*
|
||||
* Returns
|
||||
* Address of the traced function.
|
||||
* 0 for kprobes placed within the function (not at the entry).
|
||||
*/
|
||||
static __u64 (*bpf_get_func_ip)(void *ctx) = (void *) 173;
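A small illustration of the entry vs. mid-function distinction added to the Returns clause; the attach point is only an example and bpf_tracing.h is assumed:

```c
SEC("kprobe/do_nanosleep")
int BPF_KPROBE(on_entry)
{
	/* Non-zero here, since the kprobe sits at the function entry; a kprobe
	 * placed at an offset inside the function would see 0 instead. */
	__u64 ip = bpf_get_func_ip(ctx);

	bpf_printk("traced function at %llx", ip);
	return 0;
}
```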
|
||||
|
||||
@@ -4189,13 +4209,13 @@ static long (*bpf_strncmp)(const char *s1, __u32 s1_sz, const char *s2) = (void
|
||||
/*
|
||||
* bpf_get_func_arg
|
||||
*
|
||||
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
|
||||
* Get **n**-th argument register (zero based) of the traced function (for tracing programs)
|
||||
* returned in **value**.
|
||||
*
|
||||
*
|
||||
* Returns
|
||||
* 0 on success.
|
||||
* **-EINVAL** if n >= arguments count of traced function.
|
||||
* **-EINVAL** if n >= argument register count of traced function.
|
||||
*/
|
||||
static long (*bpf_get_func_arg)(void *ctx, __u32 n, __u64 *value) = (void *) 183;
|
||||
|
||||
@@ -4215,32 +4235,45 @@ static long (*bpf_get_func_ret)(void *ctx, __u64 *value) = (void *) 184;
|
||||
/*
|
||||
* bpf_get_func_arg_cnt
|
||||
*
|
||||
* Get number of arguments of the traced function (for tracing programs).
|
||||
* Get number of registers of the traced function (for tracing programs) where
|
||||
* function arguments are stored in these registers.
|
||||
*
|
||||
*
|
||||
* Returns
|
||||
* The number of arguments of the traced function.
|
||||
* The number of argument registers of the traced function.
|
||||
*/
|
||||
static long (*bpf_get_func_arg_cnt)(void *ctx) = (void *) 185;
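A minimal sketch combining the two helpers above in a tracing program (the attach target is illustrative). Note that the counts are in argument registers, so a 16-byte struct argument, for example, occupies two of them:

```c
SEC("fexit/bpf_fentry_test1")
int BPF_PROG(dump_args)
{
	__u64 nregs = bpf_get_func_arg_cnt(ctx);
	__u64 arg0;

	if (nregs > 0 && bpf_get_func_arg(ctx, 0, &arg0) == 0)
		bpf_printk("first arg register: %llx (of %llu)", arg0, nregs);
	return 0;
}
```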
|
||||
|
||||
/*
|
||||
* bpf_get_retval
|
||||
*
|
||||
* Get the syscall's return value that will be returned to userspace.
|
||||
* Get the BPF program's return value that will be returned to the upper layers.
|
||||
*
|
||||
* This helper is currently supported by cgroup programs only.
|
||||
* This helper is currently supported by cgroup programs and only by the hooks
|
||||
* where BPF program's return value is returned to the userspace via errno.
|
||||
*
|
||||
* Returns
|
||||
* The syscall's return value.
|
||||
* The BPF program's return value.
|
||||
*/
|
||||
static int (*bpf_get_retval)(void) = (void *) 186;
|
||||
|
||||
/*
|
||||
* bpf_set_retval
|
||||
*
|
||||
* Set the syscall's return value that will be returned to userspace.
|
||||
* Set the BPF program's return value that will be returned to the upper layers.
|
||||
*
|
||||
* This helper is currently supported by cgroup programs and only by the hooks
|
||||
* where BPF program's return value is returned to the userspace via errno.
|
||||
*
|
||||
* Note that there is the following corner case where the program exports an error
|
||||
* via bpf_set_retval but signals success via 'return 1':
|
||||
*
|
||||
* bpf_set_retval(-EPERM);
|
||||
* return 1;
|
||||
*
|
||||
* In this case, the BPF program's return value will use helper's -EPERM. This
|
||||
* still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case.
|
||||
*
|
||||
* This helper is currently supported by cgroup programs only.
|
||||
*
|
||||
* Returns
|
||||
* 0 on success, or a negative error in case of failure.
|
||||
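A compact sketch of the corner case spelled out above, written as a hypothetical cgroup/bind4 program (EPERM is assumed to come from errno.h or an equivalent definition):

```c
SEC("cgroup/bind4")
int bind_with_errno(struct bpf_sock_addr *ctx)
{
	/* Export -EPERM to userspace errno... */
	bpf_set_retval(-EPERM);
	/* ...yet signal success to the hook; userspace still sees -EPERM. */
	return 1;
}
```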
@@ -4370,4 +4403,263 @@ static void *(*bpf_kptr_xchg)(void *map_value, void *ptr) = (void *) 194;
|
||||
*/
|
||||
static void *(*bpf_map_lookup_percpu_elem)(void *map, const void *key, __u32 cpu) = (void *) 195;
|
||||
|
||||
/*
|
||||
* bpf_skc_to_mptcp_sock
|
||||
*
|
||||
* Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
|
||||
*
|
||||
* Returns
|
||||
* *sk* if casting is valid, or **NULL** otherwise.
|
||||
*/
|
||||
static struct mptcp_sock *(*bpf_skc_to_mptcp_sock)(void *sk) = (void *) 196;
|
||||
|
||||
/*
|
||||
* bpf_dynptr_from_mem
|
||||
*
|
||||
* Get a dynptr to local memory *data*.
|
||||
*
|
||||
* *data* must be a ptr to a map value.
|
||||
* The maximum *size* supported is DYNPTR_MAX_SIZE.
|
||||
* *flags* is currently unused.
|
||||
*
|
||||
* Returns
|
||||
* 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
|
||||
* -EINVAL if flags is not 0.
|
||||
*/
|
||||
static long (*bpf_dynptr_from_mem)(void *data, __u32 size, __u64 flags, struct bpf_dynptr *ptr) = (void *) 197;
|
||||
|
||||
/*
|
||||
* bpf_ringbuf_reserve_dynptr
|
||||
*
|
||||
* Reserve *size* bytes of payload in a ring buffer *ringbuf*
|
||||
* through the dynptr interface. *flags* must be 0.
|
||||
*
|
||||
* Please note that a corresponding bpf_ringbuf_submit_dynptr or
|
||||
* bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
|
||||
* reservation fails. This is enforced by the verifier.
|
||||
*
|
||||
* Returns
|
||||
* 0 on success, or a negative error in case of failure.
|
||||
*/
|
||||
static long (*bpf_ringbuf_reserve_dynptr)(void *ringbuf, __u32 size, __u64 flags, struct bpf_dynptr *ptr) = (void *) 198;
|
||||
|
||||
/*
|
||||
* bpf_ringbuf_submit_dynptr
|
||||
*
|
||||
* Submit reserved ring buffer sample, pointed to by *data*,
|
||||
* through the dynptr interface. This is a no-op if the dynptr is
|
||||
* invalid/null.
|
||||
*
|
||||
* For more information on *flags*, please see
|
||||
* 'bpf_ringbuf_submit'.
|
||||
*
|
||||
* Returns
|
||||
* Nothing. Always succeeds.
|
||||
*/
|
||||
static void (*bpf_ringbuf_submit_dynptr)(struct bpf_dynptr *ptr, __u64 flags) = (void *) 199;
|
||||
|
||||
/*
|
||||
* bpf_ringbuf_discard_dynptr
|
||||
*
|
||||
* Discard reserved ring buffer sample through the dynptr
|
||||
* interface. This is a no-op if the dynptr is invalid/null.
|
||||
*
|
||||
* For more information on *flags*, please see
|
||||
* 'bpf_ringbuf_discard'.
|
||||
*
|
||||
* Returns
|
||||
* Nothing. Always succeeds.
|
||||
*/
|
||||
static void (*bpf_ringbuf_discard_dynptr)(struct bpf_dynptr *ptr, __u64 flags) = (void *) 200;
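The three ring-buffer dynptr helpers above are normally used together with bpf_dynptr_write() (documented further below). A minimal sketch, assuming the caller passes a pointer to a BPF_MAP_TYPE_RINGBUF map; note the discard on the failure path, which the verifier requires:

```c
struct sample { __u32 pid; };

/* ringbuf points to a BPF_MAP_TYPE_RINGBUF map, e.g. &events. */
static __always_inline void emit_sample(void *ringbuf)
{
	struct bpf_dynptr ptr;
	struct sample s = { .pid = bpf_get_current_pid_tgid() >> 32 };

	if (bpf_ringbuf_reserve_dynptr(ringbuf, sizeof(s), 0, &ptr)) {
		bpf_ringbuf_discard_dynptr(&ptr, 0); /* required even on failure */
		return;
	}
	bpf_dynptr_write(&ptr, 0, &s, sizeof(s), 0);
	bpf_ringbuf_submit_dynptr(&ptr, 0);
}
```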
|
||||
|
||||
/*
|
||||
* bpf_dynptr_read
|
||||
*
|
||||
* Read *len* bytes from *src* into *dst*, starting from *offset*
|
||||
* into *src*.
|
||||
* *flags* is currently unused.
|
||||
*
|
||||
* Returns
|
||||
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
|
||||
* of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
|
||||
* *flags* is not 0.
|
||||
*/
|
||||
static long (*bpf_dynptr_read)(void *dst, __u32 len, struct bpf_dynptr *src, __u32 offset, __u64 flags) = (void *) 201;
|
||||
|
||||
/*
|
||||
* bpf_dynptr_write
|
||||
*
|
||||
* Write *len* bytes from *src* into *dst*, starting from *offset*
|
||||
* into *dst*.
|
||||
* *flags* is currently unused.
|
||||
*
|
||||
* Returns
|
||||
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
|
||||
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
|
||||
* is a read-only dynptr or if *flags* is not 0.
|
||||
*/
|
||||
static long (*bpf_dynptr_write)(struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len, __u64 flags) = (void *) 202;
|
||||
|
||||
/*
|
||||
* bpf_dynptr_data
|
||||
*
|
||||
* Get a pointer to the underlying dynptr data.
|
||||
*
|
||||
* *len* must be a statically known value. The returned data slice
|
||||
* is invalidated whenever the dynptr is invalidated.
|
||||
*
|
||||
* Returns
|
||||
* Pointer to the underlying dynptr data, NULL if the dynptr is
|
||||
* read-only, if the dynptr is invalid, or if the offset and length
|
||||
* is out of bounds.
|
||||
*/
|
||||
static void *(*bpf_dynptr_data)(struct bpf_dynptr *ptr, __u32 offset, __u32 len) = (void *) 203;
|
||||
|
||||
/*
|
||||
* bpf_tcp_raw_gen_syncookie_ipv4
|
||||
*
|
||||
* Try to issue a SYN cookie for the packet with corresponding
|
||||
* IPv4/TCP headers, *iph* and *th*, without depending on a
|
||||
* listening socket.
|
||||
*
|
||||
* *iph* points to the IPv4 header.
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
*
|
||||
* Returns
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
* and the top 16 bits are unused.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EINVAL** if *th_len* is invalid.
|
||||
*/
|
||||
static __s64 (*bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th, __u32 th_len) = (void *) 204;
|
||||
|
||||
/*
|
||||
* bpf_tcp_raw_gen_syncookie_ipv6
|
||||
*
|
||||
* Try to issue a SYN cookie for the packet with corresponding
|
||||
* IPv6/TCP headers, *iph* and *th*, without depending on a
|
||||
* listening socket.
|
||||
*
|
||||
* *iph* points to the IPv6 header.
|
||||
*
|
||||
* *th* points to the start of the TCP header, while *th_len*
|
||||
* contains the length of the TCP header (at least
|
||||
* **sizeof**\ (**struct tcphdr**)).
|
||||
*
|
||||
* Returns
|
||||
* On success, lower 32 bits hold the generated SYN cookie in
|
||||
* followed by 16 bits which hold the MSS value for that cookie,
|
||||
* and the top 16 bits are unused.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EINVAL** if *th_len* is invalid.
|
||||
*
|
||||
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
|
||||
*/
|
||||
static __s64 (*bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th, __u32 th_len) = (void *) 205;
|
||||
|
||||
/*
|
||||
* bpf_tcp_raw_check_syncookie_ipv4
|
||||
*
|
||||
* Check whether *iph* and *th* contain a valid SYN cookie ACK
|
||||
* without depending on a listening socket.
|
||||
*
|
||||
* *iph* points to the IPv4 header.
|
||||
*
|
||||
* *th* points to the TCP header.
|
||||
*
|
||||
* Returns
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EACCES** if the SYN cookie is not valid.
|
||||
*/
|
||||
static long (*bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th) = (void *) 206;
|
||||
|
||||
/*
|
||||
* bpf_tcp_raw_check_syncookie_ipv6
|
||||
*
|
||||
* Check whether *iph* and *th* contain a valid SYN cookie ACK
|
||||
* without depending on a listening socket.
|
||||
*
|
||||
* *iph* points to the IPv6 header.
|
||||
*
|
||||
* *th* points to the TCP header.
|
||||
*
|
||||
* Returns
|
||||
* 0 if *iph* and *th* are a valid SYN cookie ACK.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EACCES** if the SYN cookie is not valid.
|
||||
*
|
||||
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
|
||||
*/
|
||||
static long (*bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th) = (void *) 207;
|
||||
|
||||
/*
|
||||
* bpf_ktime_get_tai_ns
|
||||
*
|
||||
* A nonsettable system-wide clock derived from wall-clock time but
|
||||
* ignoring leap seconds. This clock does not experience
|
||||
* discontinuities and backwards jumps caused by NTP inserting leap
|
||||
* seconds as CLOCK_REALTIME does.
|
||||
*
|
||||
* See: **clock_gettime**\ (**CLOCK_TAI**)
|
||||
*
|
||||
* Returns
|
||||
* Current *ktime*.
|
||||
*/
|
||||
static __u64 (*bpf_ktime_get_tai_ns)(void) = (void *) 208;
|
||||
|
||||
/*
|
||||
* bpf_user_ringbuf_drain
|
||||
*
|
||||
* Drain samples from the specified user ring buffer, and invoke
|
||||
* the provided callback for each such sample:
|
||||
*
|
||||
* long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx);
|
||||
*
|
||||
* If **callback_fn** returns 0, the helper will continue to try
|
||||
* and drain the next sample, up to a maximum of
|
||||
* BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
|
||||
* the helper will skip the rest of the samples and return. Other
|
||||
* return values are not used now, and will be rejected by the
|
||||
* verifier.
|
||||
*
|
||||
* Returns
|
||||
* The number of drained samples if no error was encountered while
|
||||
* draining samples, or 0 if no samples were present in the ring
|
||||
* buffer. If a user-space producer was epoll-waiting on this map,
|
||||
* and at least one sample was drained, they will receive an event
|
||||
* notification notifying them of available space in the ring
|
||||
* buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
|
||||
* function, no wakeup notification will be sent. If the
|
||||
* BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
|
||||
* be sent even if no sample was drained.
|
||||
*
|
||||
* On failure, the returned value is one of the following:
|
||||
*
|
||||
* **-EBUSY** if the ring buffer is contended, and another calling
|
||||
* context was concurrently draining the ring buffer.
|
||||
*
|
||||
* **-EINVAL** if user-space is not properly tracking the ring
|
||||
* buffer due to the producer position not being aligned to 8
|
||||
* bytes, a sample not being aligned to 8 bytes, or the producer
|
||||
* position not matching the advertised length of a sample.
|
||||
*
|
||||
* **-E2BIG** if user-space has tried to publish a sample which is
|
||||
* larger than the size of the ring buffer, or which cannot fit
|
||||
* within a struct bpf_dynptr.
|
||||
*/
|
||||
static long (*bpf_user_ringbuf_drain)(void *map, void *callback_fn, void *ctx, __u64 flags) = (void *) 209;
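A sketch of the callback contract described above. The map definition and attach point are illustrative, and error handling is kept to the minimum:

```c
struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 256 * 1024);
} user_rb SEC(".maps");

static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
{
	__u32 msg;

	if (bpf_dynptr_read(&msg, sizeof(msg), dynptr, 0, 0))
		return 1;	/* malformed sample: skip the rest */
	return 0;		/* keep draining the next sample */
}

SEC("fentry/do_nanosleep")
int BPF_PROG(drain_user_rb)
{
	long n = bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);

	bpf_printk("drained %ld samples", n);
	return 0;
}
```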
|
||||
|
||||
|
||||
|
src/bpf_helpers.h
@@ -22,12 +22,25 @@
|
||||
* To allow use of SEC() with externs (e.g., for extern .maps declarations),
|
||||
* make sure __attribute__((unused)) doesn't trigger compilation warning.
|
||||
*/
|
||||
#if __GNUC__ && !__clang__
|
||||
|
||||
/*
|
||||
* Pragma macros are broken on GCC
|
||||
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
|
||||
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
|
||||
*/
|
||||
#define SEC(name) __attribute__((section(name), used))
|
||||
|
||||
#else
|
||||
|
||||
#define SEC(name) \
|
||||
_Pragma("GCC diagnostic push") \
|
||||
_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
|
||||
__attribute__((section(name), used)) \
|
||||
_Pragma("GCC diagnostic pop") \
|
||||
|
||||
#endif
|
||||
|
||||
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
|
||||
#undef __always_inline
|
||||
#define __always_inline inline __attribute__((always_inline))
|
||||
@@ -147,18 +160,6 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Helper structure used by eBPF C program
|
||||
* to describe BPF map attributes to libbpf loader
|
||||
*/
|
||||
struct bpf_map_def {
|
||||
unsigned int type;
|
||||
unsigned int key_size;
|
||||
unsigned int value_size;
|
||||
unsigned int max_entries;
|
||||
unsigned int map_flags;
|
||||
} __attribute__((deprecated("use BTF-defined maps in .maps section")));
|
||||
|
||||
enum libbpf_pin_type {
|
||||
LIBBPF_PIN_NONE,
|
||||
/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
|
||||
|
src/bpf_tracing.h
@@ -2,6 +2,8 @@
|
||||
#ifndef __BPF_TRACING_H__
|
||||
#define __BPF_TRACING_H__
|
||||
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
|
||||
#if defined(__TARGET_ARCH_x86)
|
||||
#define bpf_target_x86
|
||||
@@ -140,7 +142,7 @@ struct pt_regs___s390 {
|
||||
#define __PT_RC_REG gprs[2]
|
||||
#define __PT_SP_REG gprs[15]
|
||||
#define __PT_IP_REG psw.addr
|
||||
#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
|
||||
|
||||
#elif defined(bpf_target_arm)
|
||||
@@ -174,7 +176,7 @@ struct pt_regs___arm64 {
|
||||
#define __PT_RC_REG regs[0]
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG pc
|
||||
#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
|
||||
|
||||
#elif defined(bpf_target_mips)
|
||||
@@ -233,7 +235,7 @@ struct pt_regs___arm64 {
|
||||
#define __PT_PARM5_REG a4
|
||||
#define __PT_RET_REG ra
|
||||
#define __PT_FP_REG s0
|
||||
#define __PT_RC_REG a5
|
||||
#define __PT_RC_REG a0
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG pc
|
||||
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
|
||||
@@ -424,7 +426,7 @@ struct pt_regs;
|
||||
*/
|
||||
#define BPF_PROG(name, args...) \
|
||||
name(unsigned long long *ctx); \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(unsigned long long *ctx, ##args); \
|
||||
typeof(name(0)) name(unsigned long long *ctx) \
|
||||
{ \
|
||||
@@ -433,9 +435,116 @@ typeof(name(0)) name(unsigned long long *ctx) \
|
||||
return ____##name(___bpf_ctx_cast(args)); \
|
||||
_Pragma("GCC diagnostic pop") \
|
||||
} \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(unsigned long long *ctx, ##args)
|
||||
|
||||
#ifndef ___bpf_nth2
|
||||
#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
|
||||
_14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
|
||||
#endif
|
||||
#ifndef ___bpf_narg2
|
||||
#define ___bpf_narg2(...) \
|
||||
___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
|
||||
6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
|
||||
#endif
|
||||
|
||||
#define ___bpf_treg_cnt(t) \
|
||||
__builtin_choose_expr(sizeof(t) == 1, 1, \
|
||||
__builtin_choose_expr(sizeof(t) == 2, 1, \
|
||||
__builtin_choose_expr(sizeof(t) == 4, 1, \
|
||||
__builtin_choose_expr(sizeof(t) == 8, 1, \
|
||||
__builtin_choose_expr(sizeof(t) == 16, 2, \
|
||||
(void)0)))))
|
||||
|
||||
#define ___bpf_reg_cnt0() (0)
|
||||
#define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
|
||||
#define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)
|
||||
|
||||
#define ___bpf_union_arg(t, x, n) \
|
||||
__builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
|
||||
__builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
|
||||
__builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
|
||||
__builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
|
||||
__builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
|
||||
(void)0)))))
|
||||
|
||||
#define ___bpf_ctx_arg0(n, args...)
|
||||
#define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
|
||||
#define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
|
||||
#define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
|
||||
#define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
|
||||
#define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
|
||||
#define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
|
||||
#define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
|
||||
#define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
|
||||
#define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
|
||||
#define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
|
||||
#define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
|
||||
#define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
|
||||
#define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)
|
||||
|
||||
#define ___bpf_ctx_decl0()
|
||||
#define ___bpf_ctx_decl1(t, x) , t x
|
||||
#define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args)
|
||||
#define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args)
|
||||
#define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args)
|
||||
#define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args)
|
||||
#define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args)
|
||||
#define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args)
|
||||
#define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args)
|
||||
#define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args)
|
||||
#define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args)
|
||||
#define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args)
|
||||
#define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args)
|
||||
#define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)
|
||||
|
||||
/*
|
||||
* BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
|
||||
* arguments. Since each struct argument might take one or two u64 values
|
||||
* in the trampoline stack, argument type size is needed to place proper number
|
||||
* of u64 values for each argument. Therefore, BPF_PROG2 has different
|
||||
* syntax from BPF_PROG. For example, for the following BPF_PROG syntax:
|
||||
*
|
||||
* int BPF_PROG(test2, int a, int b) { ... }
|
||||
*
|
||||
* the corresponding BPF_PROG2 syntax is:
|
||||
*
|
||||
* int BPF_PROG2(test2, int, a, int, b) { ... }
|
||||
*
|
||||
* where type and the corresponding argument name are separated by comma.
|
||||
*
|
||||
* Use BPF_PROG2 macro if one of the arguments might be a struct/union larger
|
||||
* than 8 bytes:
|
||||
*
|
||||
* int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
|
||||
* int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
|
||||
* {
|
||||
* // access a, b, c, d, e, and ret directly
|
||||
* ...
|
||||
* }
|
||||
*/
|
||||
#define BPF_PROG2(name, args...) \
|
||||
name(unsigned long long *ctx); \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \
|
||||
typeof(name(0)) name(unsigned long long *ctx) \
|
||||
{ \
|
||||
return ____##name(ctx ___bpf_ctx_arg(args)); \
|
||||
} \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
#define ___bpf_kprobe_args0() ctx
|
||||
@@ -458,7 +567,7 @@ struct pt_regs;
|
||||
*/
|
||||
#define BPF_KPROBE(name, args...) \
|
||||
name(struct pt_regs *ctx); \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args); \
|
||||
typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
{ \
|
||||
@@ -467,7 +576,7 @@ typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
return ____##name(___bpf_kprobe_args(args)); \
|
||||
_Pragma("GCC diagnostic pop") \
|
||||
} \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args)
|
||||
|
||||
#define ___bpf_kretprobe_args0() ctx
|
||||
@@ -482,7 +591,7 @@ ____##name(struct pt_regs *ctx, ##args)
|
||||
*/
|
||||
#define BPF_KRETPROBE(name, args...) \
|
||||
name(struct pt_regs *ctx); \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args); \
|
||||
typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
{ \
|
||||
@@ -493,39 +602,69 @@ typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
} \
|
||||
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
|
||||
|
||||
/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
|
||||
#define ___bpf_syscall_args0() ctx
|
||||
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
|
||||
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
|
||||
|
||||
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
|
||||
#define ___bpf_syswrap_args0() ctx
|
||||
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
|
||||
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
|
||||
|
||||
/*
|
||||
* BPF_KPROBE_SYSCALL is a variant of BPF_KPROBE, which is intended for
|
||||
* BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
|
||||
* tracing syscall functions, like __x64_sys_close. It hides the underlying
|
||||
* platform-specific low-level way of getting syscall input arguments from
|
||||
* struct pt_regs, and provides a familiar typed and named function arguments
|
||||
* syntax and semantics of accessing syscall input parameters.
|
||||
*
|
||||
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
|
||||
* Original struct pt_regs * context is preserved as 'ctx' argument. This might
|
||||
* be necessary when using BPF helpers like bpf_perf_event_output().
|
||||
*
|
||||
* This macro relies on BPF CO-RE support.
|
||||
* At the moment BPF_KSYSCALL does not transparently handle all the calling
|
||||
* convention quirks for the following syscalls:
|
||||
*
|
||||
* - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
|
||||
* - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
|
||||
* CONFIG_CLONE_BACKWARDS3.
|
||||
* - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
|
||||
* - compat syscalls.
|
||||
*
|
||||
* This may or may not change in the future. User needs to take extra measures
|
||||
* to handle such quirks explicitly, if necessary.
|
||||
*
|
||||
* This macro relies on BPF CO-RE support and virtual __kconfig externs.
|
||||
*/
|
||||
#define BPF_KPROBE_SYSCALL(name, args...) \
|
||||
#define BPF_KSYSCALL(name, args...) \
|
||||
name(struct pt_regs *ctx); \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args); \
|
||||
typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
{ \
|
||||
struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx); \
|
||||
struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
|
||||
? (struct pt_regs *)PT_REGS_PARM1(ctx) \
|
||||
: ctx; \
|
||||
_Pragma("GCC diagnostic push") \
|
||||
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
|
||||
return ____##name(___bpf_syscall_args(args)); \
|
||||
if (LINUX_HAS_SYSCALL_WRAPPER) \
|
||||
return ____##name(___bpf_syswrap_args(args)); \
|
||||
else \
|
||||
return ____##name(___bpf_syscall_args(args)); \
|
||||
_Pragma("GCC diagnostic pop") \
|
||||
} \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args)
|
||||
|
||||
#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
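A hypothetical usage sketch of BPF_KSYSCALL as described above; SEC("ksyscall/...") resolves the arch-specific syscall entry point, and the traced syscall and buffer size are arbitrary examples:

```c
SEC("ksyscall/openat")
int BPF_KSYSCALL(probe_openat, int dfd, const char *filename, int flags)
{
	char path[64];

	/* filename is the syscall's own user-space argument. */
	bpf_probe_read_user_str(path, sizeof(path), filename);
	bpf_printk("openat(dfd=%d, path=%s, flags=%d)", dfd, path, flags);
	return 0;
}
```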
|
||||
|
||||
#endif
|
||||
|
||||
src/btf.c (446 changed lines)
@@ -130,7 +130,7 @@ static inline __u64 ptr_to_u64(const void *ptr)
|
||||
|
||||
/* Ensure given dynamically allocated memory region pointed to by *data* with
|
||||
* capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
|
||||
* memory to accomodate *add_cnt* new elements, assuming *cur_cnt* elements
|
||||
* memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
|
||||
* are already used. At most *max_cnt* elements can be ever allocated.
|
||||
* If necessary, memory is reallocated and all existing data is copied over,
|
||||
* new pointer to the memory region is stored at *data, new memory region
|
||||
@@ -305,6 +305,8 @@ static int btf_type_size(const struct btf_type *t)
|
||||
return base_size + sizeof(__u32);
|
||||
case BTF_KIND_ENUM:
|
||||
return base_size + vlen * sizeof(struct btf_enum);
|
||||
case BTF_KIND_ENUM64:
|
||||
return base_size + vlen * sizeof(struct btf_enum64);
|
||||
case BTF_KIND_ARRAY:
|
||||
return base_size + sizeof(struct btf_array);
|
||||
case BTF_KIND_STRUCT:
|
||||
@@ -334,6 +336,7 @@ static void btf_bswap_type_base(struct btf_type *t)
|
||||
static int btf_bswap_type_rest(struct btf_type *t)
|
||||
{
|
||||
struct btf_var_secinfo *v;
|
||||
struct btf_enum64 *e64;
|
||||
struct btf_member *m;
|
||||
struct btf_array *a;
|
||||
struct btf_param *p;
|
||||
@@ -361,6 +364,13 @@ static int btf_bswap_type_rest(struct btf_type *t)
|
||||
e->val = bswap_32(e->val);
|
||||
}
|
||||
return 0;
|
||||
case BTF_KIND_ENUM64:
|
||||
for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
|
||||
e64->name_off = bswap_32(e64->name_off);
|
||||
e64->val_lo32 = bswap_32(e64->val_lo32);
|
||||
e64->val_hi32 = bswap_32(e64->val_hi32);
|
||||
}
|
||||
return 0;
|
||||
case BTF_KIND_ARRAY:
|
||||
a = btf_array(t);
|
||||
a->type = bswap_32(a->type);
|
||||
@@ -438,11 +448,6 @@ static int btf_parse_type_sec(struct btf *btf)
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u32 btf__get_nr_types(const struct btf *btf)
|
||||
{
|
||||
return btf->start_id + btf->nr_types - 1;
|
||||
}
|
||||
|
||||
__u32 btf__type_cnt(const struct btf *btf)
|
||||
{
|
||||
return btf->start_id + btf->nr_types;
|
||||
@@ -472,9 +477,22 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
|
||||
|
||||
static int determine_ptr_size(const struct btf *btf)
|
||||
{
|
||||
static const char * const long_aliases[] = {
|
||||
"long",
|
||||
"long int",
|
||||
"int long",
|
||||
"unsigned long",
|
||||
"long unsigned",
|
||||
"unsigned long int",
|
||||
"unsigned int long",
|
||||
"long unsigned int",
|
||||
"long int unsigned",
|
||||
"int unsigned long",
|
||||
"int long unsigned",
|
||||
};
|
||||
const struct btf_type *t;
|
||||
const char *name;
|
||||
int i, n;
|
||||
int i, j, n;
|
||||
|
||||
if (btf->base_btf && btf->base_btf->ptr_sz > 0)
|
||||
return btf->base_btf->ptr_sz;
|
||||
@@ -485,15 +503,16 @@ static int determine_ptr_size(const struct btf *btf)
|
||||
if (!btf_is_int(t))
|
||||
continue;
|
||||
|
||||
if (t->size != 4 && t->size != 8)
|
||||
continue;
|
||||
|
||||
name = btf__name_by_offset(btf, t->name_off);
|
||||
if (!name)
|
||||
continue;
|
||||
|
||||
if (strcmp(name, "long int") == 0 ||
|
||||
strcmp(name, "long unsigned int") == 0) {
|
||||
if (t->size != 4 && t->size != 8)
|
||||
continue;
|
||||
return t->size;
|
||||
for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
|
||||
if (strcmp(name, long_aliases[j]) == 0)
|
||||
return t->size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -597,6 +616,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_DATASEC:
|
||||
case BTF_KIND_FLOAT:
|
||||
size = t->size;
|
||||
@@ -644,6 +664,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
|
||||
switch (kind) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FLOAT:
|
||||
return min(btf_ptr_sz(btf), (size_t)t->size);
|
||||
case BTF_KIND_PTR:
|
||||
@@ -1204,8 +1225,6 @@ int btf__load_into_kernel(struct btf *btf)
|
||||
return btf_load_into_kernel(btf, NULL, 0, 0);
|
||||
}
|
||||
|
||||
int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
|
||||
|
||||
int btf__fd(const struct btf *btf)
|
||||
{
|
||||
return btf->fd;
|
||||
@@ -1382,92 +1401,6 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
|
||||
return btf__load_from_kernel_by_id_split(id, NULL);
|
||||
}
|
||||
|
||||
int btf__get_from_id(__u32 id, struct btf **btf)
|
||||
{
|
||||
struct btf *res;
|
||||
int err;
|
||||
|
||||
*btf = NULL;
|
||||
res = btf__load_from_kernel_by_id(id);
|
||||
err = libbpf_get_error(res);
|
||||
|
||||
if (err)
|
||||
return libbpf_err(err);
|
||||
|
||||
*btf = res;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
|
||||
__u32 expected_key_size, __u32 expected_value_size,
|
||||
__u32 *key_type_id, __u32 *value_type_id)
|
||||
{
|
||||
const struct btf_type *container_type;
|
||||
const struct btf_member *key, *value;
|
||||
const size_t max_name = 256;
|
||||
char container_name[max_name];
|
||||
__s64 key_size, value_size;
|
||||
__s32 container_id;
|
||||
|
||||
if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) {
|
||||
pr_warn("map:%s length of '____btf_map_%s' is too long\n",
|
||||
map_name, map_name);
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
container_id = btf__find_by_name(btf, container_name);
|
||||
if (container_id < 0) {
|
||||
pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
|
||||
map_name, container_name);
|
||||
return libbpf_err(container_id);
|
||||
}
|
||||
|
||||
container_type = btf__type_by_id(btf, container_id);
|
||||
if (!container_type) {
|
||||
pr_warn("map:%s cannot find BTF type for container_id:%u\n",
|
||||
map_name, container_id);
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
|
||||
pr_warn("map:%s container_name:%s is an invalid container struct\n",
|
||||
map_name, container_name);
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
key = btf_members(container_type);
|
||||
value = key + 1;
|
||||
|
||||
key_size = btf__resolve_size(btf, key->type);
|
||||
if (key_size < 0) {
|
||||
pr_warn("map:%s invalid BTF key_type_size\n", map_name);
|
||||
return libbpf_err(key_size);
|
||||
}
|
||||
|
||||
if (expected_key_size != key_size) {
|
||||
pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
|
||||
map_name, (__u32)key_size, expected_key_size);
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
value_size = btf__resolve_size(btf, value->type);
|
||||
if (value_size < 0) {
|
||||
pr_warn("map:%s invalid BTF value_type_size\n", map_name);
|
||||
return libbpf_err(value_size);
|
||||
}
|
||||
|
||||
if (expected_value_size != value_size) {
|
||||
pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
|
||||
map_name, (__u32)value_size, expected_value_size);
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
*key_type_id = key->type;
|
||||
*value_type_id = value->type;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void btf_invalidate_raw_data(struct btf *btf)
|
||||
{
|
||||
if (btf->raw_data) {
|
||||
@@ -2115,20 +2048,8 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_ENUM type with:
|
||||
* - *name* - name of the enum, can be NULL or empty for anonymous enums;
|
||||
* - *byte_sz* - size of the enum, in bytes.
|
||||
*
|
||||
* Enum initially has no enum values in it (and corresponds to enum forward
|
||||
* declaration). Enumerator values can be added by btf__add_enum_value()
|
||||
* immediately after btf__add_enum() succeeds.
|
||||
*
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
|
||||
static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
|
||||
bool is_signed, __u8 kind)
|
||||
{
|
||||
struct btf_type *t;
|
||||
int sz, name_off = 0;
|
||||
@@ -2153,12 +2074,34 @@ int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
|
||||
|
||||
/* start out with vlen=0; it will be adjusted when adding enum values */
|
||||
t->name_off = name_off;
|
||||
t->info = btf_type_info(BTF_KIND_ENUM, 0, 0);
|
||||
t->info = btf_type_info(kind, 0, is_signed);
|
||||
t->size = byte_sz;
|
||||
|
||||
return btf_commit_type(btf, sz);
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_ENUM type with:
|
||||
* - *name* - name of the enum, can be NULL or empty for anonymous enums;
|
||||
* - *byte_sz* - size of the enum, in bytes.
|
||||
*
|
||||
* Enum initially has no enum values in it (and corresponds to enum forward
|
||||
* declaration). Enumerator values can be added by btf__add_enum_value()
|
||||
* immediately after btf__add_enum() succeeds.
|
||||
*
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
|
||||
{
|
||||
/*
|
||||
* set the signedness to be unsigned, it will change to signed
|
||||
* if any later enumerator is negative.
|
||||
*/
|
||||
return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new enum value for the current ENUM type with:
|
||||
* - *name* - name of the enumerator value, can't be NULL or empty;
|
||||
@@ -2206,6 +2149,82 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
|
||||
t = btf_last_type(btf);
|
||||
btf_type_inc_vlen(t);
|
||||
|
||||
/* if negative value, set signedness to signed */
|
||||
if (value < 0)
|
||||
t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
|
||||
|
||||
btf->hdr->type_len += sz;
|
||||
btf->hdr->str_off += sz;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_ENUM64 type with:
|
||||
* - *name* - name of the enum, can be NULL or empty for anonymous enums;
|
||||
* - *byte_sz* - size of the enum, in bytes.
|
||||
* - *is_signed* - whether the enum values are signed or not;
|
||||
*
|
||||
* Enum initially has no enum values in it (and corresponds to enum forward
|
||||
* declaration). Enumerator values can be added by btf__add_enum64_value()
|
||||
* immediately after btf__add_enum64() succeeds.
|
||||
*
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
|
||||
bool is_signed)
|
||||
{
|
||||
return btf_add_enum_common(btf, name, byte_sz, is_signed,
|
||||
BTF_KIND_ENUM64);
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new enum value for the current ENUM64 type with:
|
||||
* - *name* - name of the enumerator value, can't be NULL or empty;
|
||||
* - *value* - integer value corresponding to enum value *name*;
|
||||
* Returns:
|
||||
* - 0, on success;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
|
||||
{
|
||||
struct btf_enum64 *v;
|
||||
struct btf_type *t;
|
||||
int sz, name_off;
|
||||
|
||||
/* last type should be BTF_KIND_ENUM64 */
|
||||
if (btf->nr_types == 0)
|
||||
return libbpf_err(-EINVAL);
|
||||
t = btf_last_type(btf);
|
||||
if (!btf_is_enum64(t))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
/* non-empty name */
|
||||
if (!name || !name[0])
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
/* decompose and invalidate raw data */
|
||||
if (btf_ensure_modifiable(btf))
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
sz = sizeof(struct btf_enum64);
|
||||
v = btf_add_type_mem(btf, sz);
|
||||
if (!v)
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
name_off = btf__add_str(btf, name);
|
||||
if (name_off < 0)
|
||||
return name_off;
|
||||
|
||||
v->name_off = name_off;
|
||||
v->val_lo32 = (__u32)value;
|
||||
v->val_hi32 = value >> 32;
|
||||
|
||||
/* update parent type's vlen */
|
||||
t = btf_last_type(btf);
|
||||
btf_type_inc_vlen(t);
|
||||
|
||||
btf->hdr->type_len += sz;
|
||||
btf->hdr->str_off += sz;
|
||||
return 0;
|
||||
@@ -2853,81 +2872,6 @@ const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
|
||||
return btf_ext->data;
|
||||
}
|
||||
|
||||
static int btf_ext_reloc_info(const struct btf *btf,
|
||||
const struct btf_ext_info *ext_info,
|
||||
const char *sec_name, __u32 insns_cnt,
|
||||
void **info, __u32 *cnt)
|
||||
{
|
||||
__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
|
||||
__u32 i, record_size, existing_len, records_len;
|
||||
struct btf_ext_info_sec *sinfo;
|
||||
const char *info_sec_name;
|
||||
__u64 remain_len;
|
||||
void *data;
|
||||
|
||||
record_size = ext_info->rec_size;
|
||||
sinfo = ext_info->info;
|
||||
remain_len = ext_info->len;
|
||||
while (remain_len > 0) {
|
||||
records_len = sinfo->num_info * record_size;
|
||||
info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
|
||||
if (strcmp(info_sec_name, sec_name)) {
|
||||
remain_len -= sec_hdrlen + records_len;
|
||||
sinfo = (void *)sinfo + sec_hdrlen + records_len;
|
||||
continue;
|
||||
}
|
||||
|
||||
existing_len = (*cnt) * record_size;
|
||||
data = realloc(*info, existing_len + records_len);
|
||||
if (!data)
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
memcpy(data + existing_len, sinfo->data, records_len);
|
||||
/* adjust insn_off only, the rest data will be passed
|
||||
* to the kernel.
|
||||
*/
|
||||
for (i = 0; i < sinfo->num_info; i++) {
|
||||
__u32 *insn_off;
|
||||
|
||||
insn_off = data + existing_len + (i * record_size);
|
||||
*insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt;
|
||||
}
|
||||
*info = data;
|
||||
*cnt += sinfo->num_info;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return libbpf_err(-ENOENT);
|
||||
}
|
||||
|
||||
int btf_ext__reloc_func_info(const struct btf *btf,
|
||||
const struct btf_ext *btf_ext,
|
||||
const char *sec_name, __u32 insns_cnt,
|
||||
void **func_info, __u32 *cnt)
|
||||
{
|
||||
return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
|
||||
insns_cnt, func_info, cnt);
|
||||
}
|
||||
|
||||
int btf_ext__reloc_line_info(const struct btf *btf,
|
||||
const struct btf_ext *btf_ext,
|
||||
const char *sec_name, __u32 insns_cnt,
|
||||
void **line_info, __u32 *cnt)
|
||||
{
|
||||
return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
|
||||
insns_cnt, line_info, cnt);
|
||||
}
|
||||
|
||||
__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
|
||||
{
|
||||
return btf_ext->func_info.rec_size;
|
||||
}
|
||||
|
||||
__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
|
||||
{
|
||||
return btf_ext->line_info.rec_size;
|
||||
}
|
||||
|
||||
struct btf_dedup;
|
||||
|
||||
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
|
||||
@@ -3077,9 +3021,7 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
|
||||
* deduplicating structs/unions is described in greater details in comments for
|
||||
* `btf_dedup_is_equiv` function.
|
||||
*/
|
||||
|
||||
DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
|
||||
int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
|
||||
int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
|
||||
{
|
||||
struct btf_dedup *d;
|
||||
int err;
|
||||
@@ -3139,19 +3081,6 @@ done:
|
||||
return libbpf_err(err);
|
||||
}
|
||||
|
||||
COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
|
||||
int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
|
||||
{
|
||||
LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
|
||||
|
||||
if (unused_opts) {
|
||||
pr_warn("please use new version of btf__dedup() that supports options\n");
|
||||
return libbpf_err(-ENOTSUP);
|
||||
}
|
||||
|
||||
return btf__dedup(btf, &opts);
|
||||
}
|
||||
|
||||
#define BTF_UNPROCESSED_ID ((__u32)-1)
|
||||
#define BTF_IN_PROGRESS_ID ((__u32)-2)
|
||||
|
||||
@@ -3470,7 +3399,7 @@ static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
|
||||
return info1 == info2;
|
||||
}
|
||||
|
||||
/* Calculate type signature hash of ENUM. */
|
||||
/* Calculate type signature hash of ENUM/ENUM64. */
|
||||
static long btf_hash_enum(struct btf_type *t)
|
||||
{
|
||||
long h;
|
||||
@@ -3504,9 +3433,31 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool btf_equal_enum64(struct btf_type *t1, struct btf_type *t2)
|
||||
{
|
||||
const struct btf_enum64 *m1, *m2;
|
||||
__u16 vlen;
|
||||
int i;
|
||||
|
||||
if (!btf_equal_common(t1, t2))
|
||||
return false;
|
||||
|
||||
vlen = btf_vlen(t1);
|
||||
m1 = btf_enum64(t1);
|
||||
m2 = btf_enum64(t2);
|
||||
for (i = 0; i < vlen; i++) {
|
||||
if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
|
||||
m1->val_hi32 != m2->val_hi32)
|
||||
return false;
|
||||
m1++;
|
||||
m2++;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool btf_is_enum_fwd(struct btf_type *t)
|
||||
{
|
||||
return btf_is_enum(t) && btf_vlen(t) == 0;
|
||||
return btf_is_any_enum(t) && btf_vlen(t) == 0;
|
||||
}
|
||||
|
||||
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
|
||||
@@ -3519,6 +3470,17 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
|
||||
t1->size == t2->size;
|
||||
}
|
||||
|
||||
static bool btf_compat_enum64(struct btf_type *t1, struct btf_type *t2)
|
||||
{
|
||||
if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
|
||||
return btf_equal_enum64(t1, t2);
|
||||
|
||||
/* ignore vlen when comparing */
|
||||
return t1->name_off == t2->name_off &&
|
||||
(t1->info & ~0xffff) == (t2->info & ~0xffff) &&
|
||||
t1->size == t2->size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
|
||||
* as referenced type IDs equivalence is established separately during type
|
||||
@@ -3731,6 +3693,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
|
||||
h = btf_hash_int_decl_tag(t);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
h = btf_hash_enum(t);
|
||||
break;
|
||||
case BTF_KIND_STRUCT:
|
||||
@@ -3820,6 +3783,27 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
|
||||
}
|
||||
break;
|
||||
|
||||
case BTF_KIND_ENUM64:
|
||||
h = btf_hash_enum(t);
|
||||
for_each_dedup_cand(d, hash_entry, h) {
|
||||
cand_id = (__u32)(long)hash_entry->value;
|
||||
cand = btf_type_by_id(d->btf, cand_id);
|
||||
if (btf_equal_enum64(t, cand)) {
|
||||
new_id = cand_id;
|
||||
break;
|
||||
}
|
||||
if (btf_compat_enum64(t, cand)) {
|
||||
if (btf_is_enum_fwd(t)) {
|
||||
/* resolve fwd to full enum */
|
||||
new_id = cand_id;
|
||||
break;
|
||||
}
|
||||
/* resolve canonical enum fwd to full enum */
|
||||
d->map[cand_id] = type_id;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FLOAT:
|
||||
h = btf_hash_common(t);
|
||||
@@ -4115,6 +4099,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
|
||||
case BTF_KIND_ENUM:
|
||||
return btf_compat_enum(cand_type, canon_type);
|
||||
|
||||
case BTF_KIND_ENUM64:
|
||||
return btf_compat_enum64(cand_type, canon_type);
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FLOAT:
|
||||
return btf_equal_common(cand_type, canon_type);
|
||||
@@ -4655,20 +4642,17 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
|
||||
*/
|
||||
struct btf *btf__load_vmlinux_btf(void)
|
||||
{
|
||||
struct {
|
||||
const char *path_fmt;
|
||||
bool raw_btf;
|
||||
} locations[] = {
|
||||
const char *locations[] = {
|
||||
/* try canonical vmlinux BTF through sysfs first */
|
||||
{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
|
||||
/* fall back to trying to find vmlinux ELF on disk otherwise */
|
||||
{ "/boot/vmlinux-%1$s" },
|
||||
{ "/lib/modules/%1$s/vmlinux-%1$s" },
|
||||
{ "/lib/modules/%1$s/build/vmlinux" },
|
||||
{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
|
||||
{ "/usr/lib/debug/boot/vmlinux-%1$s" },
|
||||
{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
|
||||
{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
|
||||
"/sys/kernel/btf/vmlinux",
|
||||
/* fall back to trying to find vmlinux on disk otherwise */
|
||||
"/boot/vmlinux-%1$s",
|
||||
"/lib/modules/%1$s/vmlinux-%1$s",
|
||||
"/lib/modules/%1$s/build/vmlinux",
|
||||
"/usr/lib/modules/%1$s/kernel/vmlinux",
|
||||
"/usr/lib/debug/boot/vmlinux-%1$s",
|
||||
"/usr/lib/debug/boot/vmlinux-%1$s.debug",
|
||||
"/usr/lib/debug/lib/modules/%1$s/vmlinux",
|
||||
};
|
||||
char path[PATH_MAX + 1];
|
||||
struct utsname buf;
|
||||
@@ -4678,15 +4662,12 @@ struct btf *btf__load_vmlinux_btf(void)
|
||||
uname(&buf);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(locations); i++) {
|
||||
snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
|
||||
snprintf(path, PATH_MAX, locations[i], buf.release);
|
||||
|
||||
if (access(path, R_OK))
|
||||
if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
|
||||
continue;
|
||||
|
||||
if (locations[i].raw_btf)
|
||||
btf = btf__parse_raw(path);
|
||||
else
|
||||
btf = btf__parse_elf(path, NULL);
|
||||
btf = btf__parse(path, NULL);
|
||||
err = libbpf_get_error(btf);
|
||||
pr_debug("loading kernel BTF '%s': %d\n", path, err);
|
||||
if (err)
|
||||
@@ -4717,6 +4698,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
return 0;
|
||||
|
||||
case BTF_KIND_FWD:
|
||||
@@ -4811,6 +4793,16 @@ int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ct
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_ENUM64: {
|
||||
struct btf_enum64 *m = btf_enum64(t);
|
||||
|
||||
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
|
||||
err = visit(&m->name_off, ctx);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_FUNC_PROTO: {
|
||||
struct btf_param *m = btf_params(t);
|
||||
|
||||
|
||||
src/btf.h (142 changed lines)
@@ -116,24 +116,15 @@ LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_b
|
||||
|
||||
LIBBPF_API struct btf *btf__load_vmlinux_btf(void);
|
||||
LIBBPF_API struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf);
|
||||
LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
|
||||
|
||||
LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
|
||||
LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
|
||||
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only")
|
||||
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
|
||||
LIBBPF_API int btf__load(struct btf *btf);
|
||||
LIBBPF_API int btf__load_into_kernel(struct btf *btf);
|
||||
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
|
||||
const char *type_name);
|
||||
LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
|
||||
const char *type_name, __u32 kind);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1")
|
||||
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
|
||||
LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
|
||||
LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
|
||||
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
|
||||
@@ -150,29 +141,10 @@ LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
|
||||
LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
|
||||
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
|
||||
LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "this API is not necessary when BTF-defined maps are used")
|
||||
LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
|
||||
__u32 expected_key_size,
|
||||
__u32 expected_value_size,
|
||||
__u32 *key_type_id, __u32 *value_type_id);
|
||||
|
||||
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
|
||||
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
|
||||
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
|
||||
LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
|
||||
int btf_ext__reloc_func_info(const struct btf *btf,
|
||||
const struct btf_ext *btf_ext,
|
||||
const char *sec_name, __u32 insns_cnt,
|
||||
void **func_info, __u32 *cnt);
|
||||
LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
|
||||
int btf_ext__reloc_line_info(const struct btf *btf,
|
||||
const struct btf_ext *btf_ext,
|
||||
const char *sec_name, __u32 insns_cnt,
|
||||
void **line_info, __u32 *cnt);
|
||||
LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info is deprecated; write custom func_info parsing to fetch rec_size")
|
||||
__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
|
||||
LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info is deprecated; write custom line_info parsing to fetch rec_size")
|
||||
__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
|
||||
|
||||
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
|
||||
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
|
||||
@@ -215,6 +187,8 @@ LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_
|
||||
/* enum construction APIs */
|
||||
LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz);
|
||||
LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value);
|
||||
LIBBPF_API int btf__add_enum64(struct btf *btf, const char *name, __u32 bytes_sz, bool is_signed);
|
||||
LIBBPF_API int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value);
|
||||
|
||||
enum btf_fwd_kind {
|
||||
BTF_FWD_STRUCT = 0,
|
||||
@@ -257,22 +231,12 @@ struct btf_dedup_opts {
|
||||
|
||||
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
|
||||
|
||||
LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead")
|
||||
LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts);
|
||||
#define btf__dedup(...) ___libbpf_overload(___btf_dedup, __VA_ARGS__)
|
||||
#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts)
|
||||
#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts)
|
||||
|
||||
struct btf_dump;
|
||||
|
||||
struct btf_dump_opts {
|
||||
union {
|
||||
size_t sz;
|
||||
void *ctx; /* DEPRECATED: will be gone in v1.0 */
|
||||
};
|
||||
size_t sz;
|
||||
};
|
||||
#define btf_dump_opts__last_field sz
|
||||
|
||||
typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
|
||||
|
||||
@@ -281,51 +245,6 @@ LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
|
||||
void *ctx,
|
||||
const struct btf_dump_opts *opts);
|
||||
|
||||
LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
|
||||
btf_dump_printf_fn_t printf_fn,
|
||||
void *ctx,
|
||||
const struct btf_dump_opts *opts);
|
||||
|
||||
LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
|
||||
const struct btf_ext *btf_ext,
|
||||
const struct btf_dump_opts *opts,
|
||||
btf_dump_printf_fn_t printf_fn);
|
||||
|
||||
/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the
|
||||
* type of 4th argument. If it's btf_dump's print callback, use deprecated
|
||||
* API; otherwise, choose the new btf_dump__new(). ___libbpf_override()
|
||||
* doesn't work here because both variants have 4 input arguments.
|
||||
*
|
||||
* (void *) casts are necessary to avoid compilation warnings about type
|
||||
* mismatches, because even though __builtin_choose_expr() only ever evaluates
|
||||
* one side the other side still has to satisfy type constraints (this is
|
||||
* compiler implementation limitation which might be lifted eventually,
|
||||
* according to the documentation). So passing struct btf_ext in place of
|
||||
* btf_dump_printf_fn_t would be generating compilation warning. Casting to
|
||||
* void * avoids this issue.
|
||||
*
|
||||
* Also, two type compatibility checks for a function and function pointer are
|
||||
* required because passing function reference into btf_dump__new() as
|
||||
* btf_dump__new(..., my_callback, ...) and as btf_dump__new(...,
|
||||
* &my_callback, ...) (note explicit ampersand in the latter case) actually
|
||||
* differs as far as __builtin_types_compatible_p() is concerned. Thus two
|
||||
* checks are combined to detect callback argument.
|
||||
*
|
||||
* The rest works just like in case of ___libbpf_override() usage with symbol
|
||||
* versioning.
|
||||
*
|
||||
* C++ compilers don't support __builtin_types_compatible_p(), so at least
|
||||
* don't screw up compilation for them and let C++ users pick btf_dump__new
|
||||
* vs btf_dump__new_deprecated explicitly.
|
||||
*/
|
||||
#ifndef __cplusplus
|
||||
#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \
|
||||
__builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \
|
||||
__builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \
|
||||
btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \
|
||||
btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
|
||||
#endif
|
||||
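For reference, a caller using the current (non-deprecated) signature passes the printf callback as the second argument, so the macro above resolves to the new variant. A minimal sketch, assuming vmlinux BTF is available and libbpf 1.0 error conventions (not part of this diff):

#include <stdarg.h>
#include <stdio.h>
#include <bpf/btf.h>

/* Sketch: dump all vmlinux types as C definitions. */
static void print_cb(void *ctx, const char *fmt, va_list args)
{
	vfprintf(stdout, fmt, args);
}

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	struct btf_dump *d;
	__u32 i, n;

	if (!btf)
		return 1;
	/* opts may be NULL; since the 4th argument is not a printf callback,
	 * __builtin_choose_expr() picks the new btf_dump__new() variant here.
	 */
	d = btf_dump__new(btf, print_cb, NULL, NULL);
	if (!d) {
		btf__free(btf);
		return 1;
	}
	n = btf__type_cnt(btf);
	for (i = 1; i < n; i++)
		btf_dump__dump_type(d, i);
	btf_dump__free(d);
	btf__free(btf);
	return 0;
}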
|
||||
LIBBPF_API void btf_dump__free(struct btf_dump *d);
|
||||
|
||||
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
|
||||
@@ -393,9 +312,10 @@ btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
|
||||
#ifndef BTF_KIND_FLOAT
|
||||
#define BTF_KIND_FLOAT 16 /* Floating point */
|
||||
#endif
|
||||
/* The kernel header switched to enums, so these two were never #defined */
|
||||
/* The kernel header switched to enums, so the following were never #defined */
|
||||
#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
|
||||
#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
|
||||
#define BTF_KIND_ENUM64 19 /* Enum for up-to 64bit values */
|
||||
|
||||
static inline __u16 btf_kind(const struct btf_type *t)
|
||||
{
|
||||
@@ -454,6 +374,11 @@ static inline bool btf_is_enum(const struct btf_type *t)
|
||||
return btf_kind(t) == BTF_KIND_ENUM;
|
||||
}
|
||||
|
||||
static inline bool btf_is_enum64(const struct btf_type *t)
|
||||
{
|
||||
return btf_kind(t) == BTF_KIND_ENUM64;
|
||||
}
|
||||
|
||||
static inline bool btf_is_fwd(const struct btf_type *t)
|
||||
{
|
||||
return btf_kind(t) == BTF_KIND_FWD;
|
||||
@@ -524,6 +449,18 @@ static inline bool btf_is_type_tag(const struct btf_type *t)
|
||||
return btf_kind(t) == BTF_KIND_TYPE_TAG;
|
||||
}
|
||||
|
||||
static inline bool btf_is_any_enum(const struct btf_type *t)
|
||||
{
|
||||
return btf_is_enum(t) || btf_is_enum64(t);
|
||||
}
|
||||
|
||||
static inline bool btf_kind_core_compat(const struct btf_type *t1,
|
||||
const struct btf_type *t2)
|
||||
{
|
||||
return btf_kind(t1) == btf_kind(t2) ||
|
||||
(btf_is_any_enum(t1) && btf_is_any_enum(t2));
|
||||
}
|
||||
|
||||
static inline __u8 btf_int_encoding(const struct btf_type *t)
|
||||
{
|
||||
return BTF_INT_ENCODING(*(__u32 *)(t + 1));
|
||||
@@ -549,6 +486,39 @@ static inline struct btf_enum *btf_enum(const struct btf_type *t)
|
||||
return (struct btf_enum *)(t + 1);
|
||||
}
|
||||
|
||||
struct btf_enum64;
|
||||
|
||||
static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
|
||||
{
|
||||
return (struct btf_enum64 *)(t + 1);
|
||||
}
|
||||
|
||||
static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
|
||||
{
|
||||
/* struct btf_enum64 is introduced in Linux 6.0, which is very
|
||||
* bleeding-edge. Here we are avoiding relying on struct btf_enum64
|
||||
* definition coming from kernel UAPI headers to support wider range
|
||||
* of system-wide kernel headers.
|
||||
*
|
||||
* Given this header can be also included from C++ applications, that
|
||||
* further restricts C tricks we can use (like using compatible
|
||||
* anonymous struct). So just treat struct btf_enum64 as
|
||||
* a three-element array of u32 and access second (lo32) and third
|
||||
* (hi32) elements directly.
|
||||
*
|
||||
* For reference, here is a struct btf_enum64 definition:
|
||||
*
|
||||
* const struct btf_enum64 {
|
||||
* __u32 name_off;
|
||||
* __u32 val_lo32;
|
||||
* __u32 val_hi32;
|
||||
* };
|
||||
*/
|
||||
const __u32 *e64 = (const __u32 *)e;
|
||||
|
||||
return ((__u64)e64[2] << 32) | e64[1];
|
||||
}
|
||||
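As an illustration of how these helpers compose, a caller walking a BTF_KIND_ENUM64 type might look like the following sketch (the btf handle and type id are assumed to come from elsewhere; not part of this diff):

#include <stdio.h>
#include <bpf/btf.h>

/* Sketch: print every enumerator of a BTF_KIND_ENUM64 type. */
static void print_enum64(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t = btf__type_by_id(btf, type_id);
	const struct btf_enum64 *e;
	__u16 i, n;

	if (!t || !btf_is_enum64(t))
		return;

	for (i = 0, n = btf_vlen(t), e = btf_enum64(t); i < n; i++, e++) {
		/* btf_enum64_value() reassembles val_lo32/val_hi32 */
		printf("%s = %llu\n",
		       btf__name_by_offset(btf, e->name_off),
		       (unsigned long long)btf_enum64_value(e));
	}
}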
|
||||
static inline struct btf_member *btf_members(const struct btf_type *t)
|
||||
{
|
||||
return (struct btf_member *)(t + 1);
|
||||
|
||||
src/btf_dump.c (162 changed lines)
@@ -144,15 +144,17 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
|
||||
static int btf_dump_mark_referenced(struct btf_dump *d);
|
||||
static int btf_dump_resize(struct btf_dump *d);
|
||||
|
||||
DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0)
|
||||
struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
|
||||
btf_dump_printf_fn_t printf_fn,
|
||||
void *ctx,
|
||||
const struct btf_dump_opts *opts)
|
||||
struct btf_dump *btf_dump__new(const struct btf *btf,
|
||||
btf_dump_printf_fn_t printf_fn,
|
||||
void *ctx,
|
||||
const struct btf_dump_opts *opts)
|
||||
{
|
||||
struct btf_dump *d;
|
||||
int err;
|
||||
|
||||
if (!OPTS_VALID(opts, btf_dump_opts))
|
||||
return libbpf_err_ptr(-EINVAL);
|
||||
|
||||
if (!printf_fn)
|
||||
return libbpf_err_ptr(-EINVAL);
|
||||
|
||||
@@ -188,17 +190,6 @@ err:
|
||||
return libbpf_err_ptr(err);
|
||||
}
|
||||
|
||||
COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4)
|
||||
struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
|
||||
const struct btf_ext *btf_ext,
|
||||
const struct btf_dump_opts *opts,
|
||||
btf_dump_printf_fn_t printf_fn)
|
||||
{
|
||||
if (!printf_fn)
|
||||
return libbpf_err_ptr(-EINVAL);
|
||||
return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts);
|
||||
}
|
||||
|
||||
static int btf_dump_resize(struct btf_dump *d)
|
||||
{
|
||||
int err, last_id = btf__type_cnt(d->btf) - 1;
|
||||
@@ -318,6 +309,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
|
||||
switch (btf_kind(t)) {
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FLOAT:
|
||||
break;
|
||||
@@ -538,6 +530,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
|
||||
return 1;
|
||||
}
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
/*
|
||||
* non-anonymous or non-referenced enums are top-level
|
||||
@@ -739,6 +732,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
|
||||
tstate->emit_state = EMITTED;
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
if (top_level_def) {
|
||||
btf_dump_emit_enum_def(d, id, t, 0);
|
||||
btf_dump_printf(d, ";\n\n");
|
||||
@@ -989,38 +983,81 @@ static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
|
||||
btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
|
||||
}
|
||||
|
||||
static void btf_dump_emit_enum32_val(struct btf_dump *d,
|
||||
const struct btf_type *t,
|
||||
int lvl, __u16 vlen)
|
||||
{
|
||||
const struct btf_enum *v = btf_enum(t);
|
||||
bool is_signed = btf_kflag(t);
|
||||
const char *fmt_str;
|
||||
const char *name;
|
||||
size_t dup_cnt;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
name = btf_name_of(d, v->name_off);
|
||||
/* enumerators share namespace with typedef idents */
|
||||
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
|
||||
if (dup_cnt > 1) {
|
||||
fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
|
||||
btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val);
|
||||
} else {
|
||||
fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,";
|
||||
btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void btf_dump_emit_enum64_val(struct btf_dump *d,
|
||||
const struct btf_type *t,
|
||||
int lvl, __u16 vlen)
|
||||
{
|
||||
const struct btf_enum64 *v = btf_enum64(t);
|
||||
bool is_signed = btf_kflag(t);
|
||||
const char *fmt_str;
|
||||
const char *name;
|
||||
size_t dup_cnt;
|
||||
__u64 val;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
name = btf_name_of(d, v->name_off);
|
||||
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
|
||||
val = btf_enum64_value(v);
|
||||
if (dup_cnt > 1) {
|
||||
fmt_str = is_signed ? "\n%s%s___%zd = %lldLL,"
|
||||
: "\n%s%s___%zd = %lluULL,";
|
||||
btf_dump_printf(d, fmt_str,
|
||||
pfx(lvl + 1), name, dup_cnt,
|
||||
(unsigned long long)val);
|
||||
} else {
|
||||
fmt_str = is_signed ? "\n%s%s = %lldLL,"
|
||||
: "\n%s%s = %lluULL,";
|
||||
btf_dump_printf(d, fmt_str,
|
||||
pfx(lvl + 1), name,
|
||||
(unsigned long long)val);
|
||||
}
|
||||
}
|
||||
}
|
||||
static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
|
||||
const struct btf_type *t,
|
||||
int lvl)
|
||||
{
|
||||
const struct btf_enum *v = btf_enum(t);
|
||||
__u16 vlen = btf_vlen(t);
|
||||
const char *name;
|
||||
size_t dup_cnt;
|
||||
int i;
|
||||
|
||||
btf_dump_printf(d, "enum%s%s",
|
||||
t->name_off ? " " : "",
|
||||
btf_dump_type_name(d, id));
|
||||
|
||||
if (vlen) {
|
||||
btf_dump_printf(d, " {");
|
||||
for (i = 0; i < vlen; i++, v++) {
|
||||
name = btf_name_of(d, v->name_off);
|
||||
/* enumerators share namespace with typedef idents */
|
||||
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
|
||||
if (dup_cnt > 1) {
|
||||
btf_dump_printf(d, "\n%s%s___%zu = %u,",
|
||||
pfx(lvl + 1), name, dup_cnt,
|
||||
(__u32)v->val);
|
||||
} else {
|
||||
btf_dump_printf(d, "\n%s%s = %u,",
|
||||
pfx(lvl + 1), name,
|
||||
(__u32)v->val);
|
||||
}
|
||||
}
|
||||
btf_dump_printf(d, "\n%s}", pfx(lvl));
|
||||
}
|
||||
if (!vlen)
|
||||
return;
|
||||
|
||||
btf_dump_printf(d, " {");
|
||||
if (btf_is_enum(t))
|
||||
btf_dump_emit_enum32_val(d, t, lvl, vlen);
|
||||
else
|
||||
btf_dump_emit_enum64_val(d, t, lvl, vlen);
|
||||
btf_dump_printf(d, "\n%s}", pfx(lvl));
|
||||
}
|
||||
|
||||
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
|
||||
@@ -1178,6 +1215,7 @@ skip_mod:
|
||||
break;
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
@@ -1312,6 +1350,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
|
||||
btf_dump_emit_struct_fwd(d, id, t);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
btf_dump_emit_mods(d, decls);
|
||||
/* inline anonymous enum */
|
||||
if (t->name_off == 0 && !d->skip_anon_defs)
|
||||
@@ -1988,7 +2027,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
|
||||
__u32 id,
|
||||
__s64 *value)
|
||||
{
|
||||
/* handle unaligned enum value */
|
||||
bool is_signed = btf_kflag(t);
|
||||
|
||||
if (!ptr_is_aligned(d->btf, id, data)) {
|
||||
__u64 val;
|
||||
int err;
|
||||
@@ -2005,13 +2045,13 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
|
||||
*value = *(__s64 *)data;
|
||||
return 0;
|
||||
case 4:
|
||||
*value = *(__s32 *)data;
|
||||
*value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data;
|
||||
return 0;
|
||||
case 2:
|
||||
*value = *(__s16 *)data;
|
||||
*value = is_signed ? *(__s16 *)data : *(__u16 *)data;
|
||||
return 0;
|
||||
case 1:
|
||||
*value = *(__s8 *)data;
|
||||
*value = is_signed ? *(__s8 *)data : *(__u8 *)data;
|
||||
return 0;
|
||||
default:
|
||||
pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id);
|
||||
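The is_signed checks added above matter because casting through the signed type sign-extends smaller values; a tiny standalone illustration (not libbpf code) of the difference for a 4-byte enumerator:

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0xffffffffu; /* 4-byte enumerator value */
	void *data = &raw;
	long long as_signed, as_unsigned;

	as_signed = *(int *)data;            /* old path: sign-extends to -1 */
	as_unsigned = *(unsigned int *)data; /* new unsigned path: 4294967295 */

	printf("signed=%lld unsigned=%lld\n", as_signed, as_unsigned);
	return 0;
}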
@@ -2024,7 +2064,7 @@ static int btf_dump_enum_data(struct btf_dump *d,
|
||||
__u32 id,
|
||||
const void *data)
|
||||
{
|
||||
const struct btf_enum *e;
|
||||
bool is_signed;
|
||||
__s64 value;
|
||||
int i, err;
|
||||
|
||||
@@ -2032,14 +2072,31 @@ static int btf_dump_enum_data(struct btf_dump *d,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
|
||||
if (value != e->val)
|
||||
continue;
|
||||
btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
|
||||
return 0;
|
||||
}
|
||||
is_signed = btf_kflag(t);
|
||||
if (btf_is_enum(t)) {
|
||||
const struct btf_enum *e;
|
||||
|
||||
btf_dump_type_values(d, "%d", value);
|
||||
for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
|
||||
if (value != e->val)
|
||||
continue;
|
||||
btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
|
||||
return 0;
|
||||
}
|
||||
|
||||
btf_dump_type_values(d, is_signed ? "%d" : "%u", value);
|
||||
} else {
|
||||
const struct btf_enum64 *e;
|
||||
|
||||
for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) {
|
||||
if (value != btf_enum64_value(e))
|
||||
continue;
|
||||
btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
|
||||
return 0;
|
||||
}
|
||||
|
||||
btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL",
|
||||
(unsigned long long)value);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2099,6 +2156,7 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_PTR:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
if (data + bits_offset / 8 + size > d->typed_dump->data_end)
|
||||
return -E2BIG;
|
||||
break;
|
||||
@@ -2203,6 +2261,7 @@ static int btf_dump_type_data_check_zero(struct btf_dump *d,
|
||||
return -ENODATA;
|
||||
}
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
err = btf_dump_get_enum_value(d, t, data, id, &value);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -2275,6 +2334,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
|
||||
err = btf_dump_struct_data(d, t, id, data);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
/* handle bitfield and int enum values */
|
||||
if (bit_sz) {
|
||||
__u64 print_num;
|
||||
@@ -2325,7 +2385,7 @@ int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
|
||||
d->typed_dump->indent_lvl = OPTS_GET(opts, indent_level, 0);
|
||||
|
||||
/* default indent string is a tab */
|
||||
if (!opts->indent_str)
|
||||
if (!OPTS_GET(opts, indent_str, NULL))
|
||||
d->typed_dump->indent_str[0] = '\t';
|
||||
else
|
||||
libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str,
|
||||
|
||||
@@ -533,7 +533,7 @@ void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
|
||||
gen->attach_kind = kind;
|
||||
ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
|
||||
prefix, attach_name);
|
||||
if (ret == sizeof(gen->attach_target))
|
||||
if (ret >= sizeof(gen->attach_target))
|
||||
gen->error = -ENOSPC;
|
||||
}
|
||||
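The == to >= change above is needed because snprintf() returns the length the formatted string would have had, which can exceed the buffer size when truncation occurs; a small standalone demonstration (not libbpf code):

#include <stdio.h>

int main(void)
{
	char buf[8];
	/* snprintf() reports the untruncated length (11 here), not the number
	 * of bytes actually stored, so truncation is detected with >=.
	 */
	int ret = snprintf(buf, sizeof(buf), "%s%s", "hello ", "world");

	if (ret >= (int)sizeof(buf))
		printf("truncated: needed %d chars, buffer holds %zu\n",
		       ret, sizeof(buf));
	return 0;
}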
|
||||
|
||||
src/libbpf.c (2416 changed lines; diff not shown)
src/libbpf.h (678 changed lines)
@@ -51,6 +51,42 @@ enum libbpf_errno {
|
||||
|
||||
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
|
||||
* value into a textual representation.
|
||||
* @param t The attach type.
|
||||
* @return Pointer to a static string identifying the attach type. NULL is
|
||||
* returned for unknown **bpf_attach_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_link_type_str()** converts the provided link type value
|
||||
* into a textual representation.
|
||||
* @param t The link type.
|
||||
* @return Pointer to a static string identifying the link type. NULL is
|
||||
* returned for unknown **bpf_link_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_map_type_str()** converts the provided map type value
|
||||
* into a textual representation.
|
||||
* @param t The map type.
|
||||
* @return Pointer to a static string identifying the map type. NULL is
|
||||
* returned for unknown **bpf_map_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_bpf_prog_type_str()** converts the provided program type
|
||||
* value into a textual representation.
|
||||
* @param t The program type.
|
||||
* @return Pointer to a static string identifying the program type. NULL is
|
||||
* returned for unknown **bpf_prog_type** values.
|
||||
*/
|
||||
LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);
|
||||
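A short usage sketch for these string helpers (the bpf_program handle is assumed to come from an already-opened object; not part of this diff):

#include <stdio.h>
#include <bpf/libbpf.h>

/* Sketch: report a program's type in human-readable form. */
static void report_prog_type(const struct bpf_program *prog)
{
	enum bpf_prog_type type = bpf_program__type(prog);
	const char *name = libbpf_bpf_prog_type_str(type);

	/* NULL is returned for enum values the library does not know about */
	printf("program type: %s\n", name ? name : "(unknown)");
}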
|
||||
enum libbpf_print_level {
|
||||
LIBBPF_WARN,
|
||||
LIBBPF_INFO,
|
||||
@@ -65,13 +101,8 @@ LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
|
||||
/* Hide internal to user */
|
||||
struct bpf_object;
|
||||
|
||||
struct bpf_object_open_attr {
|
||||
const char *file;
|
||||
enum bpf_prog_type prog_type;
|
||||
};
|
||||
|
||||
struct bpf_object_open_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* object name override, if provided:
|
||||
* - for object open from file, this will override setting object
|
||||
@@ -82,21 +113,14 @@ struct bpf_object_open_opts {
|
||||
const char *object_name;
|
||||
/* parse map definitions non-strictly, allowing extra attributes/data */
|
||||
bool relaxed_maps;
|
||||
/* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures.
|
||||
* Value is ignored. Relocations always are processed non-strictly.
|
||||
* Non-relocatable instructions are replaced with invalid ones to
|
||||
* prevent accidental errors.
|
||||
* */
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect")
|
||||
bool relaxed_core_relocs;
|
||||
/* maps that set the 'pinning' attribute in their definition will have
|
||||
* their pin_path attribute set to a file in this directory, and be
|
||||
* auto-pinned to that path on load; defaults to "/sys/fs/bpf".
|
||||
*/
|
||||
const char *pin_root_path;
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program")
|
||||
__u32 attach_prog_fd;
|
||||
__u32 :32; /* stub out now removed attach_prog_fd */
|
||||
|
||||
/* Additional kernel config content that augments and overrides
|
||||
* system Kconfig for CONFIG_xxx externs.
|
||||
*/
|
||||
@@ -179,20 +203,10 @@ LIBBPF_API struct bpf_object *
|
||||
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
|
||||
const struct bpf_object_open_opts *opts);
|
||||
|
||||
/* deprecated bpf_object__open variants */
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead")
|
||||
LIBBPF_API struct bpf_object *
|
||||
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
|
||||
const char *name);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open_file() instead")
|
||||
LIBBPF_API struct bpf_object *
|
||||
bpf_object__open_xattr(struct bpf_object_open_attr *attr);
|
||||
/* Load/unload object into/from kernel */
|
||||
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
|
||||
|
||||
enum libbpf_pin_type {
|
||||
LIBBPF_PIN_NONE,
|
||||
/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
|
||||
LIBBPF_PIN_BY_NAME,
|
||||
};
|
||||
LIBBPF_API void bpf_object__close(struct bpf_object *object);
|
||||
|
||||
/* pin_maps and unpin_maps can both be called with a NULL path, in which case
|
||||
* they will use the pin_path attribute of each map (and ignore all maps that
|
||||
@@ -206,20 +220,6 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
|
||||
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
|
||||
const char *path);
|
||||
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
|
||||
LIBBPF_API void bpf_object__close(struct bpf_object *object);
|
||||
|
||||
struct bpf_object_load_attr {
|
||||
struct bpf_object *obj;
|
||||
int log_level;
|
||||
const char *target_btf_path;
|
||||
};
|
||||
|
||||
/* Load/unload object into/from kernel */
|
||||
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead")
|
||||
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
|
||||
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
|
||||
|
||||
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
|
||||
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
|
||||
@@ -229,29 +229,10 @@ struct btf;
|
||||
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
|
||||
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__find_program_by_name() instead")
|
||||
LIBBPF_API struct bpf_program *
|
||||
bpf_object__find_program_by_title(const struct bpf_object *obj,
|
||||
const char *title);
|
||||
LIBBPF_API struct bpf_program *
|
||||
bpf_object__find_program_by_name(const struct bpf_object *obj,
|
||||
const char *name);
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead")
|
||||
struct bpf_object *bpf_object__next(struct bpf_object *prev);
|
||||
#define bpf_object__for_each_safe(pos, tmp) \
|
||||
for ((pos) = bpf_object__next(NULL), \
|
||||
(tmp) = bpf_object__next(pos); \
|
||||
(pos) != NULL; \
|
||||
(pos) = (tmp), (tmp) = bpf_object__next(tmp))
|
||||
|
||||
typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
|
||||
LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
|
||||
bpf_object_clear_priv_t clear_priv);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
|
||||
LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);
|
||||
|
||||
LIBBPF_API int
|
||||
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
|
||||
enum bpf_attach_type *expected_attach_type);
|
||||
@@ -262,9 +243,7 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
|
||||
|
||||
/* Accessors of bpf_program */
|
||||
struct bpf_program;
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead")
|
||||
struct bpf_program *bpf_program__next(struct bpf_program *prog,
|
||||
const struct bpf_object *obj);
|
||||
|
||||
LIBBPF_API struct bpf_program *
|
||||
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);
|
||||
|
||||
@@ -273,32 +252,18 @@ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog)
|
||||
(pos) != NULL; \
|
||||
(pos) = bpf_object__next_program((obj), (pos)))
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead")
|
||||
struct bpf_program *bpf_program__prev(struct bpf_program *prog,
|
||||
const struct bpf_object *obj);
|
||||
LIBBPF_API struct bpf_program *
|
||||
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);
|
||||
|
||||
typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
|
||||
LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
|
||||
bpf_program_clear_priv_t clear_priv);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
|
||||
LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
|
||||
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
|
||||
__u32 ifindex);
|
||||
|
||||
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
|
||||
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
|
||||
LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead")
|
||||
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
|
||||
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
|
||||
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
|
||||
|
||||
/* returns program size in bytes */
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead")
|
||||
LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
|
||||
LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
|
||||
LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);
|
||||
|
||||
struct bpf_insn;
|
||||
|
||||
@@ -352,17 +317,7 @@ LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
|
||||
*/
|
||||
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead")
|
||||
LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version);
|
||||
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
|
||||
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
|
||||
const char *path,
|
||||
int instance);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
|
||||
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
|
||||
const char *path,
|
||||
int instance);
|
||||
|
||||
/**
|
||||
* @brief **bpf_program__pin()** pins the BPF program to a file
|
||||
@@ -506,6 +461,52 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
|
||||
const char *pattern,
|
||||
const struct bpf_kprobe_multi_opts *opts);
|
||||
|
||||
struct bpf_ksyscall_opts {
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
|
||||
__u64 bpf_cookie;
|
||||
/* attach as return probe? */
|
||||
bool retprobe;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_ksyscall_opts__last_field retprobe
|
||||
|
||||
/**
|
||||
* @brief **bpf_program__attach_ksyscall()** attaches a BPF program
|
||||
* to kernel syscall handler of a specified syscall. Optionally it's possible
|
||||
* to request to install retprobe that will be triggered at syscall exit. It's
|
||||
* also possible to associate BPF cookie (through options).
|
||||
*
|
||||
* Libbpf automatically will determine correct full kernel function name,
|
||||
* which depending on system architecture and kernel version/configuration
|
||||
* could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
|
||||
* attach specified program using kprobe/kretprobe mechanism.
|
||||
*
|
||||
* **bpf_program__attach_ksyscall()** is an API counterpart of declarative
|
||||
* **SEC("ksyscall/<syscall>")** annotation of BPF programs.
|
||||
*
|
||||
* At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
|
||||
* not handle all the calling convention quirks for mmap(), clone() and compat
|
||||
* syscalls. It also only attaches to "native" syscall interfaces. If host
|
||||
* system supports compat syscalls or defines 32-bit syscalls in 64-bit
|
||||
* kernel, such syscall interfaces won't be attached to by libbpf.
|
||||
*
|
||||
* These limitations may or may not change in the future. Therefore it is
|
||||
* recommended to use SEC("kprobe") for these syscalls or if working with
|
||||
* compat and 32-bit interfaces is required.
|
||||
*
|
||||
* @param prog BPF program to attach
|
||||
* @param syscall_name Symbolic name of the syscall (e.g., "bpf")
|
||||
* @param opts Additional options (see **struct bpf_ksyscall_opts**)
|
||||
* @return Reference to the newly created BPF link; or NULL is returned on
|
||||
* error, error code is stored in errno
|
||||
*/
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_ksyscall(const struct bpf_program *prog,
|
||||
const char *syscall_name,
|
||||
const struct bpf_ksyscall_opts *opts);
|
||||
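As an illustration, attaching a program to the bpf(2) syscall entry with a cookie might look like the sketch below (the program handle is assumed to be loaded already; not part of this diff):

#include <bpf/libbpf.h>

/* Sketch: attach `prog` to the bpf(2) syscall entry with a BPF cookie. */
static struct bpf_link *attach_bpf_syscall(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_ksyscall_opts, opts,
		.bpf_cookie = 0x1234,	/* readable via bpf_get_attach_cookie() */
		.retprobe = false,	/* attach at syscall entry, not exit */
	);

	/* returns NULL and sets errno on failure */
	return bpf_program__attach_ksyscall(prog, "bpf", &opts);
}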
|
||||
struct bpf_uprobe_opts {
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
@@ -662,99 +663,6 @@ LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_iter(const struct bpf_program *prog,
|
||||
const struct bpf_iter_attach_opts *opts);
|
||||
|
||||
/*
|
||||
* Libbpf allows callers to adjust BPF programs before being loaded
|
||||
* into kernel. One program in an object file can be transformed into
|
||||
* multiple variants to be attached to different hooks.
|
||||
*
|
||||
* bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
|
||||
* form an API for this purpose.
|
||||
*
|
||||
* - bpf_program_prep_t:
|
||||
* Defines a 'preprocessor', which is a caller defined function
|
||||
* passed to libbpf through bpf_program__set_prep(), and will be
|
||||
* called before program is loaded. The processor should adjust
|
||||
* the program one time for each instance according to the instance id
|
||||
* passed to it.
|
||||
*
|
||||
* - bpf_program__set_prep:
|
||||
* Attaches a preprocessor to a BPF program. The number of instances
|
||||
* that should be created is also passed through this function.
|
||||
*
|
||||
* - bpf_program__nth_fd:
|
||||
* After the program is loaded, get resulting FD of a given instance
|
||||
* of the BPF program.
|
||||
*
|
||||
* If bpf_program__set_prep() is not used, the program would be loaded
|
||||
* without adjustment during bpf_object__load(). The program has only
|
||||
* one instance. In this case bpf_program__fd(prog) is equal to
|
||||
* bpf_program__nth_fd(prog, 0).
|
||||
*/
|
||||
struct bpf_prog_prep_result {
|
||||
/*
|
||||
* If not NULL, load new instruction array.
|
||||
* If set to NULL, don't load this instance.
|
||||
*/
|
||||
struct bpf_insn *new_insn_ptr;
|
||||
int new_insn_cnt;
|
||||
|
||||
/* If not NULL, result FD is written to it. */
|
||||
int *pfd;
|
||||
};
|
||||
|
||||
/*
|
||||
* Parameters of bpf_program_prep_t:
|
||||
* - prog: The bpf_program being loaded.
|
||||
* - n: Index of instance being generated.
|
||||
* - insns: BPF instructions array.
|
||||
* - insns_cnt:Number of instructions in insns.
|
||||
* - res: Output parameter, result of transformation.
|
||||
*
|
||||
* Return value:
|
||||
* - Zero: pre-processing success.
|
||||
* - Non-zero: pre-processing error, stop loading.
|
||||
*/
|
||||
typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
|
||||
struct bpf_insn *insns, int insns_cnt,
|
||||
struct bpf_prog_prep_result *res);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
|
||||
LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
|
||||
bpf_program_prep_t prep);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
|
||||
LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);
|
||||
|
||||
/*
|
||||
* Adjust type of BPF program. Default is kprobe.
|
||||
*/
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
|
||||
LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
|
||||
|
||||
LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
|
||||
|
||||
/**
|
||||
@@ -817,47 +725,6 @@ LIBBPF_API int
|
||||
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
|
||||
const char *attach_func_name);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
|
||||
LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog);
|
||||
|
||||
/*
|
||||
* No need for __attribute__((packed)), all members of 'bpf_map_def'
|
||||
* are all aligned. In addition, using __attribute__((packed))
|
||||
* would trigger a -Wpacked warning message, and lead to an error
|
||||
* if -Werror is set.
|
||||
*/
|
||||
struct bpf_map_def {
|
||||
unsigned int type;
|
||||
unsigned int key_size;
|
||||
unsigned int value_size;
|
||||
unsigned int max_entries;
|
||||
unsigned int map_flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief **bpf_object__find_map_by_name()** returns BPF map of
|
||||
* the given name, if it exists within the passed BPF object
|
||||
@@ -872,16 +739,6 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
|
||||
LIBBPF_API int
|
||||
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
|
||||
|
||||
/*
|
||||
* Get bpf_map through the offset of corresponding struct bpf_map_def
|
||||
* in the BPF object file.
|
||||
*/
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__find_map_by_name() instead")
|
||||
struct bpf_map *
|
||||
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead")
|
||||
struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
|
||||
LIBBPF_API struct bpf_map *
|
||||
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
|
||||
|
||||
@@ -891,8 +748,6 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
|
||||
(pos) = bpf_object__next_map((obj), (pos)))
|
||||
#define bpf_map__for_each bpf_object__for_each_map
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead")
|
||||
struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
|
||||
LIBBPF_API struct bpf_map *
|
||||
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
|
||||
|
||||
@@ -926,9 +781,6 @@ LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
|
||||
*/
|
||||
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
|
||||
/* get map definition */
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead")
|
||||
const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
|
||||
/* get map name */
|
||||
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
|
||||
/* get/set map type */
|
||||
@@ -937,8 +789,6 @@ LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
|
||||
/* get/set map size (max_entries) */
|
||||
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__set_max_entries() instead")
|
||||
LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
|
||||
/* get/set map flags */
|
||||
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
|
||||
@@ -961,17 +811,9 @@ LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
|
||||
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
|
||||
|
||||
typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
|
||||
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
|
||||
bpf_map_clear_priv_t clear_priv);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
|
||||
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
|
||||
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
|
||||
const void *data, size_t size);
|
||||
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__type() instead")
|
||||
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
|
||||
|
||||
/**
|
||||
* @brief **bpf_map__is_internal()** tells the caller whether or not the
|
||||
@@ -1094,65 +936,6 @@ LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
|
||||
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
|
||||
const void *cur_key, void *next_key, size_t key_sz);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_get_error()** extracts the error code from the passed
|
||||
* pointer
|
||||
* @param ptr pointer returned from libbpf API function
|
||||
* @return error code; or 0 if no error occurred
|
||||
*
|
||||
* Many libbpf API functions which return pointers have logic to encode error
|
||||
* codes as pointers, and do not return NULL. Meaning **libbpf_get_error()**
|
||||
* should be used on the return value from these functions immediately after
|
||||
* calling the API function, with no intervening calls that could clobber the
|
||||
* `errno` variable. Consult the individual function's documentation to verify
|
||||
* whether this logic applies.
|
||||
*
|
||||
* For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)`
|
||||
* is enabled, NULL is returned on error instead.
|
||||
*
|
||||
* If ptr is NULL, then errno should be already set by the failing
|
||||
* API, because libbpf never returns NULL on success and it now always
|
||||
* sets errno on error.
|
||||
*
|
||||
* Example usage:
|
||||
*
|
||||
* struct perf_buffer *pb;
|
||||
*
|
||||
* pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
|
||||
* err = libbpf_get_error(pb);
|
||||
* if (err) {
|
||||
* pb = NULL;
|
||||
* fprintf(stderr, "failed to open perf buffer: %d\n", err);
|
||||
* goto cleanup;
|
||||
* }
|
||||
*/
|
||||
LIBBPF_API long libbpf_get_error(const void *ptr);
|
||||
|
||||
struct bpf_prog_load_attr {
|
||||
const char *file;
|
||||
enum bpf_prog_type prog_type;
|
||||
enum bpf_attach_type expected_attach_type;
|
||||
int ifindex;
|
||||
int log_level;
|
||||
int prog_flags;
|
||||
};
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead")
|
||||
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
|
||||
struct bpf_object **pobj, int *prog_fd);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead")
|
||||
LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
|
||||
struct bpf_object **pobj, int *prog_fd);
|
||||
|
||||
/* XDP related API */
|
||||
struct xdp_link_info {
|
||||
__u32 prog_id;
|
||||
__u32 drv_prog_id;
|
||||
__u32 hw_prog_id;
|
||||
__u32 skb_prog_id;
|
||||
__u8 attach_mode;
|
||||
};
|
||||
|
||||
struct bpf_xdp_set_link_opts {
|
||||
size_t sz;
|
||||
int old_fd;
|
||||
@@ -1160,17 +943,6 @@ struct bpf_xdp_set_link_opts {
|
||||
};
|
||||
#define bpf_xdp_set_link_opts__last_field old_fd
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead")
|
||||
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead")
|
||||
LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
|
||||
const struct bpf_xdp_set_link_opts *opts);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query_id() instead")
|
||||
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query() instead")
|
||||
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
|
||||
size_t info_size, __u32 flags);
|
||||
|
||||
struct bpf_xdp_attach_opts {
|
||||
size_t sz;
|
||||
int old_prog_fd;
|
||||
@@ -1241,6 +1013,7 @@ LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
|
||||
|
||||
/* Ring buffer APIs */
|
||||
struct ring_buffer;
|
||||
struct user_ring_buffer;
|
||||
|
||||
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
|
||||
|
||||
@@ -1260,6 +1033,112 @@ LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
|
||||
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
|
||||
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
|
||||
|
||||
struct user_ring_buffer_opts {
|
||||
size_t sz; /* size of this struct, for forward/backward compatibility */
|
||||
};
|
||||
|
||||
#define user_ring_buffer_opts__last_field sz
|
||||
|
||||
/* @brief **user_ring_buffer__new()** creates a new instance of a user ring
|
||||
* buffer.
|
||||
*
|
||||
* @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
|
||||
* @param opts Options for how the ring buffer should be created.
|
||||
* @return A user ring buffer on success; NULL and errno being set on a
|
||||
* failure.
|
||||
*/
|
||||
LIBBPF_API struct user_ring_buffer *
|
||||
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);
|
||||
|
||||
/* @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
|
||||
* user ring buffer.
|
||||
* @param rb A pointer to a user ring buffer.
|
||||
* @param size The size of the sample, in bytes.
|
||||
* @return A pointer to an 8-byte aligned reserved region of the user ring
|
||||
* buffer; NULL, and errno being set if a sample could not be reserved.
|
||||
*
|
||||
* This function is *not* thread safe, and callers must synchronize accessing
|
||||
* this function if there are multiple producers. If a size is requested that
|
||||
* is larger than the size of the entire ring buffer, errno will be set to
|
||||
* E2BIG and NULL is returned. If the ring buffer could accommodate the size,
|
||||
* but currently does not have enough space, errno is set to ENOSPC and NULL is
|
||||
* returned.
|
||||
*
|
||||
* After initializing the sample, callers must invoke
|
||||
* **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise,
|
||||
* the sample must be freed with **user_ring_buffer__discard()**.
|
||||
*/
|
||||
LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);
|
||||
|
||||
/* @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
|
||||
* ring buffer, possibly blocking for up to @timeout_ms until a sample becomes
|
||||
* available.
|
||||
* @param rb The user ring buffer.
|
||||
* @param size The size of the sample, in bytes.
|
||||
* @param timeout_ms The amount of time, in milliseconds, for which the caller
|
||||
* should block when waiting for a sample. -1 causes the caller to block
|
||||
* indefinitely.
|
||||
* @return A pointer to an 8-byte aligned reserved region of the user ring
|
||||
* buffer; NULL, and errno being set if a sample could not be reserved.
|
||||
*
|
||||
* This function is *not* thread safe, and callers must synchronize
|
||||
* accessing this function if there are multiple producers.
|
||||
*
|
||||
* If **timeout_ms** is -1, the function will block indefinitely until a sample
|
||||
* becomes available. Otherwise, **timeout_ms** must be non-negative, or errno
|
||||
* is set to EINVAL, and NULL is returned. If **timeout_ms** is 0, no blocking
|
||||
* will occur and the function will return immediately after attempting to
|
||||
* reserve a sample.
|
||||
*
|
||||
* If **size** is larger than the size of the entire ring buffer, errno is set
|
||||
* to E2BIG and NULL is returned. If the ring buffer could accommodate
|
||||
* **size**, but currently does not have enough space, the caller will block
|
||||
* until at most **timeout_ms** has elapsed. If insufficient space is available
|
||||
* at that time, errno is set to ENOSPC, and NULL is returned.
|
||||
*
|
||||
* The kernel guarantees that it will wake up this thread to check if
|
||||
* sufficient space is available in the ring buffer at least once per
|
||||
* invocation of the **bpf_ringbuf_drain()** helper function, provided that at
|
||||
* least one sample is consumed, and the BPF program did not invoke the
|
||||
* function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the
|
||||
* kernel does not guarantee this. If the helper function is invoked with
|
||||
* BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is
|
||||
* consumed.
|
||||
*
|
||||
* When a sample of size **size** is found within **timeout_ms**, a pointer to
|
||||
* the sample is returned. After initializing the sample, callers must invoke
|
||||
* **user_ring_buffer__submit()** to post the sample to the ring buffer.
|
||||
* Otherwise, the sample must be freed with **user_ring_buffer__discard()**.
|
||||
*/
|
||||
LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
|
||||
__u32 size,
|
||||
int timeout_ms);
|
||||
|
||||
/* @brief **user_ring_buffer__submit()** submits a previously reserved sample
|
||||
* into the ring buffer.
|
||||
* @param rb The user ring buffer.
|
||||
* @param sample A reserved sample.
|
||||
*
|
||||
* It is not necessary to synchronize amongst multiple producers when invoking
|
||||
* this function.
|
||||
*/
|
||||
LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);
|
||||
|
||||
/* @brief **user_ring_buffer__discard()** discards a previously reserved sample.
|
||||
* @param rb The user ring buffer.
|
||||
* @param sample A reserved sample.
|
||||
*
|
||||
* It is not necessary to synchronize amongst multiple producers when invoking
|
||||
* this function.
|
||||
*/
|
||||
LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);
|
||||
|
||||
/* @brief **user_ring_buffer__free()** frees a ring buffer that was previously
|
||||
* created with **user_ring_buffer__new()**.
|
||||
* @param rb The user ring buffer being freed.
|
||||
*/
|
||||
LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb);
|
||||
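Taken together, a user-space producer loop built on this API might look like the following sketch (the map fd and the sample layout are illustrative assumptions, not part of this diff):

#include <errno.h>
#include <string.h>
#include <bpf/libbpf.h>

/* Hypothetical sample layout agreed on with the BPF program side */
struct my_sample {
	int pid;
	char comm[16];
};

/* Sketch: publish one sample to a BPF_MAP_TYPE_USER_RINGBUF map. */
static int publish_sample(int map_fd, int pid, const char *comm)
{
	struct user_ring_buffer *rb;
	struct my_sample *s;

	rb = user_ring_buffer__new(map_fd, NULL);
	if (!rb)
		return -errno;

	/* block for up to 100ms waiting for free space */
	s = user_ring_buffer__reserve_blocking(rb, sizeof(*s), 100);
	if (!s) {
		user_ring_buffer__free(rb);
		return -errno;
	}
	s->pid = pid;
	strncpy(s->comm, comm, sizeof(s->comm) - 1);
	s->comm[sizeof(s->comm) - 1] = '\0';

	user_ring_buffer__submit(rb, s); /* or user_ring_buffer__discard(rb, s) */
	user_ring_buffer__free(rb);
	return 0;
}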
|
||||
/* Perf buffer APIs */
|
||||
struct perf_buffer;
|
||||
|
||||
@@ -1269,17 +1148,7 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
|
||||
|
||||
/* common use perf buffer options */
|
||||
struct perf_buffer_opts {
|
||||
union {
|
||||
size_t sz;
|
||||
struct { /* DEPRECATED: will be removed in v1.0 */
|
||||
/* if specified, sample_cb is called for each sample */
|
||||
perf_buffer_sample_fn sample_cb;
|
||||
/* if specified, lost_cb is called for each batch of lost samples */
|
||||
perf_buffer_lost_fn lost_cb;
|
||||
/* ctx is provided to sample_cb and lost_cb */
|
||||
void *ctx;
|
||||
};
|
||||
};
|
||||
size_t sz;
|
||||
};
|
||||
#define perf_buffer_opts__last_field sz
|
||||
|
||||
@@ -1300,21 +1169,6 @@ perf_buffer__new(int map_fd, size_t page_cnt,
|
||||
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
|
||||
const struct perf_buffer_opts *opts);
|
||||
|
||||
LIBBPF_API struct perf_buffer *
|
||||
perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
|
||||
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
|
||||
const struct perf_buffer_opts *opts);
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead")
|
||||
struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
|
||||
const struct perf_buffer_opts *opts);
|
||||
|
||||
#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__)
|
||||
#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \
|
||||
perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts)
|
||||
#define ___perf_buffer_new3(map_fd, page_cnt, opts) \
|
||||
perf_buffer__new_deprecated(map_fd, page_cnt, opts)
|
||||
|
||||
enum bpf_perf_event_ret {
|
||||
LIBBPF_PERF_EVENT_DONE = 0,
|
||||
LIBBPF_PERF_EVENT_ERROR = -1,
|
||||
@@ -1328,21 +1182,9 @@ typedef enum bpf_perf_event_ret
|
||||
|
||||
/* raw perf buffer options, giving most power and control */
|
||||
struct perf_buffer_raw_opts {
|
||||
union {
|
||||
struct {
|
||||
size_t sz;
|
||||
long :0;
|
||||
long :0;
|
||||
};
|
||||
struct { /* DEPRECATED: will be removed in v1.0 */
|
||||
/* perf event attrs passed directly into perf_event_open() */
|
||||
struct perf_event_attr *attr;
|
||||
/* raw event callback */
|
||||
perf_buffer_event_fn event_cb;
|
||||
/* ctx is provided to event_cb */
|
||||
void *ctx;
|
||||
};
|
||||
};
|
||||
size_t sz;
|
||||
long :0;
|
||||
long :0;
|
||||
/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
|
||||
* max_entries of given PERF_EVENT_ARRAY map)
|
||||
*/
|
||||
@@ -1354,26 +1196,13 @@ struct perf_buffer_raw_opts {
};
#define perf_buffer_raw_opts__last_field map_keys

struct perf_event_attr;

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
		     perf_buffer_event_fn event_cb, void *ctx,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
			    perf_buffer_event_fn event_cb, void *ctx,
			    const struct perf_buffer_raw_opts *opts);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead")
struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
						    const struct perf_buffer_raw_opts *opts);

#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__)
#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \
	perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts)
#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \
	perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts)

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
@@ -1381,15 +1210,22 @@ LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);

typedef enum bpf_perf_event_ret
	(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
				  void *private_data);
LIBBPF_DEPRECATED_SINCE(0, 8, "use perf_buffer__poll() or perf_buffer__consume() instead")
LIBBPF_API enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data);
/**
 * @brief **perf_buffer__buffer()** returns the per-cpu raw mmap()'ed underlying
 * memory region of the ring buffer.
 * This ring buffer can be used to implement a custom events consumer.
 * The ring buffer starts with the *struct perf_event_mmap_page*, which
 * holds the ring buffer managment fields, when accessing the header
 * structure it's important to be SMP aware.
 * You can refer to *perf_event_read_simple* for a simple example.
 * @param pb the perf buffer structure
 * @param buf_idx the buffer index to retreive
 * @param buf (out) gets the base pointer of the mmap()'ed memory
 * @param buf_size (out) gets the size of the mmap()'ed region
 * @return 0 on success, negative error code for failure
 */
LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
size_t *buf_size);
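A caller-side sketch of the accessor documented above, iterating every per-CPU region with `perf_buffer__buffer_cnt()`; `pb` is assumed to have been created earlier with `perf_buffer__new()`, and the printout stands in for a real custom consumer.

```c
#include <stdio.h>
#include <bpf/libbpf.h>

static void dump_raw_buffers(struct perf_buffer *pb)
{
	size_t n = perf_buffer__buffer_cnt(pb);

	for (int i = 0; i < (int)n; i++) {
		void *base;
		size_t size;

		/* returns 0 on success, negative error code on failure */
		if (perf_buffer__buffer(pb, i, &base, &size))
			continue;

		/* base points at struct perf_event_mmap_page followed by the
		 * per-CPU ring; a custom consumer must access the header
		 * fields in an SMP-aware way, as the comment above notes.
		 */
		printf("buffer #%d: %p, %zu bytes\n", i, base, size);
	}
}
```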

struct bpf_prog_linfo;
struct bpf_prog_info;
@@ -1412,14 +1248,6 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_prog_type() instead")
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_map_type() instead")
LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_helper() instead")
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "implement your own or use bpftool for feature detection")
LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
@@ -1463,72 +1291,6 @@ LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
enum bpf_func_id helper_id, const void *opts);
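A minimal feature-detection sketch using the probe APIs declared above, which replace the removed `bpf_probe_*()` variants; each returns 1 if supported, 0 if not, and a negative error code on failure. The specific program, map, and helper choices below are only examples.

```c
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	int ret;

	ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL);
	printf("XDP programs:            %s\n", ret == 1 ? "yes" : "no");

	ret = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL);
	printf("BPF ring buffer map:     %s\n", ret == 1 ? "yes" : "no");

	ret = libbpf_probe_bpf_helper(BPF_PROG_TYPE_KPROBE,
				      BPF_FUNC_get_attach_cookie, NULL);
	printf("bpf_get_attach_cookie(): %s\n", ret == 1 ? "yes" : "no");

	return 0;
}
```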

/*
 * Get bpf_prog_info in continuous memory
 *
 * struct bpf_prog_info has multiple arrays. The user has option to choose
 * arrays to fetch from kernel. The following APIs provide an uniform way to
 * fetch these data. All arrays in bpf_prog_info are stored in a single
 * continuous memory region. This makes it easy to store the info in a
 * file.
 *
 * Before writing bpf_prog_info_linear to files, it is necessary to
 * translate pointers in bpf_prog_info to offsets. Helper functions
 * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
 * are introduced to switch between pointers and offsets.
 *
 * Examples:
 *   # To fetch map_ids and prog_tags:
 *   __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
 *           (1UL << BPF_PROG_INFO_PROG_TAGS);
 *   struct bpf_prog_info_linear *info_linear =
 *           bpf_program__get_prog_info_linear(fd, arrays);
 *
 *   # To save data in file
 *   bpf_program__bpil_addr_to_offs(info_linear);
 *   write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
 *
 *   # To read data from file
 *   read(f, info_linear, <proper_size>);
 *   bpf_program__bpil_offs_to_addr(info_linear);
 */
enum bpf_prog_info_array {
	BPF_PROG_INFO_FIRST_ARRAY = 0,
	BPF_PROG_INFO_JITED_INSNS = 0,
	BPF_PROG_INFO_XLATED_INSNS,
	BPF_PROG_INFO_MAP_IDS,
	BPF_PROG_INFO_JITED_KSYMS,
	BPF_PROG_INFO_JITED_FUNC_LENS,
	BPF_PROG_INFO_FUNC_INFO,
	BPF_PROG_INFO_LINE_INFO,
	BPF_PROG_INFO_JITED_LINE_INFO,
	BPF_PROG_INFO_PROG_TAGS,
	BPF_PROG_INFO_LAST_ARRAY,
};

struct bpf_prog_info_linear {
	/* size of struct bpf_prog_info, when the tool is compiled */
	__u32 info_len;
	/* total bytes allocated for data, round up to 8 bytes */
	__u32 data_len;
	/* which arrays are included in data */
	__u64 arrays;
	struct bpf_prog_info info;
	__u8 data[];
};

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);

/**
 * @brief **libbpf_num_possible_cpus()** is a helper function to get the
 * number of possible CPUs that the host kernel supports and expects.

src/libbpf.map (137 lines changed)
@@ -1,29 +1,14 @@
|
||||
LIBBPF_0.0.1 {
|
||||
global:
|
||||
bpf_btf_get_fd_by_id;
|
||||
bpf_create_map;
|
||||
bpf_create_map_in_map;
|
||||
bpf_create_map_in_map_node;
|
||||
bpf_create_map_name;
|
||||
bpf_create_map_node;
|
||||
bpf_create_map_xattr;
|
||||
bpf_load_btf;
|
||||
bpf_load_program;
|
||||
bpf_load_program_xattr;
|
||||
bpf_map__btf_key_type_id;
|
||||
bpf_map__btf_value_type_id;
|
||||
bpf_map__def;
|
||||
bpf_map__fd;
|
||||
bpf_map__is_offload_neutral;
|
||||
bpf_map__name;
|
||||
bpf_map__next;
|
||||
bpf_map__pin;
|
||||
bpf_map__prev;
|
||||
bpf_map__priv;
|
||||
bpf_map__reuse_fd;
|
||||
bpf_map__set_ifindex;
|
||||
bpf_map__set_inner_map_fd;
|
||||
bpf_map__set_priv;
|
||||
bpf_map__unpin;
|
||||
bpf_map_delete_elem;
|
||||
bpf_map_get_fd_by_id;
|
||||
@@ -38,79 +23,37 @@ LIBBPF_0.0.1 {
|
||||
bpf_object__btf_fd;
|
||||
bpf_object__close;
|
||||
bpf_object__find_map_by_name;
|
||||
bpf_object__find_map_by_offset;
|
||||
bpf_object__find_program_by_title;
|
||||
bpf_object__kversion;
|
||||
bpf_object__load;
|
||||
bpf_object__name;
|
||||
bpf_object__next;
|
||||
bpf_object__open;
|
||||
bpf_object__open_buffer;
|
||||
bpf_object__open_xattr;
|
||||
bpf_object__pin;
|
||||
bpf_object__pin_maps;
|
||||
bpf_object__pin_programs;
|
||||
bpf_object__priv;
|
||||
bpf_object__set_priv;
|
||||
bpf_object__unload;
|
||||
bpf_object__unpin_maps;
|
||||
bpf_object__unpin_programs;
|
||||
bpf_perf_event_read_simple;
|
||||
bpf_prog_attach;
|
||||
bpf_prog_detach;
|
||||
bpf_prog_detach2;
|
||||
bpf_prog_get_fd_by_id;
|
||||
bpf_prog_get_next_id;
|
||||
bpf_prog_load;
|
||||
bpf_prog_load_xattr;
|
||||
bpf_prog_query;
|
||||
bpf_prog_test_run;
|
||||
bpf_prog_test_run_xattr;
|
||||
bpf_program__fd;
|
||||
bpf_program__is_kprobe;
|
||||
bpf_program__is_perf_event;
|
||||
bpf_program__is_raw_tracepoint;
|
||||
bpf_program__is_sched_act;
|
||||
bpf_program__is_sched_cls;
|
||||
bpf_program__is_socket_filter;
|
||||
bpf_program__is_tracepoint;
|
||||
bpf_program__is_xdp;
|
||||
bpf_program__load;
|
||||
bpf_program__next;
|
||||
bpf_program__nth_fd;
|
||||
bpf_program__pin;
|
||||
bpf_program__pin_instance;
|
||||
bpf_program__prev;
|
||||
bpf_program__priv;
|
||||
bpf_program__set_expected_attach_type;
|
||||
bpf_program__set_ifindex;
|
||||
bpf_program__set_kprobe;
|
||||
bpf_program__set_perf_event;
|
||||
bpf_program__set_prep;
|
||||
bpf_program__set_priv;
|
||||
bpf_program__set_raw_tracepoint;
|
||||
bpf_program__set_sched_act;
|
||||
bpf_program__set_sched_cls;
|
||||
bpf_program__set_socket_filter;
|
||||
bpf_program__set_tracepoint;
|
||||
bpf_program__set_type;
|
||||
bpf_program__set_xdp;
|
||||
bpf_program__title;
|
||||
bpf_program__unload;
|
||||
bpf_program__unpin;
|
||||
bpf_program__unpin_instance;
|
||||
bpf_prog_linfo__free;
|
||||
bpf_prog_linfo__new;
|
||||
bpf_prog_linfo__lfind_addr_func;
|
||||
bpf_prog_linfo__lfind;
|
||||
bpf_raw_tracepoint_open;
|
||||
bpf_set_link_xdp_fd;
|
||||
bpf_task_fd_query;
|
||||
bpf_verify_program;
|
||||
btf__fd;
|
||||
btf__find_by_name;
|
||||
btf__free;
|
||||
btf__get_from_id;
|
||||
btf__name_by_offset;
|
||||
btf__new;
|
||||
btf__resolve_size;
|
||||
@@ -127,48 +70,24 @@ LIBBPF_0.0.1 {
|
||||
|
||||
LIBBPF_0.0.2 {
|
||||
global:
|
||||
bpf_probe_helper;
|
||||
bpf_probe_map_type;
|
||||
bpf_probe_prog_type;
|
||||
bpf_map__resize;
|
||||
bpf_map_lookup_elem_flags;
|
||||
bpf_object__btf;
|
||||
bpf_object__find_map_fd_by_name;
|
||||
bpf_get_link_xdp_id;
|
||||
btf__dedup;
|
||||
btf__get_map_kv_tids;
|
||||
btf__get_nr_types;
|
||||
btf__get_raw_data;
|
||||
btf__load;
|
||||
btf_ext__free;
|
||||
btf_ext__func_info_rec_size;
|
||||
btf_ext__get_raw_data;
|
||||
btf_ext__line_info_rec_size;
|
||||
btf_ext__new;
|
||||
btf_ext__reloc_func_info;
|
||||
btf_ext__reloc_line_info;
|
||||
xsk_umem__create;
|
||||
xsk_socket__create;
|
||||
xsk_umem__delete;
|
||||
xsk_socket__delete;
|
||||
xsk_umem__fd;
|
||||
xsk_socket__fd;
|
||||
bpf_program__get_prog_info_linear;
|
||||
bpf_program__bpil_addr_to_offs;
|
||||
bpf_program__bpil_offs_to_addr;
|
||||
} LIBBPF_0.0.1;
|
||||
|
||||
LIBBPF_0.0.3 {
|
||||
global:
|
||||
bpf_map__is_internal;
|
||||
bpf_map_freeze;
|
||||
btf__finalize_data;
|
||||
} LIBBPF_0.0.2;
|
||||
|
||||
LIBBPF_0.0.4 {
|
||||
global:
|
||||
bpf_link__destroy;
|
||||
bpf_object__load_xattr;
|
||||
bpf_program__attach_kprobe;
|
||||
bpf_program__attach_perf_event;
|
||||
bpf_program__attach_raw_tracepoint;
|
||||
@@ -176,14 +95,10 @@ LIBBPF_0.0.4 {
|
||||
bpf_program__attach_uprobe;
|
||||
btf_dump__dump_type;
|
||||
btf_dump__free;
|
||||
btf_dump__new;
|
||||
btf__parse_elf;
|
||||
libbpf_num_possible_cpus;
|
||||
perf_buffer__free;
|
||||
perf_buffer__new;
|
||||
perf_buffer__new_raw;
|
||||
perf_buffer__poll;
|
||||
xsk_umem__create;
|
||||
} LIBBPF_0.0.3;
|
||||
|
||||
LIBBPF_0.0.5 {
|
||||
@@ -193,7 +108,6 @@ LIBBPF_0.0.5 {
|
||||
|
||||
LIBBPF_0.0.6 {
|
||||
global:
|
||||
bpf_get_link_xdp_info;
|
||||
bpf_map__get_pin_path;
|
||||
bpf_map__is_pinned;
|
||||
bpf_map__set_pin_path;
|
||||
@@ -202,9 +116,6 @@ LIBBPF_0.0.6 {
|
||||
bpf_program__attach_trace;
|
||||
bpf_program__get_expected_attach_type;
|
||||
bpf_program__get_type;
|
||||
bpf_program__is_tracing;
|
||||
bpf_program__set_tracing;
|
||||
bpf_program__size;
|
||||
btf__find_by_name_kind;
|
||||
libbpf_find_vmlinux_btf_id;
|
||||
} LIBBPF_0.0.5;
|
||||
@@ -224,14 +135,8 @@ LIBBPF_0.0.7 {
|
||||
bpf_object__detach_skeleton;
|
||||
bpf_object__load_skeleton;
|
||||
bpf_object__open_skeleton;
|
||||
bpf_probe_large_insn_limit;
|
||||
bpf_prog_attach_xattr;
|
||||
bpf_program__attach;
|
||||
bpf_program__name;
|
||||
bpf_program__is_extension;
|
||||
bpf_program__is_struct_ops;
|
||||
bpf_program__set_extension;
|
||||
bpf_program__set_struct_ops;
|
||||
btf__align_of;
|
||||
libbpf_find_kernel_btf;
|
||||
} LIBBPF_0.0.6;
|
||||
@@ -250,10 +155,7 @@ LIBBPF_0.0.8 {
|
||||
bpf_prog_attach_opts;
|
||||
bpf_program__attach_cgroup;
|
||||
bpf_program__attach_lsm;
|
||||
bpf_program__is_lsm;
|
||||
bpf_program__set_attach_target;
|
||||
bpf_program__set_lsm;
|
||||
bpf_set_link_xdp_fd_opts;
|
||||
} LIBBPF_0.0.7;
|
||||
|
||||
LIBBPF_0.0.9 {
|
||||
@@ -291,9 +193,7 @@ LIBBPF_0.1.0 {
|
||||
bpf_map__value_size;
|
||||
bpf_program__attach_xdp;
|
||||
bpf_program__autoload;
|
||||
bpf_program__is_sk_lookup;
|
||||
bpf_program__set_autoload;
|
||||
bpf_program__set_sk_lookup;
|
||||
btf__parse;
|
||||
btf__parse_raw;
|
||||
btf__pointer_size;
|
||||
@@ -336,7 +236,6 @@ LIBBPF_0.2.0 {
|
||||
perf_buffer__buffer_fd;
|
||||
perf_buffer__epoll_fd;
|
||||
perf_buffer__consume_buffer;
|
||||
xsk_socket__create_shared;
|
||||
} LIBBPF_0.1.0;
|
||||
|
||||
LIBBPF_0.3.0 {
|
||||
@@ -348,8 +247,6 @@ LIBBPF_0.3.0 {
|
||||
btf__new_empty_split;
|
||||
btf__new_split;
|
||||
ring_buffer__epoll_fd;
|
||||
xsk_setup_xdp_prog;
|
||||
xsk_socket__update_xskmap;
|
||||
} LIBBPF_0.2.0;
|
||||
|
||||
LIBBPF_0.4.0 {
|
||||
@@ -397,7 +294,6 @@ LIBBPF_0.6.0 {
|
||||
bpf_object__next_program;
|
||||
bpf_object__prev_map;
|
||||
bpf_object__prev_program;
|
||||
bpf_prog_load_deprecated;
|
||||
bpf_prog_load;
|
||||
bpf_program__flags;
|
||||
bpf_program__insn_cnt;
|
||||
@@ -407,18 +303,14 @@ LIBBPF_0.6.0 {
|
||||
btf__add_decl_tag;
|
||||
btf__add_type_tag;
|
||||
btf__dedup;
|
||||
btf__dedup_deprecated;
|
||||
btf__raw_data;
|
||||
btf__type_cnt;
|
||||
btf_dump__new;
|
||||
btf_dump__new_deprecated;
|
||||
libbpf_major_version;
|
||||
libbpf_minor_version;
|
||||
libbpf_version_string;
|
||||
perf_buffer__new;
|
||||
perf_buffer__new_deprecated;
|
||||
perf_buffer__new_raw;
|
||||
perf_buffer__new_raw_deprecated;
|
||||
} LIBBPF_0.5.0;
|
||||
|
||||
LIBBPF_0.7.0 {
|
||||
@@ -434,10 +326,11 @@ LIBBPF_0.7.0 {
|
||||
bpf_xdp_detach;
|
||||
bpf_xdp_query;
|
||||
bpf_xdp_query_id;
|
||||
btf_ext__raw_data;
|
||||
libbpf_probe_bpf_helper;
|
||||
libbpf_probe_bpf_map_type;
|
||||
libbpf_probe_bpf_prog_type;
|
||||
libbpf_set_memlock_rlim_max;
|
||||
libbpf_set_memlock_rlim;
|
||||
} LIBBPF_0.6.0;
|
||||
|
||||
LIBBPF_0.8.0 {
|
||||
@@ -459,3 +352,29 @@ LIBBPF_0.8.0 {
|
||||
libbpf_register_prog_handler;
|
||||
libbpf_unregister_prog_handler;
|
||||
} LIBBPF_0.7.0;
|
||||
|
||||
LIBBPF_1.0.0 {
|
||||
global:
|
||||
bpf_obj_get_opts;
|
||||
bpf_prog_query_opts;
|
||||
bpf_program__attach_ksyscall;
|
||||
bpf_program__autoattach;
|
||||
bpf_program__set_autoattach;
|
||||
btf__add_enum64;
|
||||
btf__add_enum64_value;
|
||||
libbpf_bpf_attach_type_str;
|
||||
libbpf_bpf_link_type_str;
|
||||
libbpf_bpf_map_type_str;
|
||||
libbpf_bpf_prog_type_str;
|
||||
perf_buffer__buffer;
|
||||
};
|
||||
|
||||
LIBBPF_1.1.0 {
|
||||
global:
|
||||
user_ring_buffer__discard;
|
||||
user_ring_buffer__free;
|
||||
user_ring_buffer__new;
|
||||
user_ring_buffer__reserve;
|
||||
user_ring_buffer__reserve_blocking;
|
||||
user_ring_buffer__submit;
|
||||
} LIBBPF_1.0.0;
|
||||
|
||||
@@ -30,20 +30,10 @@
|
||||
/* Add checks for other versions below when planning deprecation of API symbols
|
||||
* with the LIBBPF_DEPRECATED_SINCE macro.
|
||||
*/
|
||||
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6)
|
||||
#define __LIBBPF_MARK_DEPRECATED_0_6(X) X
|
||||
#if __LIBBPF_CURRENT_VERSION_GEQ(1, 0)
|
||||
#define __LIBBPF_MARK_DEPRECATED_1_0(X) X
|
||||
#else
|
||||
#define __LIBBPF_MARK_DEPRECATED_0_6(X)
|
||||
#endif
|
||||
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7)
|
||||
#define __LIBBPF_MARK_DEPRECATED_0_7(X) X
|
||||
#else
|
||||
#define __LIBBPF_MARK_DEPRECATED_0_7(X)
|
||||
#endif
|
||||
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8)
|
||||
#define __LIBBPF_MARK_DEPRECATED_0_8(X) X
|
||||
#else
|
||||
#define __LIBBPF_MARK_DEPRECATED_0_8(X)
|
||||
#define __LIBBPF_MARK_DEPRECATED_1_0(X)
|
||||
#endif
|
||||
|
||||
/* This set of internal macros allows to do "function overloading" based on
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#include <linux/err.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include "libbpf_legacy.h"
|
||||
#include "relo_core.h"
|
||||
|
||||
/* make sure libbpf doesn't use kernel-only integer typedefs */
|
||||
@@ -109,9 +108,9 @@ static inline bool str_has_sfx(const char *str, const char *sfx)
|
||||
size_t str_len = strlen(str);
|
||||
size_t sfx_len = strlen(sfx);
|
||||
|
||||
if (sfx_len <= str_len)
|
||||
return strcmp(str + str_len - sfx_len, sfx);
|
||||
return false;
|
||||
if (sfx_len > str_len)
|
||||
return false;
|
||||
return strcmp(str + str_len - sfx_len, sfx) == 0;
|
||||
}
|
||||
|
||||
/* Symbol versioning is different between static and shared library.
|
||||
@@ -351,6 +350,10 @@ enum kern_feature_id {
|
||||
FEAT_MEMCG_ACCOUNT,
|
||||
/* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
|
||||
FEAT_BPF_COOKIE,
|
||||
/* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
|
||||
FEAT_BTF_ENUM64,
|
||||
/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
|
||||
FEAT_SYSCALL_WRAPPER,
|
||||
__FEAT_CNT,
|
||||
};
|
||||
|
||||
@@ -476,8 +479,6 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
|
||||
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
|
||||
__u32 kind);
|
||||
|
||||
extern enum libbpf_strict_mode libbpf_mode;
|
||||
|
||||
typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
|
||||
const char *sym_name, void *ctx);
|
||||
|
||||
@@ -496,12 +497,8 @@ static inline int libbpf_err(int ret)
|
||||
*/
|
||||
static inline int libbpf_err_errno(int ret)
|
||||
{
|
||||
if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS)
|
||||
/* errno is already assumed to be set on error */
|
||||
return ret < 0 ? -errno : ret;
|
||||
|
||||
/* legacy: on error return -1 directly and don't touch errno */
|
||||
return ret;
|
||||
/* errno is already assumed to be set on error */
|
||||
return ret < 0 ? -errno : ret;
|
||||
}
|
||||
|
||||
/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
|
||||
@@ -509,12 +506,7 @@ static inline void *libbpf_err_ptr(int err)
|
||||
{
|
||||
/* set errno on error, this doesn't break anything */
|
||||
errno = -err;
|
||||
|
||||
if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
|
||||
return NULL;
|
||||
|
||||
/* legacy: encode err as ptr */
|
||||
return ERR_PTR(err);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* handle pointer-returning APIs' error handling */
|
||||
@@ -524,11 +516,7 @@ static inline void *libbpf_ptr(void *ret)
|
||||
if (IS_ERR(ret))
|
||||
errno = -PTR_ERR(ret);
|
||||
|
||||
if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
|
||||
return IS_ERR(ret) ? NULL : ret;
|
||||
|
||||
/* legacy: pass-through original pointer */
|
||||
return ret;
|
||||
return IS_ERR(ret) ? NULL : ret;
|
||||
}
|
||||
|
||||
static inline bool str_is_empty(const char *s)
|
||||
@@ -580,4 +568,12 @@ struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
|
||||
const char *usdt_provider, const char *usdt_name,
|
||||
__u64 usdt_cookie);
|
||||
|
||||
static inline bool is_pow_of_2(size_t x)
|
||||
{
|
||||
return x && (x & (x - 1)) == 0;
|
||||
}
|
||||
|
||||
#define PROG_LOAD_ATTEMPTS 5
|
||||
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
|
||||
|
||||
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
|
||||
|
||||
@@ -20,6 +20,11 @@
extern "C" {
#endif

/* As of libbpf 1.0 libbpf_set_strict_mode() and enum libbpf_struct_mode have
 * no effect. But they are left in libbpf_legacy.h so that applications that
 * prepared for libbpf 1.0 before final release by using
 * libbpf_set_strict_mode() still work with libbpf 1.0+ without any changes.
 */
enum libbpf_strict_mode {
	/* Turn on all supported strict features of libbpf to simulate libbpf
	 * v1.0 behavior.
@@ -71,8 +76,8 @@ enum libbpf_strict_mode {
	 * first BPF program or map creation operation. This is done only if
	 * kernel is too old to support memcg-based memory accounting for BPF
	 * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
	 * but it can be overriden with libbpf_set_memlock_rlim_max() API.
	 * Note that libbpf_set_memlock_rlim_max() needs to be called before
	 * but it can be overriden with libbpf_set_memlock_rlim() API.
	 * Note that libbpf_set_memlock_rlim() needs to be called before
	 * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
	 * operation.
	 */
@@ -88,6 +93,25 @@ enum libbpf_strict_mode {

LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
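The comment above describes the automatic RLIMIT_MEMLOCK bump that libbpf 1.0 performs on pre-memcg kernels, now controlled by `libbpf_set_memlock_rlim()` (declared in bpf.h). A sketch of capping that limit before the first load; "my_prog.bpf.o" is a placeholder object file and error handling is abbreviated.

```c
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int load_with_capped_memlock(void)
{
	struct bpf_object *obj;
	int err;

	/* must run before the first bpf_prog_load(), bpf_map_create() or
	 * bpf_object__load() call, as noted in the comment above
	 */
	err = libbpf_set_memlock_rlim(64 * 1024 * 1024 /* 64 MiB */);
	if (err)
		return err;

	obj = bpf_object__open("my_prog.bpf.o");
	if (!obj)
		return -errno;

	err = bpf_object__load(obj);
	/* ... attach programs, use maps ... */
	bpf_object__close(obj);
	return err;
}
```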

/**
 * @brief **libbpf_get_error()** extracts the error code from the passed
 * pointer
 * @param ptr pointer returned from libbpf API function
 * @return error code; or 0 if no error occured
 *
 * Note, as of libbpf 1.0 this function is not necessary and not recommended
 * to be used. Libbpf doesn't return error code embedded into the pointer
 * itself. Instead, NULL is returned on error and error code is passed through
 * thread-local errno variable. **libbpf_get_error()** is just returning -errno
 * value if it receives NULL, which is correct only if errno hasn't been
 * modified between libbpf API call and corresponding **libbpf_get_error()**
 * call. Prefer to check return for NULL and use errno directly.
 *
 * This API is left in libbpf 1.0 to allow applications that were 1.0-ready
 * before final libbpf 1.0 without needing to change them.
 */
LIBBPF_API long libbpf_get_error(const void *ptr);
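A sketch of the libbpf 1.0 error convention the comment above recommends: check for NULL and read thread-local errno directly rather than calling `libbpf_get_error()`. "my_prog.bpf.o" is a placeholder path.

```c
#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_object *open_obj(void)
{
	struct bpf_object *obj;

	obj = bpf_object__open("my_prog.bpf.o");
	if (!obj) {
		/* preferred: NULL return, errno carries the error code */
		fprintf(stderr, "open failed: %d\n", -errno);
		return NULL;
	}
	/* legacy callers still work, because libbpf_get_error() simply
	 * returns -errno when handed a NULL pointer
	 */
	return obj;
}
```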

#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS

/* "Discouraged" APIs which don't follow consistent libbpf naming patterns.
@@ -101,6 +125,8 @@ struct bpf_map;
struct btf;
struct btf_ext;

LIBBPF_API struct btf *libbpf_find_kernel_btf(void);

LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);

@@ -17,47 +17,14 @@
|
||||
#include "libbpf.h"
|
||||
#include "libbpf_internal.h"
|
||||
|
||||
static bool grep(const char *buffer, const char *pattern)
|
||||
{
|
||||
return !!strstr(buffer, pattern);
|
||||
}
|
||||
|
||||
static int get_vendor_id(int ifindex)
|
||||
{
|
||||
char ifname[IF_NAMESIZE], path[64], buf[8];
|
||||
ssize_t len;
|
||||
int fd;
|
||||
|
||||
if (!if_indextoname(ifindex, ifname))
|
||||
return -1;
|
||||
|
||||
snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
|
||||
|
||||
fd = open(path, O_RDONLY | O_CLOEXEC);
|
||||
if (fd < 0)
|
||||
return -1;
|
||||
|
||||
len = read(fd, buf, sizeof(buf));
|
||||
close(fd);
|
||||
if (len < 0)
|
||||
return -1;
|
||||
if (len >= (ssize_t)sizeof(buf))
|
||||
return -1;
|
||||
buf[len] = '\0';
|
||||
|
||||
return strtol(buf, NULL, 0);
|
||||
}
|
||||
|
||||
static int probe_prog_load(enum bpf_prog_type prog_type,
|
||||
const struct bpf_insn *insns, size_t insns_cnt,
|
||||
char *log_buf, size_t log_buf_sz,
|
||||
__u32 ifindex)
|
||||
char *log_buf, size_t log_buf_sz)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
||||
.log_buf = log_buf,
|
||||
.log_size = log_buf_sz,
|
||||
.log_level = log_buf ? 1 : 0,
|
||||
.prog_ifindex = ifindex,
|
||||
);
|
||||
int fd, err, exp_err = 0;
|
||||
const char *exp_msg = NULL;
|
||||
@@ -161,31 +128,10 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
|
||||
if (opts)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0);
|
||||
ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
|
||||
return libbpf_err(ret);
|
||||
}
|
||||
|
||||
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
|
||||
{
|
||||
struct bpf_insn insns[2] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN()
|
||||
};
|
||||
|
||||
/* prefer libbpf_probe_bpf_prog_type() unless offload is requested */
|
||||
if (ifindex == 0)
|
||||
return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1;
|
||||
|
||||
if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
|
||||
/* nfp returns -EINVAL on exit(0) with TC offload */
|
||||
insns[0].imm = 2;
|
||||
|
||||
errno = 0;
|
||||
probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
|
||||
|
||||
return errno != EINVAL && errno != EOPNOTSUPP;
|
||||
}
|
||||
|
||||
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
||||
const char *str_sec, size_t str_len)
|
||||
{
|
||||
@@ -242,14 +188,12 @@ static int load_local_storage_btf(void)
|
||||
strs, sizeof(strs));
|
||||
}
|
||||
|
||||
static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
|
||||
static int probe_map_create(enum bpf_map_type map_type)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts);
|
||||
int key_size, value_size, max_entries;
|
||||
__u32 btf_key_type_id = 0, btf_value_type_id = 0;
|
||||
int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;
|
||||
|
||||
opts.map_ifindex = ifindex;
|
||||
int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err = 0;
|
||||
|
||||
key_size = sizeof(__u32);
|
||||
value_size = sizeof(__u32);
|
||||
@@ -287,6 +231,7 @@ static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
|
||||
return btf_fd;
|
||||
break;
|
||||
case BPF_MAP_TYPE_RINGBUF:
|
||||
case BPF_MAP_TYPE_USER_RINGBUF:
|
||||
key_size = 0;
|
||||
value_size = 0;
|
||||
max_entries = 4096;
|
||||
@@ -326,12 +271,6 @@ static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
|
||||
|
||||
if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
|
||||
map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
|
||||
/* TODO: probe for device, once libbpf has a function to create
|
||||
* map-in-map for offload
|
||||
*/
|
||||
if (ifindex)
|
||||
goto cleanup;
|
||||
|
||||
fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
|
||||
sizeof(__u32), sizeof(__u32), 1, NULL);
|
||||
if (fd_inner < 0)
|
||||
@@ -370,15 +309,10 @@ int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
|
||||
if (opts)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
ret = probe_map_create(map_type, 0);
|
||||
ret = probe_map_create(map_type);
|
||||
return libbpf_err(ret);
|
||||
}
|
||||
|
||||
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
{
|
||||
return probe_map_create(map_type, ifindex) == 1;
|
||||
}
|
||||
|
||||
int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
|
||||
const void *opts)
|
||||
{
|
||||
@@ -407,7 +341,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
|
||||
}
|
||||
|
||||
buf[0] = '\0';
|
||||
ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0);
|
||||
ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
|
||||
if (ret < 0)
|
||||
return libbpf_err(ret);
|
||||
|
||||
@@ -427,51 +361,3 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
|
||||
return 0;
|
||||
return 1; /* assume supported */
|
||||
}
|
||||
|
||||
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
|
||||
__u32 ifindex)
|
||||
{
|
||||
struct bpf_insn insns[2] = {
|
||||
BPF_EMIT_CALL(id),
|
||||
BPF_EXIT_INSN()
|
||||
};
|
||||
char buf[4096] = {};
|
||||
bool res;
|
||||
|
||||
probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex);
|
||||
res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
|
||||
|
||||
if (ifindex) {
|
||||
switch (get_vendor_id(ifindex)) {
|
||||
case 0x19ee: /* Netronome specific */
|
||||
res = res && !grep(buf, "not supported by FW") &&
|
||||
!grep(buf, "unsupported function id");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* Probe for availability of kernel commit (5.3):
|
||||
*
|
||||
* c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
|
||||
*/
|
||||
bool bpf_probe_large_insn_limit(__u32 ifindex)
|
||||
{
|
||||
struct bpf_insn insns[BPF_MAXINSNS + 1];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BPF_MAXINSNS; i++)
|
||||
insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
|
||||
insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
|
||||
|
||||
errno = 0;
|
||||
probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
|
||||
ifindex);
|
||||
|
||||
return errno != E2BIG && errno != EINVAL;
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
#ifndef __LIBBPF_VERSION_H
#define __LIBBPF_VERSION_H

#define LIBBPF_MAJOR_VERSION 0
#define LIBBPF_MINOR_VERSION 8
#define LIBBPF_MAJOR_VERSION 1
#define LIBBPF_MINOR_VERSION 1

#endif /* __LIBBPF_VERSION_H */
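A sketch combining the compile-time macros above with the runtime helpers exported since v0.6.0 (`libbpf_major_version()`, `libbpf_minor_version()`, `libbpf_version_string()`), useful when a binary built against 1.x headers may run against a different shared library.

```c
#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_version.h>

#if LIBBPF_MAJOR_VERSION < 1
#error "this application needs libbpf 1.x headers"
#endif

int main(void)
{
	printf("built against %d.%d, running with %s (%u.%u)\n",
	       LIBBPF_MAJOR_VERSION, LIBBPF_MINOR_VERSION,
	       libbpf_version_string(),
	       libbpf_major_version(), libbpf_minor_version());
	return 0;
}
```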
@@ -697,11 +697,6 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
|
||||
return err;
|
||||
}
|
||||
|
||||
static bool is_pow_of_2(size_t x)
|
||||
{
|
||||
return x && (x & (x - 1)) == 0;
|
||||
}
|
||||
|
||||
static int linker_sanity_check_elf(struct src_obj *obj)
|
||||
{
|
||||
struct src_sec *sec;
|
||||
@@ -1340,6 +1335,7 @@ recur:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
@@ -1362,6 +1358,7 @@ recur:
|
||||
case BTF_KIND_INT:
|
||||
case BTF_KIND_FLOAT:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
/* ignore encoding for int and enum values for enum */
|
||||
if (t1->size != t2->size) {
|
||||
pr_warn("global '%s': incompatible %s '%s' size %u and %u\n",
|
||||
|
||||
@@ -27,6 +27,14 @@ typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
|
||||
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
|
||||
void *cookie);
|
||||
|
||||
struct xdp_link_info {
|
||||
__u32 prog_id;
|
||||
__u32 drv_prog_id;
|
||||
__u32 hw_prog_id;
|
||||
__u32 skb_prog_id;
|
||||
__u8 attach_mode;
|
||||
};
|
||||
|
||||
struct xdp_id_md {
|
||||
int ifindex;
|
||||
__u32 flags;
|
||||
@@ -288,31 +296,6 @@ int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *o
|
||||
return bpf_xdp_attach(ifindex, -1, flags, opts);
|
||||
}
|
||||
|
||||
int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
|
||||
const struct bpf_xdp_set_link_opts *opts)
|
||||
{
|
||||
int old_fd = -1, ret;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (OPTS_HAS(opts, old_fd)) {
|
||||
old_fd = OPTS_GET(opts, old_fd, -1);
|
||||
flags |= XDP_FLAGS_REPLACE;
|
||||
}
|
||||
|
||||
ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
|
||||
return libbpf_err(ret);
|
||||
}
|
||||
|
||||
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
|
||||
return libbpf_err(ret);
|
||||
}
|
||||
|
||||
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
|
||||
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
|
||||
{
|
||||
@@ -413,30 +396,6 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
|
||||
size_t info_size, __u32 flags)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
|
||||
size_t sz;
|
||||
int err;
|
||||
|
||||
if (!info_size)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
err = bpf_xdp_query(ifindex, flags, &opts);
|
||||
if (err)
|
||||
return libbpf_err(err);
|
||||
|
||||
/* struct xdp_link_info field layout matches struct bpf_xdp_query_opts
|
||||
* layout after sz field
|
||||
*/
|
||||
sz = min(info_size, offsetofend(struct xdp_link_info, attach_mode));
|
||||
memcpy(info, &opts.prog_id, sz);
|
||||
memset((void *)info + sz, 0, info_size - sz);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
@@ -463,11 +422,6 @@ int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
}

int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
{
	return bpf_xdp_query_id(ifindex, flags, prog_id);
}
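A caller-side sketch of the replacement APIs behind this hunk: `bpf_xdp_query_id()` supersedes the removed `bpf_get_link_xdp_id()`, and `bpf_xdp_attach()`/`bpf_xdp_detach()` supersede `bpf_set_link_xdp_fd()`. The interface name and `prog_fd` are assumed to be supplied by the caller.

```c
#include <net/if.h>
#include <stdio.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int swap_xdp_prog(const char *ifname, int prog_fd)
{
	int ifindex = if_nametoindex(ifname);
	__u32 old_id = 0;
	int err;

	if (!ifindex)
		return -1;

	err = bpf_xdp_query_id(ifindex, 0, &old_id);
	if (err)
		return err;
	printf("%s: currently attached XDP prog id %u\n", ifname, old_id);

	/* attach the new program; detach later with bpf_xdp_detach() */
	return bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
}
```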
typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);
|
||||
|
||||
static int clsact_config(struct libbpf_nla_req *req)
|
||||
@@ -633,11 +587,12 @@ static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
|
||||
|
||||
static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
|
||||
{
|
||||
struct bpf_prog_info info = {};
|
||||
struct bpf_prog_info info;
|
||||
__u32 info_len = sizeof(info);
|
||||
char name[256];
|
||||
int len, ret;
|
||||
|
||||
memset(&info, 0, info_len);
|
||||
ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
src/relo_core.c (479 lines changed)
@@ -95,6 +95,7 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
|
||||
case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
|
||||
case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
|
||||
case BPF_CORE_TYPE_EXISTS: return "type_exists";
|
||||
case BPF_CORE_TYPE_MATCHES: return "type_matches";
|
||||
case BPF_CORE_TYPE_SIZE: return "type_size";
|
||||
case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
|
||||
case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
|
||||
@@ -123,6 +124,7 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
|
||||
case BPF_CORE_TYPE_ID_LOCAL:
|
||||
case BPF_CORE_TYPE_ID_TARGET:
|
||||
case BPF_CORE_TYPE_EXISTS:
|
||||
case BPF_CORE_TYPE_MATCHES:
|
||||
case BPF_CORE_TYPE_SIZE:
|
||||
return true;
|
||||
default:
|
||||
@@ -141,6 +143,86 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
|
||||
}
|
||||
}
|
||||
|
||||
int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
|
||||
const struct btf *targ_btf, __u32 targ_id, int level)
|
||||
{
|
||||
const struct btf_type *local_type, *targ_type;
|
||||
int depth = 32; /* max recursion depth */
|
||||
|
||||
/* caller made sure that names match (ignoring flavor suffix) */
|
||||
local_type = btf_type_by_id(local_btf, local_id);
|
||||
targ_type = btf_type_by_id(targ_btf, targ_id);
|
||||
if (!btf_kind_core_compat(local_type, targ_type))
|
||||
return 0;
|
||||
|
||||
recur:
|
||||
depth--;
|
||||
if (depth < 0)
|
||||
return -EINVAL;
|
||||
|
||||
local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
|
||||
targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
|
||||
if (!local_type || !targ_type)
|
||||
return -EINVAL;
|
||||
|
||||
if (!btf_kind_core_compat(local_type, targ_type))
|
||||
return 0;
|
||||
|
||||
switch (btf_kind(local_type)) {
|
||||
case BTF_KIND_UNKN:
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_ENUM64:
|
||||
return 1;
|
||||
case BTF_KIND_INT:
|
||||
/* just reject deprecated bitfield-like integers; all other
|
||||
* integers are by default compatible between each other
|
||||
*/
|
||||
return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
|
||||
case BTF_KIND_PTR:
|
||||
local_id = local_type->type;
|
||||
targ_id = targ_type->type;
|
||||
goto recur;
|
||||
case BTF_KIND_ARRAY:
|
||||
local_id = btf_array(local_type)->type;
|
||||
targ_id = btf_array(targ_type)->type;
|
||||
goto recur;
|
||||
case BTF_KIND_FUNC_PROTO: {
|
||||
struct btf_param *local_p = btf_params(local_type);
|
||||
struct btf_param *targ_p = btf_params(targ_type);
|
||||
__u16 local_vlen = btf_vlen(local_type);
|
||||
__u16 targ_vlen = btf_vlen(targ_type);
|
||||
int i, err;
|
||||
|
||||
if (local_vlen != targ_vlen)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
|
||||
if (level <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
|
||||
skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
|
||||
err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
|
||||
level - 1);
|
||||
if (err <= 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
/* tail recurse for return type check */
|
||||
skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
|
||||
skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
|
||||
goto recur;
|
||||
}
|
||||
default:
|
||||
pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
|
||||
btf_kind_str(local_type), local_id, targ_id);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Turn bpf_core_relo into a low- and high-level spec representation,
|
||||
* validating correctness along the way, as well as calculating resulting
|
||||
@@ -167,11 +249,11 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
|
||||
* just a parsed access string representation): [0, 1, 2, 3].
|
||||
*
|
||||
* High-level spec will capture only 3 points:
|
||||
* - intial zero-index access by pointer (&s->... is the same as &s[0]...);
|
||||
* - initial zero-index access by pointer (&s->... is the same as &s[0]...);
|
||||
* - field 'a' access (corresponds to '2' in low-level spec);
|
||||
* - array element #3 access (corresponds to '3' in low-level spec).
|
||||
*
|
||||
* Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
|
||||
* Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE,
|
||||
* TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
|
||||
* spec and raw_spec are kept empty.
|
||||
*
|
||||
@@ -186,7 +268,7 @@ int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
|
||||
struct bpf_core_accessor *acc;
|
||||
const struct btf_type *t;
|
||||
const char *name, *spec_str;
|
||||
__u32 id;
|
||||
__u32 id, name_off;
|
||||
__s64 sz;
|
||||
|
||||
spec_str = btf__name_by_offset(btf, relo->access_str_off);
|
||||
@@ -231,11 +313,13 @@ int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
|
||||
spec->len++;
|
||||
|
||||
if (core_relo_is_enumval_based(relo->kind)) {
|
||||
if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
|
||||
if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
|
||||
return -EINVAL;
|
||||
|
||||
/* record enumerator name in a first accessor */
|
||||
acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
|
||||
name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off
|
||||
: btf_enum64(t)[access_idx].name_off;
|
||||
acc->name = btf__name_by_offset(btf, name_off);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -340,7 +424,7 @@ recur:
|
||||
|
||||
if (btf_is_composite(local_type) && btf_is_composite(targ_type))
|
||||
return 1;
|
||||
if (btf_kind(local_type) != btf_kind(targ_type))
|
||||
if (!btf_kind_core_compat(local_type, targ_type))
|
||||
return 0;
|
||||
|
||||
switch (btf_kind(local_type)) {
|
||||
@@ -348,6 +432,7 @@ recur:
|
||||
case BTF_KIND_FLOAT:
|
||||
return 1;
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_ENUM64:
|
||||
case BTF_KIND_ENUM: {
|
||||
const char *local_name, *targ_name;
|
||||
size_t local_len, targ_len;
|
||||
@@ -477,6 +562,7 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
const struct bpf_core_accessor *local_acc;
|
||||
struct bpf_core_accessor *targ_acc;
|
||||
int i, sz, matched;
|
||||
__u32 name_off;
|
||||
|
||||
memset(targ_spec, 0, sizeof(*targ_spec));
|
||||
targ_spec->btf = targ_btf;
|
||||
@@ -484,9 +570,14 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
targ_spec->relo_kind = local_spec->relo_kind;
|
||||
|
||||
if (core_relo_is_type_based(local_spec->relo_kind)) {
|
||||
return bpf_core_types_are_compat(local_spec->btf,
|
||||
local_spec->root_type_id,
|
||||
targ_btf, targ_id);
|
||||
if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
|
||||
return bpf_core_types_match(local_spec->btf,
|
||||
local_spec->root_type_id,
|
||||
targ_btf, targ_id);
|
||||
else
|
||||
return bpf_core_types_are_compat(local_spec->btf,
|
||||
local_spec->root_type_id,
|
||||
targ_btf, targ_id);
|
||||
}
|
||||
|
||||
local_acc = &local_spec->spec[0];
|
||||
@@ -494,18 +585,22 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
|
||||
if (core_relo_is_enumval_based(local_spec->relo_kind)) {
|
||||
size_t local_essent_len, targ_essent_len;
|
||||
const struct btf_enum *e;
|
||||
const char *targ_name;
|
||||
|
||||
/* has to resolve to an enum */
|
||||
targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
|
||||
if (!btf_is_enum(targ_type))
|
||||
if (!btf_is_any_enum(targ_type))
|
||||
return 0;
|
||||
|
||||
local_essent_len = bpf_core_essential_name_len(local_acc->name);
|
||||
|
||||
for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
|
||||
targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
|
||||
for (i = 0; i < btf_vlen(targ_type); i++) {
|
||||
if (btf_is_enum(targ_type))
|
||||
name_off = btf_enum(targ_type)[i].name_off;
|
||||
else
|
||||
name_off = btf_enum64(targ_type)[i].name_off;
|
||||
|
||||
targ_name = btf__name_by_offset(targ_spec->btf, name_off);
|
||||
targ_essent_len = bpf_core_essential_name_len(targ_name);
|
||||
if (targ_essent_len != local_essent_len)
|
||||
continue;
|
||||
@@ -583,7 +678,7 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
||||
static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val, __u32 *field_sz, __u32 *type_id,
|
||||
__u64 *val, __u32 *field_sz, __u32 *type_id,
|
||||
bool *validate)
|
||||
{
|
||||
const struct bpf_core_accessor *acc;
|
||||
@@ -680,8 +775,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
*val = byte_sz;
|
||||
break;
|
||||
case BPF_CORE_FIELD_SIGNED:
|
||||
/* enums will be assumed unsigned */
|
||||
*val = btf_is_enum(mt) ||
|
||||
*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
|
||||
(btf_int_encoding(mt) & BTF_INT_SIGNED);
|
||||
if (validate)
|
||||
*validate = true; /* signedness is never ambiguous */
|
||||
@@ -708,7 +802,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
|
||||
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val, bool *validate)
|
||||
__u64 *val, bool *validate)
|
||||
{
|
||||
__s64 sz;
|
||||
|
||||
@@ -732,6 +826,7 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
*validate = false;
|
||||
break;
|
||||
case BPF_CORE_TYPE_EXISTS:
|
||||
case BPF_CORE_TYPE_MATCHES:
|
||||
*val = 1;
|
||||
break;
|
||||
case BPF_CORE_TYPE_SIZE:
|
||||
@@ -751,10 +846,9 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
|
||||
static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val)
|
||||
__u64 *val)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
|
||||
switch (relo->kind) {
|
||||
case BPF_CORE_ENUMVAL_EXISTS:
|
||||
@@ -764,8 +858,10 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
|
||||
if (!spec)
|
||||
return -EUCLEAN; /* request instruction poisoning */
|
||||
t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
|
||||
e = btf_enum(t) + spec->spec[0].idx;
|
||||
*val = e->val;
|
||||
if (btf_is_enum(t))
|
||||
*val = btf_enum(t)[spec->spec[0].idx].val;
|
||||
else
|
||||
*val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@@ -929,7 +1025,7 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
|
||||
int insn_idx, const struct bpf_core_relo *relo,
|
||||
int relo_idx, const struct bpf_core_relo_res *res)
|
||||
{
|
||||
__u32 orig_val, new_val;
|
||||
__u64 orig_val, new_val;
|
||||
__u8 class;
|
||||
|
||||
class = BPF_CLASS(insn->code);
|
||||
@@ -954,28 +1050,30 @@ poison:
|
||||
if (BPF_SRC(insn->code) != BPF_K)
|
||||
return -EINVAL;
|
||||
if (res->validate && insn->imm != orig_val) {
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n",
|
||||
prog_name, relo_idx,
|
||||
insn_idx, insn->imm, orig_val, new_val);
|
||||
insn_idx, insn->imm, (unsigned long long)orig_val,
|
||||
(unsigned long long)new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
orig_val = insn->imm;
|
||||
insn->imm = new_val;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx,
|
||||
orig_val, new_val);
|
||||
(unsigned long long)orig_val, (unsigned long long)new_val);
|
||||
break;
|
||||
case BPF_LDX:
|
||||
case BPF_ST:
|
||||
case BPF_STX:
|
||||
if (res->validate && insn->off != orig_val) {
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
|
||||
prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val,
|
||||
(unsigned long long)new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (new_val > SHRT_MAX) {
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
|
||||
prog_name, relo_idx, insn_idx, new_val);
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n",
|
||||
prog_name, relo_idx, insn_idx, (unsigned long long)new_val);
|
||||
return -ERANGE;
|
||||
}
|
||||
if (res->fail_memsz_adjust) {
|
||||
@@ -987,8 +1085,9 @@ poison:
|
||||
|
||||
orig_val = insn->off;
|
||||
insn->off = new_val;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
|
||||
prog_name, relo_idx, insn_idx, orig_val, new_val);
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx, (unsigned long long)orig_val,
|
||||
(unsigned long long)new_val);
|
||||
|
||||
if (res->new_sz != res->orig_sz) {
|
||||
int insn_bytes_sz, insn_bpf_sz;
|
||||
@@ -1024,20 +1123,20 @@ poison:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
imm = insn[0].imm + ((__u64)insn[1].imm << 32);
|
||||
imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
|
||||
if (res->validate && imm != orig_val) {
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
|
||||
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n",
|
||||
prog_name, relo_idx,
|
||||
insn_idx, (unsigned long long)imm,
|
||||
orig_val, new_val);
|
||||
(unsigned long long)orig_val, (unsigned long long)new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
insn[0].imm = new_val;
|
||||
insn[1].imm = 0; /* currently only 32-bit values are supported */
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
|
||||
insn[1].imm = new_val >> 32;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n",
|
||||
prog_name, relo_idx, insn_idx,
|
||||
(unsigned long long)imm, new_val);
|
||||
(unsigned long long)imm, (unsigned long long)new_val);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
@@ -1057,7 +1156,6 @@ poison:
|
||||
int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
const char *s;
|
||||
__u32 type_id;
|
||||
int i, len = 0;
|
||||
@@ -1086,10 +1184,23 @@ int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *s
|
||||
|
||||
if (core_relo_is_enumval_based(spec->relo_kind)) {
|
||||
t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
|
||||
e = btf_enum(t) + spec->raw_spec[0];
|
||||
s = btf__name_by_offset(spec->btf, e->name_off);
|
||||
if (btf_is_enum(t)) {
|
||||
const struct btf_enum *e;
|
||||
const char *fmt_str;
|
||||
|
||||
append_buf("::%s = %u", s, e->val);
|
||||
e = btf_enum(t) + spec->raw_spec[0];
|
||||
s = btf__name_by_offset(spec->btf, e->name_off);
|
||||
fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
|
||||
append_buf(fmt_str, s, e->val);
|
||||
} else {
|
||||
const struct btf_enum64 *e;
|
||||
const char *fmt_str;
|
||||
|
||||
e = btf_enum64(t) + spec->raw_spec[0];
|
||||
s = btf__name_by_offset(spec->btf, e->name_off);
|
||||
fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
|
||||
append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e));
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
@@ -1148,11 +1259,11 @@ int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *s
|
||||
* 3. It is supported and expected that there might be multiple flavors
|
||||
* matching the spec. As long as all the specs resolve to the same set of
|
||||
* offsets across all candidates, there is no error. If there is any
|
||||
* ambiguity, CO-RE relocation will fail. This is necessary to accomodate
|
||||
* imprefection of BTF deduplication, which can cause slight duplication of
|
||||
* ambiguity, CO-RE relocation will fail. This is necessary to accommodate
|
||||
* imperfection of BTF deduplication, which can cause slight duplication of
|
||||
* the same BTF type, if some directly or indirectly referenced (by
|
||||
* pointer) type gets resolved to different actual types in different
|
||||
* object files. If such situation occurs, deduplicated BTF will end up
|
||||
* object files. If such a situation occurs, deduplicated BTF will end up
|
||||
* with two (or more) structurally identical types, which differ only in
|
||||
* types they refer to through pointer. This should be OK in most cases and
|
||||
* is not an error.
|
||||
@@ -1261,10 +1372,12 @@ int bpf_core_calc_relo_insn(const char *prog_name,
|
||||
* decision and value, otherwise it's dangerous to
|
||||
* proceed due to ambiguity
|
||||
*/
|
||||
pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
|
||||
pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n",
|
||||
prog_name, relo_idx,
|
||||
cand_res.poison ? "failure" : "success", cand_res.new_val,
|
||||
targ_res->poison ? "failure" : "success", targ_res->new_val);
|
||||
cand_res.poison ? "failure" : "success",
|
||||
(unsigned long long)cand_res.new_val,
|
||||
targ_res->poison ? "failure" : "success",
|
||||
(unsigned long long)targ_res->new_val);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -1305,3 +1418,273 @@ int bpf_core_calc_relo_insn(const char *prog_name,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off,
|
||||
const struct btf *targ_btf, size_t targ_name_off)
|
||||
{
|
||||
const char *local_n, *targ_n;
|
||||
size_t local_len, targ_len;
|
||||
|
||||
local_n = btf__name_by_offset(local_btf, local_name_off);
|
||||
targ_n = btf__name_by_offset(targ_btf, targ_name_off);
|
||||
|
||||
if (str_is_empty(targ_n))
|
||||
return str_is_empty(local_n);
|
||||
|
||||
targ_len = bpf_core_essential_name_len(targ_n);
|
||||
local_len = bpf_core_essential_name_len(local_n);
|
||||
|
||||
return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0;
|
||||
}
|
||||
|
||||
static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t,
|
||||
const struct btf *targ_btf, const struct btf_type *targ_t)
|
||||
{
|
||||
__u16 local_vlen = btf_vlen(local_t);
|
||||
__u16 targ_vlen = btf_vlen(targ_t);
|
||||
int i, j;
|
||||
|
||||
if (local_t->size != targ_t->size)
|
||||
return 0;
|
||||
|
||||
if (local_vlen > targ_vlen)
|
||||
return 0;
|
||||
|
||||
/* iterate over the local enum's variants and make sure each has
|
||||
* a symbolic name correspondent in the target
|
||||
*/
|
||||
for (i = 0; i < local_vlen; i++) {
|
||||
bool matched = false;
|
||||
__u32 local_n_off, targ_n_off;
|
||||
|
||||
local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
|
||||
btf_enum64(local_t)[i].name_off;
|
||||
|
||||
for (j = 0; j < targ_vlen; j++) {
|
||||
targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
|
||||
btf_enum64(targ_t)[j].name_off;
|
||||
|
||||
if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!matched)
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t,
|
||||
const struct btf *targ_btf, const struct btf_type *targ_t,
|
||||
bool behind_ptr, int level)
|
||||
{
|
||||
const struct btf_member *local_m = btf_members(local_t);
|
||||
__u16 local_vlen = btf_vlen(local_t);
|
||||
__u16 targ_vlen = btf_vlen(targ_t);
|
||||
int i, j, err;
|
||||
|
||||
if (local_vlen > targ_vlen)
|
||||
return 0;
|
||||
|
||||
/* check that all local members have a match in the target */
|
||||
for (i = 0; i < local_vlen; i++, local_m++) {
|
||||
const struct btf_member *targ_m = btf_members(targ_t);
|
||||
bool matched = false;
|
||||
|
||||
for (j = 0; j < targ_vlen; j++, targ_m++) {
|
||||
if (!bpf_core_names_match(local_btf, local_m->name_off,
|
||||
targ_btf, targ_m->name_off))
|
||||
continue;
|
||||
|
||||
err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
|
||||
targ_m->type, behind_ptr, level - 1);
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (err > 0) {
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!matched)
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Check that two types "match". This function assumes that root types were
|
||||
* already checked for name match.
|
||||
*
|
||||
* The matching relation is defined as follows:
|
||||
* - modifiers and typedefs are stripped (and, hence, effectively ignored)
|
||||
* - generally speaking types need to be of same kind (struct vs. struct, union
|
||||
* vs. union, etc.)
|
||||
* - exceptions are struct/union behind a pointer which could also match a
|
||||
* forward declaration of a struct or union, respectively, and enum vs.
|
||||
* enum64 (see below)
|
||||
* Then, depending on type:
|
||||
* - integers:
|
||||
* - match if size and signedness match
|
||||
* - arrays & pointers:
|
||||
* - target types are recursively matched
|
||||
* - structs & unions:
|
||||
* - local members need to exist in target with the same name
|
||||
* - for each member we recursively check match unless it is already behind a
|
||||
* pointer, in which case we only check matching names and compatible kind
|
||||
* - enums:
|
||||
* - local variants have to have a match in target by symbolic name (but not
|
||||
* numeric value)
|
||||
* - size has to match (but enum may match enum64 and vice versa)
|
||||
* - function pointers:
|
||||
* - number and position of arguments in local type has to match target
|
||||
* - for each argument and the return value we recursively check match
|
||||
*/
|
||||
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
			   __u32 targ_id, bool behind_ptr, int level)
{
	const struct btf_type *local_t, *targ_t;
	int depth = 32; /* max recursion depth */
	__u16 local_k, targ_k;

	if (level <= 0)
		return -EINVAL;

	local_t = btf_type_by_id(local_btf, local_id);
	targ_t = btf_type_by_id(targ_btf, targ_id);

recur:
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_t || !targ_t)
		return -EINVAL;

	/* While the name check happens after typedefs are skipped, root-level
	 * typedefs would still be name-matched as that's the contract with
	 * callers.
	 */
	if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
		return 0;

	local_k = btf_kind(local_t);
	targ_k = btf_kind(targ_t);

	switch (local_k) {
	case BTF_KIND_UNKN:
		return local_k == targ_k;
	case BTF_KIND_FWD: {
		bool local_f = BTF_INFO_KFLAG(local_t->info);

		if (behind_ptr) {
			if (local_k == targ_k)
				return local_f == BTF_INFO_KFLAG(targ_t->info);

			/* for forward declarations kflag dictates whether the
			 * target is a struct (0) or union (1)
			 */
			return (targ_k == BTF_KIND_STRUCT && !local_f) ||
			       (targ_k == BTF_KIND_UNION && local_f);
		} else {
			if (local_k != targ_k)
				return 0;

			/* match if the forward declaration is for the same kind */
			return local_f == BTF_INFO_KFLAG(targ_t->info);
		}
	}
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		if (!btf_is_any_enum(targ_t))
			return 0;

		return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		if (behind_ptr) {
			bool targ_f = BTF_INFO_KFLAG(targ_t->info);

			if (local_k == targ_k)
				return 1;

			if (targ_k != BTF_KIND_FWD)
				return 0;

			return (local_k == BTF_KIND_UNION) == targ_f;
		} else {
			if (local_k != targ_k)
				return 0;

			return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
							 behind_ptr, level);
		}
	case BTF_KIND_INT: {
		__u8 local_sgn;
		__u8 targ_sgn;

		if (local_k != targ_k)
			return 0;

		local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
		targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;

		return local_t->size == targ_t->size && local_sgn == targ_sgn;
	}
	case BTF_KIND_PTR:
		if (local_k != targ_k)
			return 0;

		behind_ptr = true;

		local_id = local_t->type;
		targ_id = targ_t->type;
		goto recur;
	case BTF_KIND_ARRAY: {
		const struct btf_array *local_array = btf_array(local_t);
		const struct btf_array *targ_array = btf_array(targ_t);

		if (local_k != targ_k)
			return 0;

		if (local_array->nelems != targ_array->nelems)
			return 0;

		local_id = local_array->type;
		targ_id = targ_array->type;
		goto recur;
	}
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_t);
		struct btf_param *targ_p = btf_params(targ_t);
		__u16 local_vlen = btf_vlen(local_t);
		__u16 targ_vlen = btf_vlen(targ_t);
		int i, err;

		if (local_k != targ_k)
			return 0;

		if (local_vlen != targ_vlen)
			return 0;

		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
						     targ_p->type, behind_ptr, level - 1);
			if (err <= 0)
				return err;
		}

		/* tail recurse for return type check */
		local_id = local_t->type;
		targ_id = targ_t->type;
		goto recur;
	}
	default:
		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
			btf_kind_str(local_t), local_id, targ_id);
		return 0;
	}
}

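The matching rules spelled out above are what back libbpf's `bpf_core_type_matches()` check on the BPF-program side (a BPF_TYPE_MATCHES CO-RE relocation). A minimal sketch of how a program could use it; the `task_struct___local` flavor and the tracepoint chosen here are illustrative assumptions, not part of this diff:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/* hypothetical local "flavor": only the members we care about; the ___local
 * suffix is dropped before the name/shape comparison against kernel BTF
 */
struct task_struct___local {
	int pid;
} __attribute__((preserve_access_index));

SEC("tp_btf/sched_switch")
int BPF_PROG(handle_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	/* resolved at load time through the type-matching logic above */
	if (bpf_core_type_matches(struct task_struct___local)) {
		struct task_struct___local *t = (void *)next;

		bpf_printk("next pid=%d", BPF_CORE_READ(t, pid));
	}
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```

When the shape check fails on a given kernel, the guarded branch is simply never taken, which is the usual way to handle several possible kernel layouts from one program.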
@@ -46,9 +46,9 @@ struct bpf_core_spec {
|
||||
|
||||
struct bpf_core_relo_res {
|
||||
/* expected value in the instruction, unless validate == false */
|
||||
__u32 orig_val;
|
||||
__u64 orig_val;
|
||||
/* new value that needs to be patched up to */
|
||||
__u32 new_val;
|
||||
__u64 new_val;
|
||||
/* relocation unsuccessful, poison instruction, but don't fail load */
|
||||
bool poison;
|
||||
/* some relocations can't be validated against orig_val */
|
||||
@@ -68,8 +68,14 @@ struct bpf_core_relo_res {
|
||||
__u32 new_type_id;
|
||||
};
|
||||
|
||||
int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
|
||||
const struct btf *targ_btf, __u32 targ_id, int level);
|
||||
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
|
||||
const struct btf *targ_btf, __u32 targ_id);
|
||||
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
|
||||
__u32 targ_id, bool behind_ptr, int level);
|
||||
int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
|
||||
__u32 targ_id);
|
||||
|
||||
size_t bpf_core_essential_name_len(const char *name);
|
||||
|
||||
|
||||
src/ringbuf.c (271 changed lines)
@@ -16,6 +16,7 @@
|
||||
#include <asm/barrier.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "libbpf.h"
|
||||
#include "libbpf_internal.h"
|
||||
@@ -39,6 +40,23 @@ struct ring_buffer {
	int ring_cnt;
};

struct user_ring_buffer {
	struct epoll_event event;
	unsigned long *consumer_pos;
	unsigned long *producer_pos;
	void *data;
	unsigned long mask;
	size_t page_size;
	int map_fd;
	int epoll_fd;
};

/* 8-byte ring buffer header structure */
struct ringbuf_hdr {
	__u32 len;
	__u32 pad;
};

static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
{
	if (r->consumer_pos) {
@@ -300,3 +318,256 @@ int ring_buffer__epoll_fd(const struct ring_buffer *rb)
{
	return rb->epoll_fd;
}

static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
|
||||
{
|
||||
if (rb->consumer_pos) {
|
||||
munmap(rb->consumer_pos, rb->page_size);
|
||||
rb->consumer_pos = NULL;
|
||||
}
|
||||
if (rb->producer_pos) {
|
||||
munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));
|
||||
rb->producer_pos = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void user_ring_buffer__free(struct user_ring_buffer *rb)
|
||||
{
|
||||
if (!rb)
|
||||
return;
|
||||
|
||||
user_ringbuf_unmap_ring(rb);
|
||||
|
||||
if (rb->epoll_fd >= 0)
|
||||
close(rb->epoll_fd);
|
||||
|
||||
free(rb);
|
||||
}
|
||||
|
||||
static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
|
||||
{
|
||||
struct bpf_map_info info;
|
||||
__u32 len = sizeof(info);
|
||||
void *tmp;
|
||||
struct epoll_event *rb_epoll;
|
||||
int err;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (info.type != BPF_MAP_TYPE_USER_RINGBUF) {
|
||||
pr_warn("user ringbuf: map fd=%d is not BPF_MAP_TYPE_USER_RINGBUF\n", map_fd);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rb->map_fd = map_fd;
|
||||
rb->mask = info.max_entries - 1;
|
||||
|
||||
/* Map read-only consumer page */
|
||||
tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
|
||||
if (tmp == MAP_FAILED) {
|
||||
err = -errno;
|
||||
pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
|
||||
map_fd, err);
|
||||
return err;
|
||||
}
|
||||
rb->consumer_pos = tmp;
|
||||
|
||||
/* Map read-write the producer page and data pages. We map the data
|
||||
* region as twice the total size of the ring buffer to allow the
|
||||
* simple reading and writing of samples that wrap around the end of
|
||||
* the buffer. See the kernel implementation for details.
|
||||
*/
|
||||
tmp = mmap(NULL, rb->page_size + 2 * info.max_entries,
|
||||
PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, rb->page_size);
|
||||
if (tmp == MAP_FAILED) {
|
||||
err = -errno;
|
||||
pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
|
||||
map_fd, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
rb->producer_pos = tmp;
|
||||
rb->data = tmp + rb->page_size;
|
||||
|
||||
rb_epoll = &rb->event;
|
||||
rb_epoll->events = EPOLLOUT;
|
||||
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
|
||||
err = -errno;
|
||||
pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct user_ring_buffer *
|
||||
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
|
||||
{
|
||||
struct user_ring_buffer *rb;
|
||||
int err;
|
||||
|
||||
if (!OPTS_VALID(opts, user_ring_buffer_opts))
|
||||
return errno = EINVAL, NULL;
|
||||
|
||||
rb = calloc(1, sizeof(*rb));
|
||||
if (!rb)
|
||||
return errno = ENOMEM, NULL;
|
||||
|
||||
rb->page_size = getpagesize();
|
||||
|
||||
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
|
||||
if (rb->epoll_fd < 0) {
|
||||
err = -errno;
|
||||
pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
err = user_ringbuf_map(rb, map_fd);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
return rb;
|
||||
|
||||
err_out:
|
||||
user_ring_buffer__free(rb);
|
||||
return errno = -err, NULL;
|
||||
}
|
||||
|
||||
static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard)
|
||||
{
|
||||
__u32 new_len;
|
||||
struct ringbuf_hdr *hdr;
|
||||
uintptr_t hdr_offset;
|
||||
|
||||
hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ;
|
||||
hdr = rb->data + (hdr_offset & rb->mask);
|
||||
|
||||
new_len = hdr->len & ~BPF_RINGBUF_BUSY_BIT;
|
||||
if (discard)
|
||||
new_len |= BPF_RINGBUF_DISCARD_BIT;
|
||||
|
||||
/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
|
||||
* the kernel.
|
||||
*/
|
||||
__atomic_exchange_n(&hdr->len, new_len, __ATOMIC_ACQ_REL);
|
||||
}
|
||||
|
||||
void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample)
|
||||
{
|
||||
user_ringbuf_commit(rb, sample, true);
|
||||
}
|
||||
|
||||
void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample)
|
||||
{
|
||||
user_ringbuf_commit(rb, sample, false);
|
||||
}
|
||||
|
||||
void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
|
||||
{
|
||||
__u32 avail_size, total_size, max_size;
|
||||
/* 64-bit to avoid overflow in case of extreme application behavior */
|
||||
__u64 cons_pos, prod_pos;
|
||||
struct ringbuf_hdr *hdr;
|
||||
|
||||
/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_peek() in
|
||||
* the kernel.
|
||||
*/
|
||||
cons_pos = smp_load_acquire(rb->consumer_pos);
|
||||
/* Synchronizes with smp_store_release() in user_ringbuf_commit() */
|
||||
prod_pos = smp_load_acquire(rb->producer_pos);
|
||||
|
||||
max_size = rb->mask + 1;
|
||||
avail_size = max_size - (prod_pos - cons_pos);
|
||||
/* Round up total size to a multiple of 8. */
|
||||
total_size = (size + BPF_RINGBUF_HDR_SZ + 7) / 8 * 8;
|
||||
|
||||
if (total_size > max_size)
|
||||
return errno = E2BIG, NULL;
|
||||
|
||||
if (avail_size < total_size)
|
||||
return errno = ENOSPC, NULL;
|
||||
|
||||
hdr = rb->data + (prod_pos & rb->mask);
|
||||
hdr->len = size | BPF_RINGBUF_BUSY_BIT;
|
||||
hdr->pad = 0;
|
||||
|
||||
/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
|
||||
* the kernel.
|
||||
*/
|
||||
smp_store_release(rb->producer_pos, prod_pos + total_size);
|
||||
|
||||
return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask);
|
||||
}
|
||||
|
||||
static __u64 ns_elapsed_timespec(const struct timespec *start, const struct timespec *end)
|
||||
{
|
||||
__u64 start_ns, end_ns, ns_per_s = 1000000000;
|
||||
|
||||
start_ns = (__u64)start->tv_sec * ns_per_s + start->tv_nsec;
|
||||
end_ns = (__u64)end->tv_sec * ns_per_s + end->tv_nsec;
|
||||
|
||||
return end_ns - start_ns;
|
||||
}
|
||||
|
||||
void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms)
|
||||
{
|
||||
void *sample;
|
||||
int err, ms_remaining = timeout_ms;
|
||||
struct timespec start;
|
||||
|
||||
if (timeout_ms < 0 && timeout_ms != -1)
|
||||
return errno = EINVAL, NULL;
|
||||
|
||||
if (timeout_ms != -1) {
|
||||
err = clock_gettime(CLOCK_MONOTONIC, &start);
|
||||
if (err)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
do {
|
||||
int cnt, ms_elapsed;
|
||||
struct timespec curr;
|
||||
__u64 ns_per_ms = 1000000;
|
||||
|
||||
sample = user_ring_buffer__reserve(rb, size);
|
||||
if (sample)
|
||||
return sample;
|
||||
else if (errno != ENOSPC)
|
||||
return NULL;
|
||||
|
||||
/* The kernel guarantees at least one event notification
|
||||
* delivery whenever at least one sample is drained from the
|
||||
* ring buffer in an invocation to bpf_ringbuf_drain(). Other
|
||||
* additional events may be delivered at any time, but only one
|
||||
* event is guaranteed per bpf_ringbuf_drain() invocation,
|
||||
* provided that a sample is drained, and the BPF program did
|
||||
* not pass BPF_RB_NO_WAKEUP to bpf_ringbuf_drain(). If
|
||||
* BPF_RB_FORCE_WAKEUP is passed to bpf_ringbuf_drain(), a
|
||||
* wakeup event will be delivered even if no samples are
|
||||
* drained.
|
||||
*/
|
||||
cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining);
|
||||
if (cnt < 0)
|
||||
return NULL;
|
||||
|
||||
if (timeout_ms == -1)
|
||||
continue;
|
||||
|
||||
err = clock_gettime(CLOCK_MONOTONIC, &curr);
|
||||
if (err)
|
||||
return NULL;
|
||||
|
||||
ms_elapsed = ns_elapsed_timespec(&start, &curr) / ns_per_ms;
|
||||
ms_remaining = timeout_ms - ms_elapsed;
|
||||
} while (ms_remaining > 0);
|
||||
|
||||
/* Try one more time to reserve a sample after the specified timeout has elapsed. */
|
||||
return user_ring_buffer__reserve(rb, size);
|
||||
}
|
||||
|
||||
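The producer API added above is consumed from user space roughly as follows. This is a minimal sketch under stated assumptions: the fd refers to an already-created BPF_MAP_TYPE_USER_RINGBUF map, and `struct my_msg` is a made-up sample layout agreed upon with the BPF side, which drains it via the `bpf_user_ringbuf_drain()` helper:

```c
#include <errno.h>
#include <string.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

struct my_msg {			/* hypothetical sample layout */
	__u32 kind;
	char payload[60];
};

static int send_msg(int user_ringbuf_map_fd, __u32 kind, const char *text)
{
	struct user_ring_buffer *rb;
	struct my_msg *msg;
	int err = 0;

	rb = user_ring_buffer__new(user_ringbuf_map_fd, NULL);
	if (!rb)
		return -errno;

	/* wait up to 100 ms for free space (kernel wakes us up via epoll) */
	msg = user_ring_buffer__reserve_blocking(rb, sizeof(*msg), 100);
	if (!msg) {
		err = -errno;
		goto out;
	}

	memset(msg, 0, sizeof(*msg));
	msg->kind = kind;
	strncpy(msg->payload, text, sizeof(msg->payload) - 1);

	/* clears the BUSY bit so bpf_user_ringbuf_drain() can consume the sample;
	 * user_ring_buffer__discard() would drop the reservation instead
	 */
	user_ring_buffer__submit(rb, msg);
out:
	user_ring_buffer__free(rb);
	return err;
}
```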
@@ -66,13 +66,13 @@ struct bpf_load_and_run_opts {
|
||||
const char *errstr;
|
||||
};
|
||||
|
||||
long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
|
||||
long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
|
||||
|
||||
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
|
||||
unsigned int size)
|
||||
{
|
||||
#ifdef __KERNEL__
|
||||
return bpf_sys_bpf(cmd, attr, size);
|
||||
return kern_sys_bpf(cmd, attr, size);
|
||||
#else
|
||||
return syscall(__NR_bpf, cmd, attr, size);
|
||||
#endif
|
||||
@@ -251,6 +251,29 @@ static inline int skel_map_update_elem(int fd, const void *key,
|
||||
return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
|
||||
}
|
||||
|
||||
static inline int skel_map_delete_elem(int fd, const void *key)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_fd = fd;
|
||||
attr.key = (long)key;
|
||||
|
||||
return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
|
||||
}
|
||||
|
||||
static inline int skel_map_get_fd_by_id(__u32 id)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, flags);
|
||||
union bpf_attr attr;
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.map_id = id;
|
||||
|
||||
return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
|
||||
}
|
||||
|
||||
static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
|
||||
@@ -285,6 +308,8 @@ static inline int skel_link_create(int prog_fd, int target_fd,
|
||||
|
||||
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
|
||||
{
|
||||
const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
|
||||
const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
|
||||
int map_fd = -1, prog_fd = -1, key = 0, err;
|
||||
union bpf_attr attr;
|
||||
|
||||
@@ -302,7 +327,7 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, prog_load_attr_sz);
|
||||
attr.prog_type = BPF_PROG_TYPE_SYSCALL;
|
||||
attr.insns = (long) opts->insns;
|
||||
attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
|
||||
@@ -313,18 +338,18 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
|
||||
attr.log_size = opts->ctx->log_size;
|
||||
attr.log_buf = opts->ctx->log_buf;
|
||||
attr.prog_flags = BPF_F_SLEEPABLE;
|
||||
err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
|
||||
err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
|
||||
if (prog_fd < 0) {
|
||||
opts->errstr = "failed to load loader prog";
|
||||
set_err;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
memset(&attr, 0, test_run_attr_sz);
|
||||
attr.test.prog_fd = prog_fd;
|
||||
attr.test.ctx_in = (long) opts->ctx;
|
||||
attr.test.ctx_size_in = opts->ctx->sz;
|
||||
err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
|
||||
err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
|
||||
if (err < 0 || (int)attr.test.retval < 0) {
|
||||
opts->errstr = "failed to execute loader prog";
|
||||
if (err < 0) {
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
#include <linux/errno.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
/* Below types and maps are internal implementation details of libbpf's USDT
|
||||
* support and are subjects to change. Also, bpf_usdt_xxx() API helpers should
|
||||
@@ -30,14 +29,6 @@
|
||||
#ifndef BPF_USDT_MAX_IP_CNT
|
||||
#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
|
||||
#endif
|
||||
/* We use BPF CO-RE to detect support for BPF cookie from BPF side. This is
|
||||
* the only dependency on CO-RE, so if it's undesirable, user can override
|
||||
 * BPF_USDT_HAS_BPF_COOKIE to specify whether BPF cookie is supported or not.
|
||||
*/
|
||||
#ifndef BPF_USDT_HAS_BPF_COOKIE
|
||||
#define BPF_USDT_HAS_BPF_COOKIE \
|
||||
bpf_core_enum_value_exists(enum bpf_func_id___usdt, BPF_FUNC_get_attach_cookie___usdt)
|
||||
#endif
|
||||
|
||||
enum __bpf_usdt_arg_type {
|
||||
BPF_USDT_ARG_CONST,
|
||||
@@ -83,15 +74,12 @@ struct {
|
||||
__type(value, __u32);
|
||||
} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak;
|
||||
|
||||
/* don't rely on user's BPF code to have latest definition of bpf_func_id */
|
||||
enum bpf_func_id___usdt {
|
||||
BPF_FUNC_get_attach_cookie___usdt = 0xBAD, /* value doesn't matter */
|
||||
};
|
||||
extern const _Bool LINUX_HAS_BPF_COOKIE __kconfig;
|
||||
|
||||
static __always_inline
|
||||
int __bpf_usdt_spec_id(struct pt_regs *ctx)
|
||||
{
|
||||
if (!BPF_USDT_HAS_BPF_COOKIE) {
|
||||
if (!LINUX_HAS_BPF_COOKIE) {
|
||||
long ip = PT_REGS_IP(ctx);
|
||||
int *spec_id_ptr;
|
||||
|
||||
@@ -244,7 +232,7 @@ long bpf_usdt_cookie(struct pt_regs *ctx)
|
||||
*/
|
||||
#define BPF_USDT(name, args...) \
|
||||
name(struct pt_regs *ctx); \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args); \
|
||||
typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
{ \
|
||||
@@ -253,7 +241,7 @@ typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
return ____##name(___bpf_usdt_args(args)); \
|
||||
_Pragma("GCC diagnostic pop") \
|
||||
} \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
static __always_inline typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args)
|
||||
|
||||
#endif /* __USDT_BPF_H__ */
|
||||
|
||||
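The BPF_USDT() wrapper adjusted above is how BPF programs declare typed USDT handlers. A minimal sketch; the provider/probe names and argument types are made-up examples, not something taken from this diff:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>

/* handler for a hypothetical "myapp:request__start(int id, char *url)" probe */
SEC("usdt")
int BPF_USDT(handle_request_start, int id, const char *url)
{
	bpf_printk("request %d started, url ptr %p", id, url);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```

From user space such a program is then typically attached with bpf_program__attach_usdt(), pointing at the target binary, provider and probe name; alternatively an auto-attach section name of the form SEC("usdt/BINARY:provider:name") can be used.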
src/usdt.c (133 changed lines)
@@ -282,7 +282,7 @@ struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
|
||||
* If this is not supported, USDTs with semaphores will not be supported.
|
||||
* Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
|
||||
*/
|
||||
man->has_sema_refcnt = access(ref_ctr_sysfs_path, F_OK) == 0;
|
||||
man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
|
||||
|
||||
return man;
|
||||
}
|
||||
@@ -441,7 +441,7 @@ static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, siz
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_lib_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
|
||||
static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
|
||||
{
|
||||
char path[PATH_MAX], line[PATH_MAX], mode[16];
|
||||
size_t seg_start, seg_end, seg_off;
|
||||
@@ -531,35 +531,40 @@ err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long addr, bool relative)
|
||||
static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
|
||||
{
|
||||
struct elf_seg *seg;
|
||||
int i;
|
||||
|
||||
if (relative) {
|
||||
/* for shared libraries, address is relative offset and thus
|
||||
* should be fall within logical offset-based range of
|
||||
* [offset_start, offset_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->offset <= addr && addr < seg->offset + (seg->end - seg->start))
|
||||
return seg;
|
||||
}
|
||||
} else {
|
||||
/* for binaries, address is absolute and thus should be within
|
||||
* absolute address range of [seg_start, seg_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->start <= addr && addr < seg->end)
|
||||
return seg;
|
||||
}
|
||||
/* for ELF binaries (both executables and shared libraries), we are
|
||||
* given virtual address (absolute for executables, relative for
|
||||
* libraries) which should match address range of [seg_start, seg_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->start <= virtaddr && virtaddr < seg->end)
|
||||
return seg;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
|
||||
GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
|
||||
static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
|
||||
{
|
||||
struct elf_seg *seg;
|
||||
int i;
|
||||
|
||||
/* for VMA segments from /proc/<pid>/maps file, provided "address" is
|
||||
* actually a file offset, so should be fall within logical
|
||||
* offset-based range of [offset_start, offset_end)
|
||||
*/
|
||||
for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
|
||||
if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
|
||||
return seg;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
|
||||
const char *data, size_t name_off, size_t desc_off,
|
||||
struct usdt_note *usdt_note);
|
||||
|
||||
static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
|
||||
@@ -568,8 +573,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
|
||||
struct usdt_target **out_targets, size_t *out_target_cnt)
|
||||
{
|
||||
size_t off, name_off, desc_off, seg_cnt = 0, lib_seg_cnt = 0, target_cnt = 0;
|
||||
struct elf_seg *segs = NULL, *lib_segs = NULL;
|
||||
size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
|
||||
struct elf_seg *segs = NULL, *vma_segs = NULL;
|
||||
struct usdt_target *targets = NULL, *target;
|
||||
long base_addr = 0;
|
||||
Elf_Scn *notes_scn, *base_scn;
|
||||
@@ -613,8 +618,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
struct elf_seg *seg = NULL;
|
||||
void *tmp;
|
||||
|
||||
err = parse_usdt_note(elf, path, base_addr, &nhdr,
|
||||
data->d_buf, name_off, desc_off, ¬e);
|
||||
err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, ¬e);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
@@ -648,36 +652,33 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
*
|
||||
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
|
||||
*/
|
||||
usdt_rel_ip = usdt_abs_ip = note.loc_addr;
|
||||
if (base_addr) {
|
||||
usdt_abs_ip = note.loc_addr;
|
||||
if (base_addr)
|
||||
usdt_abs_ip += base_addr - note.base_addr;
|
||||
usdt_rel_ip += base_addr - note.base_addr;
|
||||
|
||||
/* When attaching uprobes (which is what USDTs basically are)
|
||||
* kernel expects file offset to be specified, not a relative
|
||||
* virtual address, so we need to translate virtual address to
|
||||
* file offset, for both ET_EXEC and ET_DYN binaries.
|
||||
*/
|
||||
seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
|
||||
usdt_provider, usdt_name, path, usdt_abs_ip);
|
||||
goto err_out;
|
||||
}
|
||||
if (!seg->is_exec) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
|
||||
path, seg->start, seg->end, usdt_provider, usdt_name,
|
||||
usdt_abs_ip);
|
||||
goto err_out;
|
||||
}
|
||||
/* translate from virtual address to file offset */
|
||||
usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
|
||||
|
||||
if (ehdr.e_type == ET_EXEC) {
|
||||
/* When attaching uprobes (which what USDTs basically
|
||||
* are) kernel expects a relative IP to be specified,
|
||||
* so if we are attaching to an executable ELF binary
|
||||
* (i.e., not a shared library), we need to calculate
|
||||
* proper relative IP based on ELF's load address
|
||||
*/
|
||||
seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip, false /* relative */);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
|
||||
usdt_provider, usdt_name, path, usdt_abs_ip);
|
||||
goto err_out;
|
||||
}
|
||||
if (!seg->is_exec) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
|
||||
path, seg->start, seg->end, usdt_provider, usdt_name,
|
||||
usdt_abs_ip);
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
usdt_rel_ip = usdt_abs_ip - (seg->start - seg->offset);
|
||||
} else if (!man->has_bpf_cookie) { /* ehdr.e_type == ET_DYN */
|
||||
if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
|
||||
/* If we don't have BPF cookie support but need to
|
||||
* attach to a shared library, we'll need to know and
|
||||
* record absolute addresses of attach points due to
|
||||
@@ -697,9 +698,9 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* lib_segs are lazily initialized only if necessary */
|
||||
if (lib_seg_cnt == 0) {
|
||||
err = parse_lib_segs(pid, path, &lib_segs, &lib_seg_cnt);
|
||||
/* vma_segs are lazily initialized only if necessary */
|
||||
if (vma_seg_cnt == 0) {
|
||||
err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
|
||||
if (err) {
|
||||
pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
|
||||
pid, path, err);
|
||||
@@ -707,7 +708,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
}
|
||||
}
|
||||
|
||||
seg = find_elf_seg(lib_segs, lib_seg_cnt, usdt_rel_ip, true /* relative */);
|
||||
seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
|
||||
@@ -715,7 +716,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
usdt_abs_ip = seg->start + (usdt_rel_ip - seg->offset);
|
||||
usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
|
||||
}
|
||||
|
||||
pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
|
||||
@@ -723,7 +724,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
|
||||
seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
|
||||
|
||||
/* Adjust semaphore address to be a relative offset */
|
||||
/* Adjust semaphore address to be a file offset */
|
||||
if (note.sema_addr) {
|
||||
if (!man->has_sema_refcnt) {
|
||||
pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
|
||||
@@ -732,7 +733,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
seg = find_elf_seg(segs, seg_cnt, note.sema_addr, false /* relative */);
|
||||
seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
|
||||
if (!seg) {
|
||||
err = -ESRCH;
|
||||
pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
|
||||
@@ -747,7 +748,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
usdt_sema_off = note.sema_addr - (seg->start - seg->offset);
|
||||
usdt_sema_off = note.sema_addr - seg->start + seg->offset;
|
||||
|
||||
pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
|
||||
usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
|
||||
@@ -770,7 +771,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
target->rel_ip = usdt_rel_ip;
|
||||
target->sema_off = usdt_sema_off;
|
||||
|
||||
/* notes->args references strings from Elf itself, so they can
|
||||
/* notes.args references strings from Elf itself, so they can
|
||||
* be referenced safely until elf_end() call
|
||||
*/
|
||||
target->spec_str = note.args;
|
||||
@@ -788,7 +789,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
||||
|
||||
err_out:
|
||||
free(segs);
|
||||
free(lib_segs);
|
||||
free(vma_segs);
|
||||
if (err < 0)
|
||||
free(targets);
|
||||
return err;
|
||||
@@ -1089,8 +1090,8 @@ err_out:
|
||||
/* Parse out USDT ELF note from '.note.stapsdt' section.
|
||||
* Logic inspired by perf's code.
|
||||
*/
|
||||
static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
|
||||
GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
|
||||
static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
|
||||
const char *data, size_t name_off, size_t desc_off,
|
||||
struct usdt_note *note)
|
||||
{
|
||||
const char *provider, *name, *args;
|
||||
|
||||
src/xsk.h (336 changed lines)
@@ -1,336 +0,0 @@
|
||||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||
|
||||
/*
|
||||
* AF_XDP user-space access library.
|
||||
*
|
||||
* Copyright (c) 2018 - 2019 Intel Corporation.
|
||||
* Copyright (c) 2019 Facebook
|
||||
*
|
||||
* Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
|
||||
*/
|
||||
|
||||
#ifndef __LIBBPF_XSK_H
|
||||
#define __LIBBPF_XSK_H
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <linux/if_xdp.h>
|
||||
|
||||
#include "libbpf.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* This whole API has been deprecated and moved to libxdp that can be found at
|
||||
* https://github.com/xdp-project/xdp-tools. The APIs are exactly the same so
|
||||
* it should just be linking with libxdp instead of libbpf for this set of
|
||||
* functionality. If not, please submit a bug report on the aforementioned page.
|
||||
*/
|
||||
|
||||
/* Load-Acquire Store-Release barriers used by the XDP socket
|
||||
* library. The following macros should *NOT* be considered part of
|
||||
* the xsk.h API, and is subject to change anytime.
|
||||
*
|
||||
* LIBRARY INTERNAL
|
||||
*/
|
||||
|
||||
#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
|
||||
#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
|
||||
|
||||
#if defined(__i386__) || defined(__x86_64__)
|
||||
# define libbpf_smp_store_release(p, v) \
|
||||
do { \
|
||||
asm volatile("" : : : "memory"); \
|
||||
__XSK_WRITE_ONCE(*p, v); \
|
||||
} while (0)
|
||||
# define libbpf_smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
|
||||
asm volatile("" : : : "memory"); \
|
||||
___p1; \
|
||||
})
|
||||
#elif defined(__aarch64__)
|
||||
# define libbpf_smp_store_release(p, v) \
|
||||
asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
|
||||
# define libbpf_smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p1; \
|
||||
asm volatile ("ldar %w0, %1" \
|
||||
: "=r" (___p1) : "Q" (*p) : "memory"); \
|
||||
___p1; \
|
||||
})
|
||||
#elif defined(__riscv)
|
||||
# define libbpf_smp_store_release(p, v) \
|
||||
do { \
|
||||
asm volatile ("fence rw,w" : : : "memory"); \
|
||||
__XSK_WRITE_ONCE(*p, v); \
|
||||
} while (0)
|
||||
# define libbpf_smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
|
||||
asm volatile ("fence r,rw" : : : "memory"); \
|
||||
___p1; \
|
||||
})
|
||||
#endif
|
||||
|
||||
#ifndef libbpf_smp_store_release
|
||||
#define libbpf_smp_store_release(p, v) \
|
||||
do { \
|
||||
__sync_synchronize(); \
|
||||
__XSK_WRITE_ONCE(*p, v); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#ifndef libbpf_smp_load_acquire
|
||||
#define libbpf_smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
|
||||
__sync_synchronize(); \
|
||||
___p1; \
|
||||
})
|
||||
#endif
|
||||
|
||||
/* LIBRARY INTERNAL -- END */
|
||||
|
||||
/* Do not access these members directly. Use the functions below. */
|
||||
#define DEFINE_XSK_RING(name) \
|
||||
struct name { \
|
||||
__u32 cached_prod; \
|
||||
__u32 cached_cons; \
|
||||
__u32 mask; \
|
||||
__u32 size; \
|
||||
__u32 *producer; \
|
||||
__u32 *consumer; \
|
||||
void *ring; \
|
||||
__u32 *flags; \
|
||||
}
|
||||
|
||||
DEFINE_XSK_RING(xsk_ring_prod);
|
||||
DEFINE_XSK_RING(xsk_ring_cons);
|
||||
|
||||
/* For a detailed explanation on the memory barriers associated with the
|
||||
* ring, please take a look at net/xdp/xsk_queue.h.
|
||||
*/
|
||||
|
||||
struct xsk_umem;
|
||||
struct xsk_socket;
|
||||
|
||||
static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
|
||||
__u32 idx)
|
||||
{
|
||||
__u64 *addrs = (__u64 *)fill->ring;
|
||||
|
||||
return &addrs[idx & fill->mask];
|
||||
}
|
||||
|
||||
static inline const __u64 *
|
||||
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
|
||||
{
|
||||
const __u64 *addrs = (const __u64 *)comp->ring;
|
||||
|
||||
return &addrs[idx & comp->mask];
|
||||
}
|
||||
|
||||
static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
|
||||
__u32 idx)
|
||||
{
|
||||
struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
|
||||
|
||||
return &descs[idx & tx->mask];
|
||||
}
|
||||
|
||||
static inline const struct xdp_desc *
|
||||
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
|
||||
{
|
||||
const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
|
||||
|
||||
return &descs[idx & rx->mask];
|
||||
}
|
||||
|
||||
static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
|
||||
{
|
||||
return *r->flags & XDP_RING_NEED_WAKEUP;
|
||||
}
|
||||
|
||||
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
|
||||
{
|
||||
__u32 free_entries = r->cached_cons - r->cached_prod;
|
||||
|
||||
if (free_entries >= nb)
|
||||
return free_entries;
|
||||
|
||||
	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries in the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_prod - r->cached_cons + r->size.
	 */
|
||||
r->cached_cons = libbpf_smp_load_acquire(r->consumer);
|
||||
r->cached_cons += r->size;
|
||||
|
||||
return r->cached_cons - r->cached_prod;
|
||||
}
|
||||
|
||||
static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
|
||||
{
|
||||
__u32 entries = r->cached_prod - r->cached_cons;
|
||||
|
||||
if (entries == 0) {
|
||||
r->cached_prod = libbpf_smp_load_acquire(r->producer);
|
||||
entries = r->cached_prod - r->cached_cons;
|
||||
}
|
||||
|
||||
return (entries > nb) ? nb : entries;
|
||||
}
|
||||
|
||||
static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
|
||||
{
|
||||
if (xsk_prod_nb_free(prod, nb) < nb)
|
||||
return 0;
|
||||
|
||||
*idx = prod->cached_prod;
|
||||
prod->cached_prod += nb;
|
||||
|
||||
return nb;
|
||||
}
|
||||
|
||||
static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
|
||||
{
|
||||
/* Make sure everything has been written to the ring before indicating
|
||||
* this to the kernel by writing the producer pointer.
|
||||
*/
|
||||
libbpf_smp_store_release(prod->producer, *prod->producer + nb);
|
||||
}
|
||||
|
||||
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
|
||||
{
|
||||
__u32 entries = xsk_cons_nb_avail(cons, nb);
|
||||
|
||||
if (entries > 0) {
|
||||
*idx = cons->cached_cons;
|
||||
cons->cached_cons += entries;
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
|
||||
{
|
||||
cons->cached_cons -= nb;
|
||||
}
|
||||
|
||||
static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
|
||||
{
|
||||
/* Make sure data has been read before indicating we are done
|
||||
* with the entries by updating the consumer pointer.
|
||||
*/
|
||||
libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
|
||||
|
||||
}
|
||||
|
||||
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
|
||||
{
|
||||
return &((char *)umem_area)[addr];
|
||||
}
|
||||
|
||||
static inline __u64 xsk_umem__extract_addr(__u64 addr)
|
||||
{
|
||||
return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
|
||||
}
|
||||
|
||||
static inline __u64 xsk_umem__extract_offset(__u64 addr)
|
||||
{
|
||||
return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
|
||||
}
|
||||
|
||||
static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
|
||||
{
|
||||
return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
|
||||
}
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_umem__fd(const struct xsk_umem *umem);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_socket__fd(const struct xsk_socket *xsk);
|
||||
|
||||
#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
|
||||
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
|
||||
#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */
|
||||
#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
|
||||
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
|
||||
#define XSK_UMEM__DEFAULT_FLAGS 0
|
||||
|
||||
struct xsk_umem_config {
|
||||
__u32 fill_size;
|
||||
__u32 comp_size;
|
||||
__u32 frame_size;
|
||||
__u32 frame_headroom;
|
||||
__u32 flags;
|
||||
};
|
||||
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);
|
||||
|
||||
/* Flags for the libbpf_flags field. */
|
||||
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
|
||||
|
||||
struct xsk_socket_config {
|
||||
__u32 rx_size;
|
||||
__u32 tx_size;
|
||||
__u32 libbpf_flags;
|
||||
__u32 xdp_flags;
|
||||
__u16 bind_flags;
|
||||
};
|
||||
|
||||
/* Set config to NULL to get the default configuration. */
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_umem__create(struct xsk_umem **umem,
|
||||
void *umem_area, __u64 size,
|
||||
struct xsk_ring_prod *fill,
|
||||
struct xsk_ring_cons *comp,
|
||||
const struct xsk_umem_config *config);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
|
||||
void *umem_area, __u64 size,
|
||||
struct xsk_ring_prod *fill,
|
||||
struct xsk_ring_cons *comp,
|
||||
const struct xsk_umem_config *config);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
|
||||
void *umem_area, __u64 size,
|
||||
struct xsk_ring_prod *fill,
|
||||
struct xsk_ring_cons *comp,
|
||||
const struct xsk_umem_config *config);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_socket__create(struct xsk_socket **xsk,
|
||||
const char *ifname, __u32 queue_id,
|
||||
struct xsk_umem *umem,
|
||||
struct xsk_ring_cons *rx,
|
||||
struct xsk_ring_prod *tx,
|
||||
const struct xsk_socket_config *config);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
|
||||
const char *ifname,
|
||||
__u32 queue_id, struct xsk_umem *umem,
|
||||
struct xsk_ring_cons *rx,
|
||||
struct xsk_ring_prod *tx,
|
||||
struct xsk_ring_prod *fill,
|
||||
struct xsk_ring_cons *comp,
|
||||
const struct xsk_socket_config *config);
|
||||
|
||||
/* Returns 0 for success and -EBUSY if the umem is still in use. */
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
int xsk_umem__delete(struct xsk_umem *umem);
|
||||
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
|
||||
void xsk_socket__delete(struct xsk_socket *xsk);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif /* __LIBBPF_XSK_H */
|
||||
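For reference, the ring accessors declared in the file removed above were used along these lines (the same API now ships with libxdp). A minimal RX-drain sketch, assuming the socket, UMEM area and rings were already set up with the deprecated xsk_umem__create()/xsk_socket__create() calls:

```c
#include <poll.h>
#include <bpf/xsk.h>	/* or <xdp/xsk.h> when building against libxdp */

/* drain up to 'budget' RX descriptors and recycle their frames to the fill ring */
static unsigned int rx_poll_once(struct xsk_socket *xsk, struct xsk_ring_cons *rx,
				 struct xsk_ring_prod *fill, void *umem_area,
				 unsigned int budget)
{
	__u32 idx_rx = 0, idx_fill = 0;
	unsigned int i, rcvd;

	rcvd = xsk_ring_cons__peek(rx, budget, &idx_rx);
	if (!rcvd)
		return 0;

	/* reserve the same number of fill-ring slots so frames can be reused;
	 * the wait loop is deliberately simplified for this sketch
	 */
	while (xsk_ring_prod__reserve(fill, rcvd, &idx_fill) != rcvd) {
		struct pollfd pfd = { .fd = xsk_socket__fd(xsk), .events = POLLIN };

		poll(&pfd, 1, -1);
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx_rx + i);
		void *pkt = xsk_umem__get_data(umem_area, desc->addr);

		/* ... process 'pkt' of desc->len bytes here ... */
		(void)pkt;
		*xsk_ring_prod__fill_addr(fill, idx_fill + i) = desc->addr;
	}

	xsk_ring_prod__submit(fill, rcvd);
	xsk_ring_cons__release(rx, rcvd);

	return rcvd;
}
```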
@@ -1,14 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -euox pipefail
|
||||
|
||||
CFLAGS=${CFLAGS:-}
|
||||
|
||||
cat << EOF > main.c
|
||||
#include <bpf/libbpf.h>
|
||||
int main() {
|
||||
return bpf_object__open(0) < 0;
|
||||
}
|
||||
EOF
|
||||
|
||||
# static linking
|
||||
${CC:-cc} ${CFLAGS} -o main -I./install/usr/include main.c ./build/libbpf.a -lelf -lz
|
||||
@@ -1,5 +0,0 @@
|
||||
# TEMPORARY
|
||||
get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
|
||||
stacktrace_build_id_nmi
|
||||
stacktrace_build_id
|
||||
task_fd_query_rawtp
|
||||
@@ -1,64 +0,0 @@
|
||||
# TEMPORARY
|
||||
atomics # attach(add): actual -524 <= expected 0 (trampoline)
|
||||
bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
|
||||
bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
|
||||
bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
|
||||
bpf_loop # attaches to __x64_sys_nanosleep
|
||||
bpf_mod_race # BPF trampoline
|
||||
bpf_nf # JIT does not support calling kernel function
|
||||
core_read_macros # unknown func bpf_probe_read#4 (overlapping)
|
||||
d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
|
||||
dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline)
|
||||
fentry_fexit # fentry attach failed: -524 (trampoline)
|
||||
fentry_test # fentry_first_attach unexpected error: -524 (trampoline)
|
||||
fexit_bpf2bpf # freplace_attach_trace unexpected error: -524 (trampoline)
|
||||
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
|
||||
fexit_stress # fexit attach failed prog 0 failed: -524 (trampoline)
|
||||
fexit_test # fexit_first_attach unexpected error: -524 (trampoline)
|
||||
get_func_args_test # trampoline
|
||||
get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline)
|
||||
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
|
||||
kfree_skb # attach fentry unexpected error: -524 (trampoline)
|
||||
kfunc_call # 'bpf_prog_active': not found in kernel BTF (?)
|
||||
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
|
||||
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
|
||||
ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
|
||||
modify_return # modify_return attach failed: -524 (trampoline)
|
||||
module_attach # skel_attach skeleton attach failed: -524 (trampoline)
|
||||
kprobe_multi_test # relies on fentry
|
||||
netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
|
||||
probe_user # check_kprobe_res wrong kprobe res from probe read (?)
|
||||
recursion # skel_attach unexpected error: -524 (trampoline)
|
||||
ringbuf # skel_load skeleton load failed (?)
|
||||
sk_assign # Can't read on server: Invalid argument (?)
|
||||
sk_lookup # endianness problem
|
||||
sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline)
|
||||
skc_to_unix_sock # could not attach BPF object unexpected error: -524 (trampoline)
|
||||
socket_cookie # prog_attach unexpected error: -524 (trampoline)
|
||||
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
|
||||
tailcalls # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls (?)
|
||||
task_local_storage # failed to auto-attach program 'trace_exit_creds': -524 (trampoline)
|
||||
test_bpffs # bpffs test failed 255 (iterator)
|
||||
test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
|
||||
test_ima # failed to auto-attach program 'ima': -524 (trampoline)
|
||||
test_local_storage # failed to auto-attach program 'unlink_hook': -524 (trampoline)
|
||||
test_lsm # failed to find kernel BTF type ID of '__x64_sys_setdomainname': -3 (?)
|
||||
test_overhead # attach_fentry unexpected error: -524 (trampoline)
|
||||
test_profiler # unknown func bpf_probe_read_str#45 (overlapping)
|
||||
timer # failed to auto-attach program 'test1': -524 (trampoline)
|
||||
timer_crash # trampoline
|
||||
timer_mim # failed to auto-attach program 'test1': -524 (trampoline)
|
||||
trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline)
|
||||
trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
|
||||
trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
|
||||
trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
|
||||
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
|
||||
vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline)
|
||||
xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?)
|
||||
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
|
||||
xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
|
||||
map_kptr # failed to open_and_load program: -524 (trampoline)
|
||||
bpf_cookie # failed to open_and_load program: -524 (trampoline)
|
||||
xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22)
|
||||
send_signal # intermittently fails to receive signal
|
||||
select_reuseport # intermittently fails on new s390x setup
|
||||
@@ -1,63 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
source $(cd $(dirname $0) && pwd)/helpers.sh
|
||||
|
||||
ARCH=$(uname -m)
|
||||
|
||||
STATUS_FILE=/exitstatus
|
||||
|
||||
read_lists() {
|
||||
(for path in "$@"; do
|
||||
if [[ -s "$path" ]]; then
|
||||
cat "$path"
|
||||
fi;
|
||||
done) | cut -d'#' -f1 | tr -s ' \t\n' ','
|
||||
}
|
||||
|
||||
test_progs() {
|
||||
if [[ "${KERNEL}" != '4.9.0' ]]; then
|
||||
travis_fold start test_progs "Testing test_progs"
|
||||
# "&& true" does not change the return code (it is not executed
|
||||
# if the Python script fails), but it prevents exiting on a
|
||||
# failure due to the "set -e".
|
||||
./test_progs ${BLACKLIST:+-d$BLACKLIST} ${WHITELIST:+-a$WHITELIST} && true
|
||||
echo "test_progs:$?" >> "${STATUS_FILE}"
|
||||
travis_fold end test_progs
|
||||
fi
|
||||
|
||||
travis_fold start test_progs-no_alu32 "Testing test_progs-no_alu32"
|
||||
./test_progs-no_alu32 ${BLACKLIST:+-d$BLACKLIST} ${WHITELIST:+-a$WHITELIST} && true
|
||||
echo "test_progs-no_alu32:$?" >> "${STATUS_FILE}"
|
||||
travis_fold end test_progs-no_alu32
|
||||
}
|
||||
|
||||
test_maps() {
|
||||
travis_fold start test_maps "Testing test_maps"
|
||||
./test_maps && true
|
||||
echo "test_maps:$?" >> "${STATUS_FILE}"
|
||||
travis_fold end test_maps
|
||||
}
|
||||
|
||||
test_verifier() {
|
||||
travis_fold start test_verifier "Testing test_verifier"
|
||||
./test_verifier && true
|
||||
echo "test_verifier:$?" >> "${STATUS_FILE}"
|
||||
travis_fold end test_verifier
|
||||
}
|
||||
|
||||
travis_fold end vm_init
|
||||
|
||||
configs_path=${PROJECT_NAME}/vmtest/configs
|
||||
BLACKLIST=$(read_lists "$configs_path/blacklist/BLACKLIST-${KERNEL}" "$configs_path/blacklist/BLACKLIST-${KERNEL}.${ARCH}")
|
||||
WHITELIST=$(read_lists "$configs_path/whitelist/WHITELIST-${KERNEL}" "$configs_path/whitelist/WHITELIST-${KERNEL}.${ARCH}")
|
||||
|
||||
cd ${PROJECT_NAME}/selftests/bpf
|
||||
|
||||
test_progs
|
||||
|
||||
if [[ "${KERNEL}" == 'latest' ]]; then
|
||||
# test_maps
|
||||
test_verifier
|
||||
fi
|
||||