mirror of
https://github.com/netdata/libbpf.git
synced 2026-03-18 23:39:06 +08:00
Compare commits
22 Commits
master
...
netdata_pa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d42052959d | ||
|
|
360a2fd909 | ||
|
|
8b905090e8 | ||
|
|
dd7dd01114 | ||
|
|
a16e904d6c | ||
|
|
dac1c4b6a8 | ||
|
|
862b60f205 | ||
|
|
70599f3a1e | ||
|
|
e4f2e6e865 | ||
|
|
74c16e9a0c | ||
|
|
e61e089911 | ||
|
|
b9d46530c3 | ||
|
|
86175df408 | ||
|
|
720324afab | ||
|
|
aea40f7179 | ||
|
|
54a7bc87d5 | ||
|
|
9979463ccf | ||
|
|
b91ca01922 | ||
|
|
8ded7c6db0 | ||
|
|
7df4ea0f0d | ||
|
|
02333ba360 | ||
|
|
6921017d25 |
31
.github/actions/build-selftests/action.yml
vendored
Normal file
31
.github/actions/build-selftests/action.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
name: 'build-selftests'
|
||||||
|
description: 'Build BPF selftests'
|
||||||
|
inputs:
|
||||||
|
repo-path:
|
||||||
|
description: 'where is the source code'
|
||||||
|
required: true
|
||||||
|
kernel:
|
||||||
|
description: 'kernel version or LATEST'
|
||||||
|
required: true
|
||||||
|
default: 'LATEST'
|
||||||
|
vmlinux:
|
||||||
|
description: 'where is vmlinux file'
|
||||||
|
required: true
|
||||||
|
default: '${{ github.workspace }}/vmlinux'
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
steps:
|
||||||
|
- shell: bash
|
||||||
|
run: |
|
||||||
|
source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh
|
||||||
|
foldable start "Setup Env"
|
||||||
|
sudo apt-get install -y qemu-kvm zstd binutils-dev elfutils libcap-dev libelf-dev libdw-dev python3-docutils
|
||||||
|
foldable end
|
||||||
|
- shell: bash
|
||||||
|
run: |
|
||||||
|
export KERNEL=${{ inputs.kernel }}
|
||||||
|
export REPO_ROOT="${{ github.workspace }}"
|
||||||
|
export REPO_PATH="${{ inputs.repo-path }}"
|
||||||
|
export VMLINUX_BTF="${{ inputs.vmlinux }}"
|
||||||
|
${{ github.action_path }}/build_selftests.sh
|
||||||
60
.github/actions/build-selftests/build_selftests.sh
vendored
Executable file
60
.github/actions/build-selftests/build_selftests.sh
vendored
Executable file
@@ -0,0 +1,60 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
THISDIR="$(cd $(dirname $0) && pwd)"
|
||||||
|
|
||||||
|
source ${THISDIR}/helpers.sh
|
||||||
|
|
||||||
|
foldable start prepare_selftests "Building selftests"
|
||||||
|
|
||||||
|
LIBBPF_PATH="${REPO_ROOT}"
|
||||||
|
|
||||||
|
llvm_default_version() {
|
||||||
|
echo "16"
|
||||||
|
}
|
||||||
|
|
||||||
|
llvm_latest_version() {
|
||||||
|
echo "17"
|
||||||
|
}
|
||||||
|
|
||||||
|
LLVM_VERSION=$(llvm_default_version)
|
||||||
|
if [[ "${LLVM_VERSION}" == $(llvm_latest_version) ]]; then
|
||||||
|
REPO_DISTRO_SUFFIX=""
|
||||||
|
else
|
||||||
|
REPO_DISTRO_SUFFIX="-${LLVM_VERSION}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "deb https://apt.llvm.org/focal/ llvm-toolchain-focal${REPO_DISTRO_SUFFIX} main" \
|
||||||
|
| sudo tee /etc/apt/sources.list.d/llvm.list
|
||||||
|
|
||||||
|
PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh
|
||||||
|
if [ -f "${PREPARE_SELFTESTS_SCRIPT}" ]; then
|
||||||
|
(cd "${REPO_ROOT}/${REPO_PATH}/tools/testing/selftests/bpf" && ${PREPARE_SELFTESTS_SCRIPT})
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${KERNEL}" = 'LATEST' ]]; then
|
||||||
|
VMLINUX_H=
|
||||||
|
else
|
||||||
|
VMLINUX_H=${THISDIR}/vmlinux.h
|
||||||
|
fi
|
||||||
|
|
||||||
|
cd ${REPO_ROOT}/${REPO_PATH}
|
||||||
|
make headers
|
||||||
|
make \
|
||||||
|
CLANG=clang-${LLVM_VERSION} \
|
||||||
|
LLC=llc-${LLVM_VERSION} \
|
||||||
|
LLVM_STRIP=llvm-strip-${LLVM_VERSION} \
|
||||||
|
VMLINUX_BTF="${VMLINUX_BTF}" \
|
||||||
|
VMLINUX_H=${VMLINUX_H} \
|
||||||
|
-C "${REPO_ROOT}/${REPO_PATH}/tools/testing/selftests/bpf" \
|
||||||
|
-j $((4*$(nproc))) > /dev/null
|
||||||
|
cd -
|
||||||
|
mkdir ${LIBBPF_PATH}/selftests
|
||||||
|
cp -R "${REPO_ROOT}/${REPO_PATH}/tools/testing/selftests/bpf" \
|
||||||
|
${LIBBPF_PATH}/selftests
|
||||||
|
cd ${LIBBPF_PATH}
|
||||||
|
rm selftests/bpf/.gitignore
|
||||||
|
git add selftests
|
||||||
|
|
||||||
|
foldable end prepare_selftests
|
||||||
38
.github/actions/build-selftests/helpers.sh
vendored
Normal file
38
.github/actions/build-selftests/helpers.sh
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# shellcheck shell=bash
|
||||||
|
|
||||||
|
# $1 - start or end
|
||||||
|
# $2 - fold identifier, no spaces
|
||||||
|
# $3 - fold section description
|
||||||
|
foldable() {
|
||||||
|
local YELLOW='\033[1;33m'
|
||||||
|
local NOCOLOR='\033[0m'
|
||||||
|
if [ $1 = "start" ]; then
|
||||||
|
line="::group::$2"
|
||||||
|
if [ ! -z "${3:-}" ]; then
|
||||||
|
line="$line - ${YELLOW}$3${NOCOLOR}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
line="::endgroup::"
|
||||||
|
fi
|
||||||
|
echo -e "$line"
|
||||||
|
}
|
||||||
|
|
||||||
|
__print() {
|
||||||
|
local TITLE=""
|
||||||
|
if [[ -n $2 ]]; then
|
||||||
|
TITLE=" title=$2"
|
||||||
|
fi
|
||||||
|
echo "::$1${TITLE}::$3"
|
||||||
|
}
|
||||||
|
|
||||||
|
# $1 - title
|
||||||
|
# $2 - message
|
||||||
|
print_error() {
|
||||||
|
__print error $1 $2
|
||||||
|
}
|
||||||
|
|
||||||
|
# $1 - title
|
||||||
|
# $2 - message
|
||||||
|
print_notice() {
|
||||||
|
__print notice $1 $2
|
||||||
|
}
|
||||||
3
.github/actions/build-selftests/prepare_selftests-4.9.0.sh
vendored
Executable file
3
.github/actions/build-selftests/prepare_selftests-4.9.0.sh
vendored
Executable file
@@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
printf "all:\n\ttouch bpf_testmod.ko\n\nclean:\n" > bpf_testmod/Makefile
|
||||||
3
.github/actions/build-selftests/prepare_selftests-5.5.0.sh
vendored
Executable file
3
.github/actions/build-selftests/prepare_selftests-5.5.0.sh
vendored
Executable file
@@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
printf "all:\n\ttouch bpf_testmod.ko\n\nclean:\n" > bpf_testmod/Makefile
|
||||||
197537
.github/actions/build-selftests/vmlinux.h
vendored
197537
.github/actions/build-selftests/vmlinux.h
vendored
File diff suppressed because it is too large
Load Diff
99
.github/actions/vmtest/action.yml
vendored
Normal file
99
.github/actions/vmtest/action.yml
vendored
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
name: 'vmtest'
|
||||||
|
description: 'Build + run vmtest'
|
||||||
|
inputs:
|
||||||
|
kernel:
|
||||||
|
description: 'kernel version or LATEST'
|
||||||
|
required: true
|
||||||
|
default: 'LATEST'
|
||||||
|
arch:
|
||||||
|
description: 'what arch to test'
|
||||||
|
required: true
|
||||||
|
default: 'x86_64'
|
||||||
|
pahole:
|
||||||
|
description: 'pahole rev or master'
|
||||||
|
required: true
|
||||||
|
default: 'master'
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
steps:
|
||||||
|
# setup environment
|
||||||
|
- name: Setup environment
|
||||||
|
uses: libbpf/ci/setup-build-env@main
|
||||||
|
with:
|
||||||
|
pahole: ${{ inputs.pahole }}
|
||||||
|
# 1. download CHECKPOINT kernel source
|
||||||
|
- name: Get checkpoint commit
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
cat CHECKPOINT-COMMIT
|
||||||
|
echo "CHECKPOINT=$(cat CHECKPOINT-COMMIT)" >> $GITHUB_ENV
|
||||||
|
- name: Get kernel source at checkpoint
|
||||||
|
uses: libbpf/ci/get-linux-source@main
|
||||||
|
with:
|
||||||
|
repo: 'https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git'
|
||||||
|
rev: ${{ env.CHECKPOINT }}
|
||||||
|
dest: '${{ github.workspace }}/.kernel'
|
||||||
|
- name: Patch kernel source
|
||||||
|
uses: libbpf/ci/patch-kernel@main
|
||||||
|
with:
|
||||||
|
patches-root: '${{ github.workspace }}/ci/diffs'
|
||||||
|
repo-root: '.kernel'
|
||||||
|
- name: Prepare to build BPF selftests
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh
|
||||||
|
foldable start "Prepare building selftest"
|
||||||
|
cd .kernel
|
||||||
|
cat tools/testing/selftests/bpf/config \
|
||||||
|
tools/testing/selftests/bpf/config.${{ inputs.arch }} > .config
|
||||||
|
make olddefconfig && make prepare
|
||||||
|
cd -
|
||||||
|
foldable end
|
||||||
|
# 2. if kernel == LATEST, build kernel image from tree
|
||||||
|
- name: Build kernel image
|
||||||
|
if: ${{ inputs.kernel == 'LATEST' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh
|
||||||
|
foldable start "Build Kernel Image"
|
||||||
|
cd .kernel
|
||||||
|
make -j $((4*$(nproc))) all > /dev/null
|
||||||
|
cp vmlinux ${{ github.workspace }}
|
||||||
|
cd -
|
||||||
|
foldable end
|
||||||
|
# else, just download prebuilt kernel image
|
||||||
|
- name: Download prebuilt kernel
|
||||||
|
if: ${{ inputs.kernel != 'LATEST' }}
|
||||||
|
uses: libbpf/ci/download-vmlinux@main
|
||||||
|
with:
|
||||||
|
kernel: ${{ inputs.kernel }}
|
||||||
|
arch: ${{ inputs.arch }}
|
||||||
|
# 3. build selftests
|
||||||
|
- name: Build BPF selftests
|
||||||
|
uses: ./.github/actions/build-selftests
|
||||||
|
with:
|
||||||
|
repo-path: '.kernel'
|
||||||
|
kernel: ${{ inputs.kernel }}
|
||||||
|
# 4. prepare rootfs
|
||||||
|
- name: prepare rootfs
|
||||||
|
uses: libbpf/ci/prepare-rootfs@main
|
||||||
|
env:
|
||||||
|
KBUILD_OUTPUT: '.kernel'
|
||||||
|
with:
|
||||||
|
project-name: 'libbpf'
|
||||||
|
arch: ${{ inputs.arch }}
|
||||||
|
kernel: ${{ inputs.kernel }}
|
||||||
|
kernel-root: '.kernel'
|
||||||
|
kbuild-output: ${{ env.KBUILD_OUTPUT }}
|
||||||
|
image-output: '/tmp/root.img'
|
||||||
|
# 5. run selftest in QEMU
|
||||||
|
- name: Run selftests
|
||||||
|
env:
|
||||||
|
KERNEL: ${{ inputs.kernel }}
|
||||||
|
REPO_ROOT: ${{ github.workspace }}
|
||||||
|
uses: libbpf/ci/run-qemu@main
|
||||||
|
with:
|
||||||
|
arch: ${{ inputs.arch }}
|
||||||
|
img: '/tmp/root.img'
|
||||||
|
vmlinuz: 'vmlinuz'
|
||||||
|
kernel-root: '.kernel'
|
||||||
49
.github/workflows/build.yml
vendored
49
.github/workflows/build.yml
vendored
@@ -42,7 +42,7 @@ jobs:
|
|||||||
- name: gcc-12
|
- name: gcc-12
|
||||||
target: RUN_GCC12
|
target: RUN_GCC12
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
name: Checkout
|
name: Checkout
|
||||||
- uses: ./.github/actions/setup
|
- uses: ./.github/actions/setup
|
||||||
name: Setup
|
name: Setup
|
||||||
@@ -53,7 +53,7 @@ jobs:
|
|||||||
|
|
||||||
ubuntu:
|
ubuntu:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Ubuntu Build (${{ matrix.arch }})
|
name: Ubuntu Focal Build (${{ matrix.arch }})
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
@@ -61,32 +61,31 @@ jobs:
|
|||||||
- arch: aarch64
|
- arch: aarch64
|
||||||
- arch: ppc64le
|
- arch: ppc64le
|
||||||
- arch: s390x
|
- arch: s390x
|
||||||
- arch: amd64
|
- arch: x86
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
name: Checkout
|
name: Checkout
|
||||||
|
|
||||||
- name: Setup QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
with:
|
|
||||||
image: tonistiigi/binfmt:qemu-v8.1.5
|
|
||||||
|
|
||||||
- uses: ./.github/actions/setup
|
- uses: ./.github/actions/setup
|
||||||
name: Pre-Setup
|
name: Pre-Setup
|
||||||
|
|
||||||
- run: source /tmp/ci_setup && sudo -E $CI_ROOT/managers/ubuntu.sh
|
- run: source /tmp/ci_setup && sudo -E $CI_ROOT/managers/ubuntu.sh
|
||||||
if: matrix.arch == 'amd64'
|
if: matrix.arch == 'x86'
|
||||||
name: Setup
|
name: Setup
|
||||||
|
- uses: uraimo/run-on-arch-action@v2.0.5
|
||||||
- name: Build in docker
|
name: Build in docker
|
||||||
if: matrix.arch != 'amd64'
|
if: matrix.arch != 'x86'
|
||||||
run: |
|
with:
|
||||||
cp /tmp/ci_setup ${GITHUB_WORKSPACE}
|
distro:
|
||||||
docker run --rm \
|
ubuntu20.04
|
||||||
--platform linux/${{ matrix.arch }} \
|
arch:
|
||||||
-v ${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE} \
|
${{ matrix.arch }}
|
||||||
-e GITHUB_WORKSPACE=${GITHUB_WORKSPACE} \
|
setup:
|
||||||
-w /ci/workspace \
|
cp /tmp/ci_setup $GITHUB_WORKSPACE
|
||||||
ubuntu:noble \
|
dockerRunArgs: |
|
||||||
${GITHUB_WORKSPACE}/ci/build-in-docker.sh
|
--volume "${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE}"
|
||||||
|
shell: /bin/bash
|
||||||
|
install: |
|
||||||
|
export DEBIAN_FRONTEND=noninteractive
|
||||||
|
export TZ="America/Los_Angeles"
|
||||||
|
apt-get update -y
|
||||||
|
apt-get install -y tzdata build-essential sudo
|
||||||
|
run: source ${GITHUB_WORKSPACE}/ci_setup && $CI_ROOT/managers/ubuntu.sh
|
||||||
|
|||||||
2
.github/workflows/cifuzz.yml
vendored
2
.github/workflows/cifuzz.yml
vendored
@@ -33,7 +33,7 @@ jobs:
|
|||||||
dry-run: false
|
dry-run: false
|
||||||
sanitizer: ${{ matrix.sanitizer }}
|
sanitizer: ${{ matrix.sanitizer }}
|
||||||
- name: Upload Crash
|
- name: Upload Crash
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v1
|
||||||
if: failure() && steps.build.outcome == 'success'
|
if: failure() && steps.build.outcome == 'success'
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.sanitizer }}-artifacts
|
name: ${{ matrix.sanitizer }}-artifacts
|
||||||
|
|||||||
4
.github/workflows/codeql.yml
vendored
4
.github/workflows/codeql.yml
vendored
@@ -17,7 +17,7 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
analyze:
|
analyze:
|
||||||
name: Analyze
|
name: Analyze
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
@@ -32,7 +32,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@v2
|
||||||
|
|||||||
14
.github/workflows/coverity.yml
vendored
14
.github/workflows/coverity.yml
vendored
@@ -1,30 +1,30 @@
|
|||||||
name: libbpf-ci-coverity
|
name: libbpf-ci-coverity
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '0 18 * * *'
|
- cron: '0 18 * * *'
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
coverity:
|
coverity:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
if: github.repository == 'libbpf/libbpf'
|
||||||
name: Coverity
|
name: Coverity
|
||||||
env:
|
|
||||||
COVERITY_SCAN_TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }}
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
- uses: ./.github/actions/setup
|
- uses: ./.github/actions/setup
|
||||||
- name: Run coverity
|
- name: Run coverity
|
||||||
if: ${{ env.COVERITY_SCAN_TOKEN }}
|
|
||||||
run: |
|
run: |
|
||||||
|
source "${GITHUB_WORKSPACE}"/ci/vmtest/helpers.sh
|
||||||
|
foldable start "Setup CI env"
|
||||||
source /tmp/ci_setup
|
source /tmp/ci_setup
|
||||||
export COVERITY_SCAN_NOTIFICATION_EMAIL="${AUTHOR_EMAIL}"
|
export COVERITY_SCAN_NOTIFICATION_EMAIL="${AUTHOR_EMAIL}"
|
||||||
export COVERITY_SCAN_BRANCH_PATTERN=${GITHUB_REF##refs/*/}
|
export COVERITY_SCAN_BRANCH_PATTERN=${GITHUB_REF##refs/*/}
|
||||||
export TRAVIS_BRANCH=${COVERITY_SCAN_BRANCH_PATTERN}
|
export TRAVIS_BRANCH=${COVERITY_SCAN_BRANCH_PATTERN}
|
||||||
|
foldable end
|
||||||
scripts/coverity.sh
|
scripts/coverity.sh
|
||||||
env:
|
env:
|
||||||
|
COVERITY_SCAN_TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }}
|
||||||
COVERITY_SCAN_PROJECT_NAME: libbpf
|
COVERITY_SCAN_PROJECT_NAME: libbpf
|
||||||
COVERITY_SCAN_BUILD_COMMAND_PREPEND: 'cd src/'
|
COVERITY_SCAN_BUILD_COMMAND_PREPEND: 'cd src/'
|
||||||
COVERITY_SCAN_BUILD_COMMAND: 'make'
|
COVERITY_SCAN_BUILD_COMMAND: 'make'
|
||||||
|
|||||||
2
.github/workflows/lint.yml
vendored
2
.github/workflows/lint.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Run ShellCheck
|
- name: Run ShellCheck
|
||||||
uses: ludeeus/action-shellcheck@master
|
uses: ludeeus/action-shellcheck@master
|
||||||
env:
|
env:
|
||||||
|
|||||||
43
.github/workflows/ondemand.yml
vendored
43
.github/workflows/ondemand.yml
vendored
@@ -3,29 +3,34 @@ name: ondemand
|
|||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
arch:
|
kernel-origin:
|
||||||
default: 'x86_64'
|
description: 'git repo for linux kernel'
|
||||||
|
default: 'https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git'
|
||||||
required: true
|
required: true
|
||||||
llvm-version:
|
kernel-rev:
|
||||||
default: '18'
|
description: 'rev/tag/branch for linux kernel'
|
||||||
required: true
|
|
||||||
kernel:
|
|
||||||
default: 'LATEST'
|
|
||||||
required: true
|
|
||||||
pahole:
|
|
||||||
default: "master"
|
default: "master"
|
||||||
required: true
|
required: true
|
||||||
runs-on:
|
pahole-origin:
|
||||||
default: 'ubuntu-24.04'
|
description: 'git repo for pahole'
|
||||||
|
default: 'https://git.kernel.org/pub/scm/devel/pahole/pahole.git'
|
||||||
|
required: true
|
||||||
|
pahole-rev:
|
||||||
|
description: 'ref/tag/branch for pahole'
|
||||||
|
default: "master"
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
vmtest:
|
vmtest:
|
||||||
name: ${{ inputs.kernel }} kernel llvm-${{ inputs.llvm-version }} pahole@${{ inputs.pahole }}
|
runs-on: ubuntu-latest
|
||||||
uses: ./.github/workflows/vmtest.yml
|
name: vmtest with customized pahole/Kernel
|
||||||
with:
|
steps:
|
||||||
runs_on: ${{ inputs.runs-on }}
|
- uses: actions/checkout@v3
|
||||||
kernel: ${{ inputs.kernel }}
|
- uses: ./.github/actions/setup
|
||||||
arch: ${{ inputs.arch }}
|
- uses: ./.github/actions/vmtest
|
||||||
llvm-version: ${{ inputs.llvm-version }}
|
with:
|
||||||
pahole: ${{ inputs.pahole }}
|
kernel: 'LATEST'
|
||||||
|
kernel-rev: ${{ github.event.inputs.kernel-rev }}
|
||||||
|
kernel-origin: ${{ github.event.inputs.kernel-origin }}
|
||||||
|
pahole: ${{ github.event.inputs.pahole-rev }}
|
||||||
|
pahole-origin: ${{ github.event.inputs.pahole-origin }}
|
||||||
|
|||||||
20
.github/workflows/pahole.yml
vendored
Normal file
20
.github/workflows/pahole.yml
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
name: pahole-staging
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 18 * * *'
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
vmtest:
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
name: Kernel LATEST + staging pahole
|
||||||
|
env:
|
||||||
|
STAGING: tmp.master
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: ./.github/actions/setup
|
||||||
|
- uses: ./.github/actions/vmtest
|
||||||
|
with:
|
||||||
|
kernel: LATEST
|
||||||
|
pahole: $STAGING
|
||||||
40
.github/workflows/test.yml
vendored
40
.github/workflows/test.yml
vendored
@@ -1,36 +1,42 @@
|
|||||||
name: libbpf-ci
|
name: libbpf-ci
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
push:
|
push:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '0 18 * * *'
|
- cron: '0 18 * * *'
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ci-test-${{ github.head_ref }}
|
group: ci-test-${{ github.head_ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
vmtest:
|
vmtest:
|
||||||
|
runs-on: ${{ matrix.runs_on }}
|
||||||
|
name: Kernel ${{ matrix.kernel }} on ${{ matrix.runs_on }} + selftests
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
- kernel: 'LATEST'
|
- kernel: 'LATEST'
|
||||||
runs_on: 'ubuntu-24.04'
|
runs_on: ubuntu-20.04
|
||||||
|
arch: 'x86_64'
|
||||||
|
- kernel: '5.5.0'
|
||||||
|
runs_on: ubuntu-20.04
|
||||||
|
arch: 'x86_64'
|
||||||
|
- kernel: '4.9.0'
|
||||||
|
runs_on: ubuntu-20.04
|
||||||
arch: 'x86_64'
|
arch: 'x86_64'
|
||||||
llvm-version: '18'
|
|
||||||
pahole: 'master'
|
|
||||||
- kernel: 'LATEST'
|
- kernel: 'LATEST'
|
||||||
runs_on: 'ubuntu-24.04'
|
runs_on: s390x
|
||||||
arch: 'x86_64'
|
arch: 's390x'
|
||||||
llvm-version: '18'
|
steps:
|
||||||
pahole: 'tmp.master'
|
- uses: actions/checkout@v3
|
||||||
name: Linux ${{ matrix.kernel }} llvm-${{ matrix.llvm-version }}
|
name: Checkout
|
||||||
uses: ./.github/workflows/vmtest.yml
|
- uses: ./.github/actions/setup
|
||||||
with:
|
name: Setup
|
||||||
runs_on: ${{ matrix.runs_on }}
|
- uses: ./.github/actions/vmtest
|
||||||
kernel: ${{ matrix.kernel }}
|
name: vmtest
|
||||||
arch: ${{ matrix.arch }}
|
with:
|
||||||
llvm-version: ${{ matrix.llvm-version }}
|
kernel: ${{ matrix.kernel }}
|
||||||
pahole: ${{ matrix.pahole }}
|
arch: ${{ matrix.arch }}
|
||||||
|
|||||||
117
.github/workflows/vmtest.yml
vendored
117
.github/workflows/vmtest.yml
vendored
@@ -1,117 +0,0 @@
|
|||||||
name: 'Build kernel and selftests/bpf, run selftests via vmtest'
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
runs_on:
|
|
||||||
required: true
|
|
||||||
default: 'ubuntu-24.04'
|
|
||||||
type: string
|
|
||||||
arch:
|
|
||||||
description: 'what arch to test'
|
|
||||||
required: true
|
|
||||||
default: 'x86_64'
|
|
||||||
type: string
|
|
||||||
kernel:
|
|
||||||
description: 'kernel version or LATEST'
|
|
||||||
required: true
|
|
||||||
default: 'LATEST'
|
|
||||||
type: string
|
|
||||||
pahole:
|
|
||||||
description: 'pahole rev or branch'
|
|
||||||
required: false
|
|
||||||
default: 'master'
|
|
||||||
type: string
|
|
||||||
llvm-version:
|
|
||||||
description: 'llvm version'
|
|
||||||
required: false
|
|
||||||
default: '18'
|
|
||||||
type: string
|
|
||||||
jobs:
|
|
||||||
vmtest:
|
|
||||||
name: pahole@${{ inputs.pahole }}
|
|
||||||
runs-on: ${{ inputs.runs_on }}
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Setup environment
|
|
||||||
uses: libbpf/ci/setup-build-env@v3
|
|
||||||
with:
|
|
||||||
pahole: ${{ inputs.pahole }}
|
|
||||||
arch: ${{ inputs.arch }}
|
|
||||||
llvm-version: ${{ inputs.llvm-version }}
|
|
||||||
|
|
||||||
- name: Get checkpoint commit
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
cat CHECKPOINT-COMMIT
|
|
||||||
echo "CHECKPOINT=$(cat CHECKPOINT-COMMIT)" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Get kernel source at checkpoint
|
|
||||||
uses: libbpf/ci/get-linux-source@v3
|
|
||||||
with:
|
|
||||||
repo: 'https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git'
|
|
||||||
rev: ${{ env.CHECKPOINT }}
|
|
||||||
dest: '${{ github.workspace }}/.kernel'
|
|
||||||
|
|
||||||
- name: Patch kernel source
|
|
||||||
uses: libbpf/ci/patch-kernel@v3
|
|
||||||
with:
|
|
||||||
patches-root: '${{ github.workspace }}/ci/diffs'
|
|
||||||
repo-root: '.kernel'
|
|
||||||
|
|
||||||
- name: Configure kernel build
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
cd .kernel
|
|
||||||
cat tools/testing/selftests/bpf/config \
|
|
||||||
tools/testing/selftests/bpf/config.${{ inputs.arch }} > .config
|
|
||||||
# this file might or might not exist depending on kernel version
|
|
||||||
cat tools/testing/selftests/bpf/config.vm >> .config || :
|
|
||||||
make olddefconfig && make prepare
|
|
||||||
cd -
|
|
||||||
|
|
||||||
- name: Build kernel image
|
|
||||||
if: ${{ inputs.kernel == 'LATEST' }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
cd .kernel
|
|
||||||
make -j $((4*$(nproc))) all
|
|
||||||
cp vmlinux ${{ github.workspace }}
|
|
||||||
cd -
|
|
||||||
|
|
||||||
- name: Download prebuilt kernel
|
|
||||||
if: ${{ inputs.kernel != 'LATEST' }}
|
|
||||||
uses: libbpf/ci/download-vmlinux@v3
|
|
||||||
with:
|
|
||||||
kernel: ${{ inputs.kernel }}
|
|
||||||
arch: ${{ inputs.arch }}
|
|
||||||
|
|
||||||
- name: Build selftests/bpf
|
|
||||||
uses: libbpf/ci/build-selftests@v3
|
|
||||||
env:
|
|
||||||
MAX_MAKE_JOBS: 32
|
|
||||||
VMLINUX_BTF: ${{ github.workspace }}/vmlinux
|
|
||||||
VMLINUX_H: ${{ inputs.kernel != 'LATEST' && format('{0}/.github/actions/build-selftests/vmlinux.h', github.workspace) || '' }}
|
|
||||||
with:
|
|
||||||
arch: ${{ inputs.arch }}
|
|
||||||
kernel-root: ${{ github.workspace }}/.kernel
|
|
||||||
llvm-version: ${{ inputs.llvm-version }}
|
|
||||||
|
|
||||||
- name: Run selftests
|
|
||||||
env:
|
|
||||||
ALLOWLIST_FILE: /tmp/allowlist
|
|
||||||
DENYLIST_FILE: /tmp/denylist
|
|
||||||
KERNEL: ${{ inputs.kernel }}
|
|
||||||
VMLINUX: ${{ github.workspace }}/vmlinux
|
|
||||||
LLVM_VERSION: ${{ inputs.llvm-version }}
|
|
||||||
SELFTESTS_BPF: ${{ github.workspace }}/.kernel/tools/testing/selftests/bpf
|
|
||||||
VMTEST_CONFIGS: ${{ github.workspace }}/ci/vmtest/configs
|
|
||||||
uses: libbpf/ci/run-vmtest@v3
|
|
||||||
with:
|
|
||||||
arch: ${{ inputs.arch }}
|
|
||||||
kbuild-output: ${{ github.workspace }}/.kernel
|
|
||||||
kernel-root: ${{ github.workspace }}/.kernel
|
|
||||||
vmlinuz: ${{ inputs.arch }}/vmlinuz-${{ inputs.kernel }}
|
|
||||||
|
|
||||||
22
.mailmap
22
.mailmap
@@ -1,22 +0,0 @@
|
|||||||
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
|
|
||||||
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
|
|
||||||
Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
|
|
||||||
Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
|
|
||||||
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
|
|
||||||
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
|
|
||||||
Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
|
|
||||||
Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
|
|
||||||
Herbert Xu <herbert@gondor.apana.org.au>
|
|
||||||
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
|
|
||||||
Jesper Dangaard Brouer <hawk@kernel.org> <brouer@redhat.com>
|
|
||||||
Kees Cook <kees@kernel.org> <keescook@chromium.org>
|
|
||||||
Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
|
|
||||||
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
|
|
||||||
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com>
|
|
||||||
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com>
|
|
||||||
Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
|
|
||||||
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
|
|
||||||
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
|
|
||||||
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
|
|
||||||
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vadfed@meta.com>
|
|
||||||
Vadim Fedorenko <vadim.fedorenko@linux.dev> <vfedorenko@novek.ru>
|
|
||||||
@@ -5,11 +5,6 @@
|
|||||||
# Required
|
# Required
|
||||||
version: 2
|
version: 2
|
||||||
|
|
||||||
build:
|
|
||||||
os: "ubuntu-22.04"
|
|
||||||
tools:
|
|
||||||
python: "3.11"
|
|
||||||
|
|
||||||
# Build documentation in the docs/ directory with Sphinx
|
# Build documentation in the docs/ directory with Sphinx
|
||||||
sphinx:
|
sphinx:
|
||||||
builder: html
|
builder: html
|
||||||
@@ -22,5 +17,6 @@ formats:
|
|||||||
|
|
||||||
# Optionally set the version of Python and requirements required to build your docs
|
# Optionally set the version of Python and requirements required to build your docs
|
||||||
python:
|
python:
|
||||||
|
version: 3.7
|
||||||
install:
|
install:
|
||||||
- requirements: docs/sphinx/requirements.txt
|
- requirements: docs/sphinx/requirements.txt
|
||||||
@@ -1 +1 @@
|
|||||||
b4432656b36e5cc1d50a1f2dc15357543add530e
|
496720b7cfb6574a8f6f4d434f23e3d1e6cfaeb9
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
9325d53fe9adff354b6a93fda5f38c165947da0f
|
a3e7e6b17946f48badce98d7ac360678a0ea7393
|
||||||
|
|||||||
@@ -146,7 +146,7 @@ Distributions packaging libbpf from this mirror:
|
|||||||
- [Gentoo](https://packages.gentoo.org/packages/dev-libs/libbpf)
|
- [Gentoo](https://packages.gentoo.org/packages/dev-libs/libbpf)
|
||||||
- [Debian](https://packages.debian.org/source/sid/libbpf)
|
- [Debian](https://packages.debian.org/source/sid/libbpf)
|
||||||
- [Arch](https://archlinux.org/packages/core/x86_64/libbpf/)
|
- [Arch](https://archlinux.org/packages/core/x86_64/libbpf/)
|
||||||
- [Ubuntu](https://packages.ubuntu.com/source/jammy/libbpf)
|
- [Ubuntu](https://packages.ubuntu.com/source/impish/libbpf)
|
||||||
- [Alpine](https://pkgs.alpinelinux.org/packages?name=libbpf)
|
- [Alpine](https://pkgs.alpinelinux.org/packages?name=libbpf)
|
||||||
|
|
||||||
Benefits of packaging from the mirror over packaging from kernel sources:
|
Benefits of packaging from the mirror over packaging from kernel sources:
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
export DEBIAN_FRONTEND=noninteractive
|
|
||||||
export TZ="America/Los_Angeles"
|
|
||||||
|
|
||||||
apt-get update -y
|
|
||||||
apt-get install -y tzdata build-essential sudo
|
|
||||||
source ${GITHUB_WORKSPACE}/ci_setup
|
|
||||||
|
|
||||||
$CI_ROOT/managers/ubuntu.sh
|
|
||||||
|
|
||||||
exit 0
|
|
||||||
@@ -0,0 +1,70 @@
|
|||||||
|
From 6fba14e2ed9d159f76b23fa5c16f3ea99acbc003 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Masahiro Yamada <masahiroy@kernel.org>
|
||||||
|
Date: Thu, 5 Jan 2023 12:13:06 +0900
|
||||||
|
Subject: [PATCH] s390: define RUNTIME_DISCARD_EXIT to fix link error with GNU
|
||||||
|
ld < 2.36
|
||||||
|
|
||||||
|
Nathan Chancellor reports that the s390 vmlinux fails to link with
|
||||||
|
GNU ld < 2.36 since commit 99cb0d917ffa ("arch: fix broken BuildID
|
||||||
|
for arm64 and riscv").
|
||||||
|
|
||||||
|
It happens for defconfig, or more specifically for CONFIG_EXPOLINE=y.
|
||||||
|
|
||||||
|
$ s390x-linux-gnu-ld --version | head -n1
|
||||||
|
GNU ld (GNU Binutils for Debian) 2.35.2
|
||||||
|
$ make -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- allnoconfig
|
||||||
|
$ ./scripts/config -e CONFIG_EXPOLINE
|
||||||
|
$ make -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- olddefconfig
|
||||||
|
$ make -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu-
|
||||||
|
`.exit.text' referenced in section `.s390_return_reg' of drivers/base/dd.o: defined in discarded section `.exit.text' of drivers/base/dd.o
|
||||||
|
make[1]: *** [scripts/Makefile.vmlinux:34: vmlinux] Error 1
|
||||||
|
make: *** [Makefile:1252: vmlinux] Error 2
|
||||||
|
|
||||||
|
arch/s390/kernel/vmlinux.lds.S wants to keep EXIT_TEXT:
|
||||||
|
|
||||||
|
.exit.text : {
|
||||||
|
EXIT_TEXT
|
||||||
|
}
|
||||||
|
|
||||||
|
But, at the same time, EXIT_TEXT is thrown away by DISCARD because
|
||||||
|
s390 does not define RUNTIME_DISCARD_EXIT.
|
||||||
|
|
||||||
|
I still do not understand why the latter wins after 99cb0d917ffa,
|
||||||
|
but defining RUNTIME_DISCARD_EXIT seems correct because the comment
|
||||||
|
line in arch/s390/kernel/vmlinux.lds.S says:
|
||||||
|
|
||||||
|
/*
|
||||||
|
* .exit.text is discarded at runtime, not link time,
|
||||||
|
* to deal with references from __bug_table
|
||||||
|
*/
|
||||||
|
|
||||||
|
Nathan also found that binutils commit 21401fc7bf67 ("Duplicate output
|
||||||
|
sections in scripts") cured this issue, so we cannot reproduce it with
|
||||||
|
binutils 2.36+, but it is better to not rely on it.
|
||||||
|
|
||||||
|
Fixes: 99cb0d917ffa ("arch: fix broken BuildID for arm64 and riscv")
|
||||||
|
Link: https://lore.kernel.org/all/Y7Jal56f6UBh1abE@dev-arch.thelio-3990X/
|
||||||
|
Reported-by: Nathan Chancellor <nathan@kernel.org>
|
||||||
|
Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
|
||||||
|
Link: https://lore.kernel.org/r/20230105031306.1455409-1-masahiroy@kernel.org
|
||||||
|
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
|
||||||
|
---
|
||||||
|
arch/s390/kernel/vmlinux.lds.S | 2 ++
|
||||||
|
1 file changed, 2 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
|
||||||
|
index 5ea3830af0cc..6e101e6f499d 100644
|
||||||
|
--- a/arch/s390/kernel/vmlinux.lds.S
|
||||||
|
+++ b/arch/s390/kernel/vmlinux.lds.S
|
||||||
|
@@ -17,6 +17,8 @@
|
||||||
|
/* Handle ro_after_init data on our own. */
|
||||||
|
#define RO_AFTER_INIT_DATA
|
||||||
|
|
||||||
|
+#define RUNTIME_DISCARD_EXIT
|
||||||
|
+
|
||||||
|
#define EMITS_PT_NOTE
|
||||||
|
|
||||||
|
#include <asm-generic/vmlinux.lds.h>
|
||||||
|
--
|
||||||
|
2.30.2
|
||||||
|
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
From ff8be5401b359e23ec2b74184034082564bac7c5 Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Daniel=20M=C3=BCller?= <deso@posteo.net>
|
||||||
|
Date: Thu, 25 May 2023 16:04:20 -0700
|
||||||
|
Subject: [PATCH] selftests/bpf: Check whether to run selftest
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
|
||||||
|
The sockopt test invokes test__start_subtest and then unconditionally
|
||||||
|
asserts the success. That means that even if deny-listed, any test will
|
||||||
|
still run and potentially fail.
|
||||||
|
Evaluate the return value of test__start_subtest() to achieve the
|
||||||
|
desired behavior, as other tests do.
|
||||||
|
|
||||||
|
Signed-off-by: Daniel Müller <deso@posteo.net>
|
||||||
|
---
|
||||||
|
tools/testing/selftests/bpf/prog_tests/sockopt.c | 4 +++-
|
||||||
|
1 file changed, 3 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c
|
||||||
|
index 33dd45..9e6a5e 100644
|
||||||
|
--- a/tools/testing/selftests/bpf/prog_tests/sockopt.c
|
||||||
|
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
|
||||||
|
@@ -1060,7 +1060,9 @@ void test_sockopt(void)
|
||||||
|
return;
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(tests); i++) {
|
||||||
|
- test__start_subtest(tests[i].descr);
|
||||||
|
+ if (!test__start_subtest(tests[i].descr))
|
||||||
|
+ continue;
|
||||||
|
+
|
||||||
|
ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
|
||||||
|
}
|
||||||
|
|
||||||
|
--
|
||||||
|
2.34.1
|
||||||
|
|
||||||
@@ -0,0 +1,46 @@
|
|||||||
|
From a8dfde09c90109e3a98af54847e91bde7dc2d5c2 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Song Liu <song@kernel.org>
|
||||||
|
Date: Tue, 13 Dec 2022 14:05:00 -0800
|
||||||
|
Subject: [PATCH] selftests/bpf: Select CONFIG_FUNCTION_ERROR_INJECTION
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
|
||||||
|
BPF selftests require CONFIG_FUNCTION_ERROR_INJECTION to work. However,
|
||||||
|
CONFIG_FUNCTION_ERROR_INJECTION is no longer 'y' by default after recent
|
||||||
|
changes. As a result, we are seeing errors like the following from BPF CI:
|
||||||
|
|
||||||
|
bpf_testmod_test_read() is not modifiable
|
||||||
|
__x64_sys_setdomainname is not sleepable
|
||||||
|
__x64_sys_getpgid is not sleepable
|
||||||
|
|
||||||
|
Fix this by explicitly selecting CONFIG_FUNCTION_ERROR_INJECTION in the
|
||||||
|
selftest config.
|
||||||
|
|
||||||
|
Fixes: a4412fdd49dc ("error-injection: Add prompt for function error injection")
|
||||||
|
Reported-by: Daniel Müller <deso@posteo.net>
|
||||||
|
Signed-off-by: Song Liu <song@kernel.org>
|
||||||
|
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
|
||||||
|
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
|
||||||
|
Acked-by: Daniel Müller <deso@posteo.net>
|
||||||
|
Link: https://lore.kernel.org/bpf/20221213220500.3427947-1-song@kernel.org
|
||||||
|
Signed-off-by: Daniel Müller <deso@posteo.net>
|
||||||
|
---
|
||||||
|
tools/testing/selftests/bpf/config | 1 +
|
||||||
|
1 file changed, 1 insertion(+)
|
||||||
|
|
||||||
|
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
|
||||||
|
index 612f69..63cd4a 100644
|
||||||
|
--- a/tools/testing/selftests/bpf/config
|
||||||
|
+++ b/tools/testing/selftests/bpf/config
|
||||||
|
@@ -16,6 +16,7 @@ CONFIG_CRYPTO_USER_API_HASH=y
|
||||||
|
CONFIG_DYNAMIC_FTRACE=y
|
||||||
|
CONFIG_FPROBE=y
|
||||||
|
CONFIG_FTRACE_SYSCALLS=y
|
||||||
|
+CONFIG_FUNCTION_ERROR_INJECTION=y
|
||||||
|
CONFIG_FUNCTION_TRACER=y
|
||||||
|
CONFIG_GENEVE=y
|
||||||
|
CONFIG_IKCONFIG=y
|
||||||
|
--
|
||||||
|
2.30.2
|
||||||
|
|
||||||
@@ -1,85 +0,0 @@
|
|||||||
From e3a4f5092e847ec00e2b66c060f2cef52b8d0177 Mon Sep 17 00:00:00 2001
|
|
||||||
From: Ihor Solodrai <ihor.solodrai@pm.me>
|
|
||||||
Date: Thu, 14 Nov 2024 12:49:34 -0800
|
|
||||||
Subject: [PATCH bpf-next] selftests/bpf: set test path for
|
|
||||||
token/obj_priv_implicit_token_envvar
|
|
||||||
|
|
||||||
token/obj_priv_implicit_token_envvar test may fail in an environment
|
|
||||||
where the process executing tests can not write to the root path.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
https://github.com/libbpf/libbpf/actions/runs/11844507007/job/33007897936
|
|
||||||
|
|
||||||
Change default path used by the test to /tmp/bpf-token-fs, and make it
|
|
||||||
runtime configurable via an environment variable.
|
|
||||||
|
|
||||||
Signed-off-by: Ihor Solodrai <ihor.solodrai@pm.me>
|
|
||||||
---
|
|
||||||
tools/testing/selftests/bpf/prog_tests/token.c | 18 +++++++++++-------
|
|
||||||
1 file changed, 11 insertions(+), 7 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
|
|
||||||
index fe86e4fdb89c..39f5414b674b 100644
|
|
||||||
--- a/tools/testing/selftests/bpf/prog_tests/token.c
|
|
||||||
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
|
|
||||||
@@ -828,8 +828,11 @@ static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
|
|
||||||
return validate_struct_ops_load(mnt_fd, true /* should succeed */);
|
|
||||||
}
|
|
||||||
|
|
||||||
+static const char* token_bpffs_custom_dir() {
|
|
||||||
+ return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ? : "/tmp/bpf-token-fs";
|
|
||||||
+}
|
|
||||||
+
|
|
||||||
#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
|
|
||||||
-#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs"
|
|
||||||
|
|
||||||
static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
|
|
||||||
{
|
|
||||||
@@ -892,6 +895,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel
|
|
||||||
|
|
||||||
static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
|
|
||||||
{
|
|
||||||
+ const char *custom_dir = token_bpffs_custom_dir();
|
|
||||||
LIBBPF_OPTS(bpf_object_open_opts, opts);
|
|
||||||
struct dummy_st_ops_success *skel;
|
|
||||||
int err;
|
|
||||||
@@ -909,10 +913,10 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
|
|
||||||
* BPF token implicitly, unless pointed to it through
|
|
||||||
* LIBBPF_BPF_TOKEN_PATH envvar
|
|
||||||
*/
|
|
||||||
- rmdir(TOKEN_BPFFS_CUSTOM);
|
|
||||||
- if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom"))
|
|
||||||
+ rmdir(custom_dir);
|
|
||||||
+ if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom"))
|
|
||||||
goto err_out;
|
|
||||||
- err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH);
|
|
||||||
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH);
|
|
||||||
if (!ASSERT_OK(err, "move_mount_bpffs"))
|
|
||||||
goto err_out;
|
|
||||||
|
|
||||||
@@ -925,7 +929,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
|
|
||||||
- err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/);
|
|
||||||
+ err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/);
|
|
||||||
if (!ASSERT_OK(err, "setenv_token_path"))
|
|
||||||
goto err_out;
|
|
||||||
|
|
||||||
@@ -951,11 +955,11 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
|
|
||||||
if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
|
|
||||||
goto err_out;
|
|
||||||
|
|
||||||
- rmdir(TOKEN_BPFFS_CUSTOM);
|
|
||||||
+ rmdir(custom_dir);
|
|
||||||
unsetenv(TOKEN_ENVVAR);
|
|
||||||
return 0;
|
|
||||||
err_out:
|
|
||||||
- rmdir(TOKEN_BPFFS_CUSTOM);
|
|
||||||
+ rmdir(custom_dir);
|
|
||||||
unsetenv(TOKEN_ENVVAR);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
--
|
|
||||||
2.47.0
|
|
||||||
|
|
||||||
@@ -0,0 +1,68 @@
|
|||||||
|
From d3484f640bc82cff459beb85a00f7ebab20f0a41 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
|
||||||
|
Date: Sun, 9 Apr 2023 11:28:31 +0900
|
||||||
|
Subject: [PATCH] tracing: fprobe: Initialize ret valiable to fix smatch error
|
||||||
|
|
||||||
|
The commit 39d954200bf6 ("fprobe: Skip exit_handler if entry_handler returns
|
||||||
|
!0") introduced a hidden dependency of 'ret' local variable in the
|
||||||
|
fprobe_handler(), Smatch warns the `ret` can be accessed without
|
||||||
|
initialization.
|
||||||
|
|
||||||
|
kernel/trace/fprobe.c:59 fprobe_handler()
|
||||||
|
error: uninitialized symbol 'ret'.
|
||||||
|
|
||||||
|
kernel/trace/fprobe.c
|
||||||
|
49 fpr->entry_ip = ip;
|
||||||
|
50 if (fp->entry_data_size)
|
||||||
|
51 entry_data = fpr->data;
|
||||||
|
52 }
|
||||||
|
53
|
||||||
|
54 if (fp->entry_handler)
|
||||||
|
55 ret = fp->entry_handler(fp, ip, ftrace_get_regs(fregs), entry_data);
|
||||||
|
|
||||||
|
ret is only initialized if there is an ->entry_handler
|
||||||
|
|
||||||
|
56
|
||||||
|
57 /* If entry_handler returns !0, nmissed is not counted. */
|
||||||
|
58 if (rh) {
|
||||||
|
|
||||||
|
rh is only true if there is an ->exit_handler. Presumably if you have
|
||||||
|
and ->exit_handler that means you also have a ->entry_handler but Smatch
|
||||||
|
is not smart enough to figure it out.
|
||||||
|
|
||||||
|
--> 59 if (ret)
|
||||||
|
^^^
|
||||||
|
Warning here.
|
||||||
|
|
||||||
|
60 rethook_recycle(rh);
|
||||||
|
61 else
|
||||||
|
62 rethook_hook(rh, ftrace_get_regs(fregs), true);
|
||||||
|
63 }
|
||||||
|
64 out:
|
||||||
|
65 ftrace_test_recursion_unlock(bit);
|
||||||
|
66 }
|
||||||
|
|
||||||
|
Reported-by: Dan Carpenter <error27@gmail.com>
|
||||||
|
Link: https://lore.kernel.org/all/85429a5c-a4b9-499e-b6c0-cbd313291c49@kili.mountain
|
||||||
|
Fixes: 39d954200bf6 ("fprobe: Skip exit_handler if entry_handler returns !0")
|
||||||
|
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
|
||||||
|
---
|
||||||
|
kernel/trace/fprobe.c | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
|
||||||
|
index 9abb3905bc8e..293184227394 100644
|
||||||
|
--- a/kernel/trace/fprobe.c
|
||||||
|
+++ b/kernel/trace/fprobe.c
|
||||||
|
@@ -27,7 +27,7 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
|
||||||
|
struct rethook_node *rh = NULL;
|
||||||
|
struct fprobe *fp;
|
||||||
|
void *entry_data = NULL;
|
||||||
|
- int bit, ret;
|
||||||
|
+ int bit, ret = 0;
|
||||||
|
|
||||||
|
fp = container_of(ops, struct fprobe, ops);
|
||||||
|
if (fprobe_disabled(fp))
|
||||||
|
--
|
||||||
|
2.34.1
|
||||||
|
|
||||||
@@ -0,0 +1,83 @@
|
|||||||
|
From 8267fc71abb2dc47338570e56dd3473a58313fce Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||||
|
Date: Mon, 17 Apr 2023 23:53:22 +0200
|
||||||
|
Subject: [PATCH] veth: take into account peer device for
|
||||||
|
NETDEV_XDP_ACT_NDO_XMIT xdp_features flag
|
||||||
|
|
||||||
|
For veth pairs, NETDEV_XDP_ACT_NDO_XMIT is supported by the current
|
||||||
|
device if the peer one is running a XDP program or if it has GRO enabled.
|
||||||
|
Fix the xdp_features flags reporting considering peer device and not
|
||||||
|
current one for NETDEV_XDP_ACT_NDO_XMIT.
|
||||||
|
|
||||||
|
Fixes: fccca038f300 ("veth: take into account device reconfiguration for xdp_features flag")
|
||||||
|
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||||
|
Link: https://lore.kernel.org/r/4f1ca6f6f6b42ae125bfdb5c7782217c83968b2e.1681767806.git.lorenzo@kernel.org
|
||||||
|
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
|
||||||
|
---
|
||||||
|
drivers/net/veth.c | 17 +++++++++++------
|
||||||
|
1 file changed, 11 insertions(+), 6 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
|
||||||
|
index e1b38fbf1dd9..4b3c6647edc6 100644
|
||||||
|
--- a/drivers/net/veth.c
|
||||||
|
+++ b/drivers/net/veth.c
|
||||||
|
@@ -1262,11 +1262,12 @@ static void veth_set_xdp_features(struct net_device *dev)
|
||||||
|
|
||||||
|
peer = rtnl_dereference(priv->peer);
|
||||||
|
if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
|
||||||
|
+ struct veth_priv *priv_peer = netdev_priv(peer);
|
||||||
|
xdp_features_t val = NETDEV_XDP_ACT_BASIC |
|
||||||
|
NETDEV_XDP_ACT_REDIRECT |
|
||||||
|
NETDEV_XDP_ACT_RX_SG;
|
||||||
|
|
||||||
|
- if (priv->_xdp_prog || veth_gro_requested(dev))
|
||||||
|
+ if (priv_peer->_xdp_prog || veth_gro_requested(peer))
|
||||||
|
val |= NETDEV_XDP_ACT_NDO_XMIT |
|
||||||
|
NETDEV_XDP_ACT_NDO_XMIT_SG;
|
||||||
|
xdp_set_features_flag(dev, val);
|
||||||
|
@@ -1504,19 +1505,23 @@ static int veth_set_features(struct net_device *dev,
|
||||||
|
{
|
||||||
|
netdev_features_t changed = features ^ dev->features;
|
||||||
|
struct veth_priv *priv = netdev_priv(dev);
|
||||||
|
+ struct net_device *peer;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
+ peer = rtnl_dereference(priv->peer);
|
||||||
|
if (features & NETIF_F_GRO) {
|
||||||
|
err = veth_napi_enable(dev);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
- xdp_features_set_redirect_target(dev, true);
|
||||||
|
+ if (peer)
|
||||||
|
+ xdp_features_set_redirect_target(peer, true);
|
||||||
|
} else {
|
||||||
|
- xdp_features_clear_redirect_target(dev);
|
||||||
|
+ if (peer)
|
||||||
|
+ xdp_features_clear_redirect_target(peer);
|
||||||
|
veth_napi_del(dev);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
@@ -1598,13 +1603,13 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
||||||
|
peer->max_mtu = max_mtu;
|
||||||
|
}
|
||||||
|
|
||||||
|
- xdp_features_set_redirect_target(dev, true);
|
||||||
|
+ xdp_features_set_redirect_target(peer, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (old_prog) {
|
||||||
|
if (!prog) {
|
||||||
|
- if (!veth_gro_requested(dev))
|
||||||
|
- xdp_features_clear_redirect_target(dev);
|
||||||
|
+ if (peer && !veth_gro_requested(dev))
|
||||||
|
+ xdp_features_clear_redirect_target(peer);
|
||||||
|
|
||||||
|
if (dev->flags & IFF_UP)
|
||||||
|
veth_disable_xdp(dev);
|
||||||
|
--
|
||||||
|
2.34.1
|
||||||
|
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
From bd06a13f44e15e2e83561ea165061c445a15bd9e Mon Sep 17 00:00:00 2001
|
|
||||||
From: Song Liu <song@kernel.org>
|
|
||||||
Date: Thu, 27 Mar 2025 11:55:28 -0700
|
|
||||||
Subject: [PATCH 4000/4002] selftests/bpf: Fix tests after fields reorder in
|
|
||||||
struct file
|
|
||||||
|
|
||||||
The change in struct file [1] moved f_ref to the 3rd cache line.
|
|
||||||
It made *(u64 *)file dereference invalid from the verifier point of view,
|
|
||||||
because btf_struct_walk() walks into f_lock field, which is 4-byte long.
|
|
||||||
|
|
||||||
Fix the selftests to deference the file pointer as a 4-byte access.
|
|
||||||
|
|
||||||
[1] commit e249056c91a2 ("fs: place f_ref to 3rd cache line in struct file to resolve false sharing")
|
|
||||||
Reported-by: Jakub Kicinski <kuba@kernel.org>
|
|
||||||
Signed-off-by: Song Liu <song@kernel.org>
|
|
||||||
Link: https://lore.kernel.org/r/20250327185528.1740787-1-song@kernel.org
|
|
||||||
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
|
|
||||||
---
|
|
||||||
tools/testing/selftests/bpf/progs/test_module_attach.c | 2 +-
|
|
||||||
tools/testing/selftests/bpf/progs/test_subprogs_extable.c | 6 +++---
|
|
||||||
2 files changed, 4 insertions(+), 4 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
|
|
||||||
index fb07f5773888..7f3c233943b3 100644
|
|
||||||
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
|
|
||||||
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
|
|
||||||
@@ -117,7 +117,7 @@ int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
|
|
||||||
|
|
||||||
bpf_probe_read_kernel(&buf, 8, ret);
|
|
||||||
bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
|
|
||||||
- *(volatile long long *)ret;
|
|
||||||
+ *(volatile int *)ret;
|
|
||||||
*(volatile int *)&ret->f_mode;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
|
|
||||||
index e2a21fbd4e44..dcac69f5928a 100644
|
|
||||||
--- a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
|
|
||||||
+++ b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
|
|
||||||
@@ -21,7 +21,7 @@ static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
|
|
||||||
SEC("fexit/bpf_testmod_return_ptr")
|
|
||||||
int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
|
|
||||||
{
|
|
||||||
- *(volatile long *)ret;
|
|
||||||
+ *(volatile int *)ret;
|
|
||||||
*(volatile int *)&ret->f_mode;
|
|
||||||
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
|
|
||||||
triggered++;
|
|
||||||
@@ -31,7 +31,7 @@ int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
|
|
||||||
SEC("fexit/bpf_testmod_return_ptr")
|
|
||||||
int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
|
|
||||||
{
|
|
||||||
- *(volatile long *)ret;
|
|
||||||
+ *(volatile int *)ret;
|
|
||||||
*(volatile int *)&ret->f_mode;
|
|
||||||
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
|
|
||||||
triggered++;
|
|
||||||
@@ -41,7 +41,7 @@ int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
|
|
||||||
SEC("fexit/bpf_testmod_return_ptr")
|
|
||||||
int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
|
|
||||||
{
|
|
||||||
- *(volatile long *)ret;
|
|
||||||
+ *(volatile int *)ret;
|
|
||||||
*(volatile int *)&ret->f_mode;
|
|
||||||
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
|
|
||||||
triggered++;
|
|
||||||
--
|
|
||||||
2.49.0
|
|
||||||
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
From 8be3a12f9f266aaf3f06f0cfe0e90cfe4d956f3d Mon Sep 17 00:00:00 2001
|
|
||||||
From: Song Liu <song@kernel.org>
|
|
||||||
Date: Fri, 28 Mar 2025 12:31:24 -0700
|
|
||||||
Subject: [PATCH 4001/4002] selftests/bpf: Fix verifier_bpf_fastcall test
|
|
||||||
|
|
||||||
Commit [1] moves percpu data on x86 from address 0x000... to address
|
|
||||||
0xfff...
|
|
||||||
|
|
||||||
Before [1]:
|
|
||||||
|
|
||||||
159020: 0000000000030700 0 OBJECT GLOBAL DEFAULT 23 pcpu_hot
|
|
||||||
|
|
||||||
After [1]:
|
|
||||||
|
|
||||||
152602: ffffffff83a3e034 4 OBJECT GLOBAL DEFAULT 35 pcpu_hot
|
|
||||||
|
|
||||||
As a result, verifier_bpf_fastcall tests should now expect a negative
|
|
||||||
value for pcpu_hot, IOW, the disassemble should show "r=" instead of
|
|
||||||
"w=".
|
|
||||||
|
|
||||||
Fix this in the test.
|
|
||||||
|
|
||||||
Note that, a later change created a new variable "cpu_number" for
|
|
||||||
bpf_get_smp_processor_id() [2]. The inlining logic is updated properly
|
|
||||||
as part of this change, so there is no need to fix anything on the
|
|
||||||
kernel side.
|
|
||||||
|
|
||||||
[1] commit 9d7de2aa8b41 ("x86/percpu/64: Use relative percpu offsets")
|
|
||||||
[2] commit 01c7bc5198e9 ("x86/smp: Move cpu number to percpu hot section")
|
|
||||||
Reported-by: Jakub Kicinski <kuba@kernel.org>
|
|
||||||
Signed-off-by: Song Liu <song@kernel.org>
|
|
||||||
Link: https://lore.kernel.org/r/20250328193124.808784-1-song@kernel.org
|
|
||||||
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
|
|
||||||
---
|
|
||||||
tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c | 6 +++---
|
|
||||||
1 file changed, 3 insertions(+), 3 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
|
|
||||||
index a9be6ae49454..c258b0722e04 100644
|
|
||||||
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
|
|
||||||
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
|
|
||||||
@@ -12,7 +12,7 @@ SEC("raw_tp")
|
|
||||||
__arch_x86_64
|
|
||||||
__log_level(4) __msg("stack depth 8")
|
|
||||||
__xlated("4: r5 = 5")
|
|
||||||
-__xlated("5: w0 = ")
|
|
||||||
+__xlated("5: r0 = ")
|
|
||||||
__xlated("6: r0 = &(void __percpu *)(r0)")
|
|
||||||
__xlated("7: r0 = *(u32 *)(r0 +0)")
|
|
||||||
__xlated("8: exit")
|
|
||||||
@@ -704,7 +704,7 @@ SEC("raw_tp")
|
|
||||||
__arch_x86_64
|
|
||||||
__log_level(4) __msg("stack depth 32+0")
|
|
||||||
__xlated("2: r1 = 1")
|
|
||||||
-__xlated("3: w0 =")
|
|
||||||
+__xlated("3: r0 =")
|
|
||||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
|
||||||
__xlated("5: r0 = *(u32 *)(r0 +0)")
|
|
||||||
/* bpf_loop params setup */
|
|
||||||
@@ -753,7 +753,7 @@ __arch_x86_64
|
|
||||||
__log_level(4) __msg("stack depth 40+0")
|
|
||||||
/* call bpf_get_smp_processor_id */
|
|
||||||
__xlated("2: r1 = 42")
|
|
||||||
-__xlated("3: w0 =")
|
|
||||||
+__xlated("3: r0 =")
|
|
||||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
|
||||||
__xlated("5: r0 = *(u32 *)(r0 +0)")
|
|
||||||
/* call bpf_get_prandom_u32 */
|
|
||||||
--
|
|
||||||
2.49.0
|
|
||||||
|
|
||||||
@@ -1,71 +0,0 @@
|
|||||||
From 07be1f644ff9eeb842fd0490ddd824df0828cb0e Mon Sep 17 00:00:00 2001
|
|
||||||
From: Yonghong Song <yonghong.song@linux.dev>
|
|
||||||
Date: Sun, 30 Mar 2025 20:38:28 -0700
|
|
||||||
Subject: [PATCH 4002/4002] selftests/bpf: Fix verifier_private_stack test
|
|
||||||
failure
|
|
||||||
|
|
||||||
Several verifier_private_stack tests failed with latest bpf-next.
|
|
||||||
For example, for 'Private stack, single prog' subtest, the
|
|
||||||
jitted code:
|
|
||||||
func #0:
|
|
||||||
0: f3 0f 1e fa endbr64
|
|
||||||
4: 0f 1f 44 00 00 nopl (%rax,%rax)
|
|
||||||
9: 0f 1f 00 nopl (%rax)
|
|
||||||
c: 55 pushq %rbp
|
|
||||||
d: 48 89 e5 movq %rsp, %rbp
|
|
||||||
10: f3 0f 1e fa endbr64
|
|
||||||
14: 49 b9 58 74 8a 8f 7d 60 00 00 movabsq $0x607d8f8a7458, %r9
|
|
||||||
1e: 65 4c 03 0c 25 28 c0 48 87 addq %gs:-0x78b73fd8, %r9
|
|
||||||
27: bf 2a 00 00 00 movl $0x2a, %edi
|
|
||||||
2c: 49 89 b9 00 ff ff ff movq %rdi, -0x100(%r9)
|
|
||||||
33: 31 c0 xorl %eax, %eax
|
|
||||||
35: c9 leave
|
|
||||||
36: e9 20 5d 0f e1 jmp 0xffffffffe10f5d5b
|
|
||||||
|
|
||||||
The insn 'addq %gs:-0x78b73fd8, %r9' does not match the expected
|
|
||||||
regex 'addq %gs:0x{{.*}}, %r9' and this caused test failure.
|
|
||||||
|
|
||||||
Fix it by changing '%gs:0x{{.*}}' to '%gs:{{.*}}' to accommodate the
|
|
||||||
possible negative offset. A few other subtests are fixed in a similar way.
|
|
||||||
|
|
||||||
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
|
|
||||||
Link: https://lore.kernel.org/r/20250331033828.365077-1-yonghong.song@linux.dev
|
|
||||||
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
|
|
||||||
---
|
|
||||||
tools/testing/selftests/bpf/progs/verifier_private_stack.c | 6 +++---
|
|
||||||
1 file changed, 3 insertions(+), 3 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
|
|
||||||
index b1fbdf119553..fc91b414364e 100644
|
|
||||||
--- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c
|
|
||||||
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
|
|
||||||
@@ -27,7 +27,7 @@ __description("Private stack, single prog")
|
|
||||||
__success
|
|
||||||
__arch_x86_64
|
|
||||||
__jited(" movabsq $0x{{.*}}, %r9")
|
|
||||||
-__jited(" addq %gs:0x{{.*}}, %r9")
|
|
||||||
+__jited(" addq %gs:{{.*}}, %r9")
|
|
||||||
__jited(" movl $0x2a, %edi")
|
|
||||||
__jited(" movq %rdi, -0x100(%r9)")
|
|
||||||
__naked void private_stack_single_prog(void)
|
|
||||||
@@ -74,7 +74,7 @@ __success
|
|
||||||
__arch_x86_64
|
|
||||||
/* private stack fp for the main prog */
|
|
||||||
__jited(" movabsq $0x{{.*}}, %r9")
|
|
||||||
-__jited(" addq %gs:0x{{.*}}, %r9")
|
|
||||||
+__jited(" addq %gs:{{.*}}, %r9")
|
|
||||||
__jited(" movl $0x2a, %edi")
|
|
||||||
__jited(" movq %rdi, -0x200(%r9)")
|
|
||||||
__jited(" pushq %r9")
|
|
||||||
@@ -122,7 +122,7 @@ __jited(" pushq %rbp")
|
|
||||||
__jited(" movq %rsp, %rbp")
|
|
||||||
__jited(" endbr64")
|
|
||||||
__jited(" movabsq $0x{{.*}}, %r9")
|
|
||||||
-__jited(" addq %gs:0x{{.*}}, %r9")
|
|
||||||
+__jited(" addq %gs:{{.*}}, %r9")
|
|
||||||
__jited(" pushq %r9")
|
|
||||||
__jited(" callq")
|
|
||||||
__jited(" popq %r9")
|
|
||||||
--
|
|
||||||
2.49.0
|
|
||||||
|
|
||||||
8
ci/vmtest/configs/ALLOWLIST-4.9.0
Normal file
8
ci/vmtest/configs/ALLOWLIST-4.9.0
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# btf_dump -- need to disable data dump sub-tests
|
||||||
|
core_retro
|
||||||
|
cpu_mask
|
||||||
|
hashmap
|
||||||
|
legacy_printk
|
||||||
|
perf_buffer
|
||||||
|
section_names
|
||||||
|
|
||||||
51
ci/vmtest/configs/ALLOWLIST-5.5.0
Normal file
51
ci/vmtest/configs/ALLOWLIST-5.5.0
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
# attach_probe
|
||||||
|
autoload
|
||||||
|
bpf_verif_scale
|
||||||
|
cgroup_attach_autodetach
|
||||||
|
cgroup_attach_override
|
||||||
|
core_autosize
|
||||||
|
core_extern
|
||||||
|
core_read_macros
|
||||||
|
core_reloc
|
||||||
|
core_retro
|
||||||
|
cpu_mask
|
||||||
|
endian
|
||||||
|
get_branch_snapshot
|
||||||
|
get_stackid_cannot_attach
|
||||||
|
global_data
|
||||||
|
global_data_init
|
||||||
|
global_func_args
|
||||||
|
hashmap
|
||||||
|
legacy_printk
|
||||||
|
linked_funcs
|
||||||
|
linked_maps
|
||||||
|
map_lock
|
||||||
|
obj_name
|
||||||
|
perf_buffer
|
||||||
|
perf_event_stackmap
|
||||||
|
pinning
|
||||||
|
pkt_md_access
|
||||||
|
probe_user
|
||||||
|
queue_stack_map
|
||||||
|
raw_tp_writable_reject_nbd_invalid
|
||||||
|
raw_tp_writable_test_run
|
||||||
|
rdonly_maps
|
||||||
|
section_names
|
||||||
|
signal_pending
|
||||||
|
skeleton
|
||||||
|
sockmap_ktls
|
||||||
|
sockopt
|
||||||
|
spinlock
|
||||||
|
stacktrace_map
|
||||||
|
stacktrace_map_raw_tp
|
||||||
|
static_linked
|
||||||
|
task_fd_query_rawtp
|
||||||
|
task_fd_query_tp
|
||||||
|
tc_bpf
|
||||||
|
tcp_estats
|
||||||
|
tcp_rtt
|
||||||
|
tp_attach_query
|
||||||
|
usdt/urand_pid_attach
|
||||||
|
xdp
|
||||||
|
xdp_noinline
|
||||||
|
xdp_perf
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
# TEMPORARY
|
|
||||||
btf_dump/btf_dump: syntax
|
|
||||||
kprobe_multi_bench_attach
|
|
||||||
core_reloc/enum64val
|
|
||||||
core_reloc/size___diff_sz
|
|
||||||
core_reloc/type_based___diff_sz
|
|
||||||
test_ima # All of CI is broken on it following 6.3-rc1 merge
|
|
||||||
|
|
||||||
lwt_reroute # crashes kernel after netnext merge from 2ab1efad60ad "net/sched: cls_api: complement tcf_tfilter_dump_policy"
|
|
||||||
tc_links_ingress # started failing after net-next merge from 2ab1efad60ad "net/sched: cls_api: complement tcf_tfilter_dump_policy"
|
|
||||||
xdp_bonding/xdp_bonding_features # started failing after net merge from 359e54a93ab4 "l2tp: pass correct message length to ip6_append_data"
|
|
||||||
tc_redirect/tc_redirect_dtime # uapi breakage after net-next commit 885c36e59f46 ("net: Re-use and set mono_delivery_time bit for userspace tstamp packets")
|
|
||||||
migrate_reuseport/IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
|
|
||||||
migrate_reuseport/IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation
|
|
||||||
verify_pkcs7_sig # keeps failing
|
|
||||||
120
ci/vmtest/configs/DENYLIST-5.5.0
Normal file
120
ci/vmtest/configs/DENYLIST-5.5.0
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
# This file is not used and is there for historic purposes only.
|
||||||
|
# See ALLOWLIST-5.5.0 instead.
|
||||||
|
|
||||||
|
# PERMANENTLY DISABLED
|
||||||
|
align # verifier output format changed
|
||||||
|
atomics # new atomic operations (v5.12+)
|
||||||
|
atomic_bounds # new atomic operations (v5.12+)
|
||||||
|
bind_perm # changed semantics of return values (v5.12+)
|
||||||
|
bpf_cookie # 5.15+
|
||||||
|
bpf_iter # bpf_iter support is missing
|
||||||
|
bpf_obj_id # bpf_link support missing for GET_OBJ_INFO, GET_FD_BY_ID, etc
|
||||||
|
bpf_tcp_ca # STRUCT_OPS is missing
|
||||||
|
btf_map_in_map # inner map leak fixed in 5.8
|
||||||
|
btf_skc_cls_ingress # v5.10+ functionality
|
||||||
|
cg_storage_multi # v5.9+ functionality
|
||||||
|
cgroup_attach_multi # BPF_F_REPLACE_PROG missing
|
||||||
|
cgroup_link # LINK_CREATE is missing
|
||||||
|
cgroup_skb_sk_lookup # bpf_sk_lookup_tcp() helper is missing
|
||||||
|
check_mtu # missing BPF helper (v5.12+)
|
||||||
|
cls_redirect # bpf_csum_level() helper is missing
|
||||||
|
connect_force_port # cgroup/get{peer,sock}name{4,6} support is missing
|
||||||
|
d_path # v5.10+ feature
|
||||||
|
enable_stats # BPF_ENABLE_STATS support is missing
|
||||||
|
fentry_fexit # bpf_prog_test_tracing missing
|
||||||
|
fentry_test # bpf_prog_test_tracing missing
|
||||||
|
fexit_bpf2bpf # freplace is missing
|
||||||
|
fexit_sleep # relies on bpf_trampoline fix in 5.12+
|
||||||
|
fexit_test # bpf_prog_test_tracing missing
|
||||||
|
flow_dissector # bpf_link-based flow dissector is in 5.8+
|
||||||
|
flow_dissector_reattach
|
||||||
|
for_each # v5.12+
|
||||||
|
get_func_ip_test # v5.15+
|
||||||
|
get_stack_raw_tp # exercising BPF verifier bug causing infinite loop
|
||||||
|
hash_large_key # v5.11+
|
||||||
|
ima # v5.11+
|
||||||
|
kfree_skb # 32-bit pointer arith in test_pkt_access
|
||||||
|
ksyms # __start_BTF has different name
|
||||||
|
kfunc_call # v5.13+
|
||||||
|
link_pinning # bpf_link is missing
|
||||||
|
linked_vars # v5.13+
|
||||||
|
load_bytes_relative # new functionality in 5.8
|
||||||
|
lookup_and_delete # v5.14+
|
||||||
|
map_init # per-CPU LRU missing
|
||||||
|
map_ptr # test uses BPF_MAP_TYPE_RINGBUF, added in 5.8
|
||||||
|
metadata # v5.10+
|
||||||
|
migrate_reuseport # v5.14+
|
||||||
|
mmap # 5.5 kernel is too permissive with re-mmaping
|
||||||
|
modify_return # fmod_ret support is missing
|
||||||
|
module_attach # module BTF support missing (v5.11+)
|
||||||
|
netcnt
|
||||||
|
netns_cookie # v5.15+
|
||||||
|
ns_current_pid_tgid # bpf_get_ns_current_pid_tgid() helper is missing
|
||||||
|
pe_preserve_elems # v5.10+
|
||||||
|
perf_branches # bpf_read_branch_records() helper is missing
|
||||||
|
perf_link # v5.15+
|
||||||
|
pkt_access # 32-bit pointer arith in test_pkt_access
|
||||||
|
probe_read_user_str # kernel bug with garbage bytes at the end
|
||||||
|
prog_run_xattr # 32-bit pointer arith in test_pkt_access
|
||||||
|
raw_tp_test_run # v5.10+
|
||||||
|
recursion # v5.12+
|
||||||
|
ringbuf # BPF_MAP_TYPE_RINGBUF is supported in 5.8+
|
||||||
|
|
||||||
|
# bug in verifier w/ tracking references
|
||||||
|
#reference_tracking/classifier/sk_lookup_success
|
||||||
|
reference_tracking
|
||||||
|
|
||||||
|
select_reuseport # UDP support is missing
|
||||||
|
send_signal # bpf_send_signal_thread() helper is missing
|
||||||
|
sk_assign # bpf_sk_assign helper missing
|
||||||
|
sk_lookup # v5.9+
|
||||||
|
sk_storage_tracing # missing bpf_sk_storage_get() helper
|
||||||
|
skb_ctx # ctx_{size, }_{in, out} in BPF_PROG_TEST_RUN is missing
|
||||||
|
skb_helpers # helpers added in 5.8+
|
||||||
|
skeleton # creates too big ARRAY map
|
||||||
|
snprintf # v5.13+
|
||||||
|
snprintf_btf # v5.10+
|
||||||
|
sock_fields # v5.10+
|
||||||
|
socket_cookie # v5.12+
|
||||||
|
sockmap_basic # uses new socket fields, 5.8+
|
||||||
|
sockmap_listen # no listen socket supportin SOCKMAP
|
||||||
|
sockopt/getsockopt: ignore >PAGE_SIZE optlen
|
||||||
|
sockopt/setsockopt: ignore >PAGE_SIZE optlen
|
||||||
|
sockopt_sk
|
||||||
|
sockopt_qos_to_cc # v5.15+
|
||||||
|
stacktrace_build_id # v5.9+
|
||||||
|
stack_var_off # v5.12+
|
||||||
|
syscall # v5.14+
|
||||||
|
task_local_storage # v5.12+
|
||||||
|
task_pt_regs # v5.15+
|
||||||
|
tcp_hdr_options # v5.10+, new TCP header options feature in BPF
|
||||||
|
tcpbpf_user # LINK_CREATE is missing
|
||||||
|
tc_redirect # v5.14+
|
||||||
|
test_bpffs # v5.10+, new CONFIG_BPF_PRELOAD=y and CONFIG_BPF_PRELOAD_UMG=y|m
|
||||||
|
test_bprm_opts # v5.11+
|
||||||
|
test_global_funcs # kernel doesn't support BTF linkage=global on FUNCs
|
||||||
|
test_local_storage # v5.10+ feature
|
||||||
|
test_lsm # no BPF_LSM support
|
||||||
|
test_overhead # no fmod_ret support
|
||||||
|
test_profiler # needs verifier logic improvements from v5.10+
|
||||||
|
test_skb_pkt_end # v5.11+
|
||||||
|
timer # v5.15+
|
||||||
|
timer_mim # v5.15+
|
||||||
|
trace_ext # v5.10+
|
||||||
|
trace_printk # v5.14+
|
||||||
|
trampoline_count # v5.12+ have lower allowed limits
|
||||||
|
udp_limit # no cgroup/sock_release BPF program type (5.9+)
|
||||||
|
varlen # verifier bug fixed in later kernels
|
||||||
|
vmlinux # hrtimer_nanosleep() signature changed incompatibly
|
||||||
|
xdp_adjust_tail # new XDP functionality added in 5.8
|
||||||
|
xdp_attach # IFLA_XDP_EXPECTED_FD support is missing
|
||||||
|
xdp_bonding # v5.15+
|
||||||
|
xdp_bpf2bpf # freplace is missing
|
||||||
|
xdp_context_test_run # v5.15+
|
||||||
|
xdp_cpumap_attach # v5.9+
|
||||||
|
xdp_devmap_attach # new feature in 5.8
|
||||||
|
xdp_link # v5.9+
|
||||||
|
|
||||||
|
# SUBTESTS FAILING (block entire test until blocking subtests works properly)
|
||||||
|
btf # "size check test", "func (Non zero vlen)"
|
||||||
|
tailcalls # tailcall_bpf2bpf_1, tailcall_bpf2bpf_2, tailcall_bpf2bpf_3
|
||||||
@@ -1,13 +1,4 @@
|
|||||||
decap_sanity # weird failure with decap_sanity_ns netns already existing, TBD
|
decap_sanity # weird failure with decap_sanity_ns netns already existing, TBD
|
||||||
empty_skb # waiting the fix in bpf tree to make it to bpf-next
|
|
||||||
bpf_nf/tc-bpf-ct # test consistently failing on x86: https://github.com/libbpf/libbpf/pull/698#issuecomment-1590341200
|
bpf_nf/tc-bpf-ct # test consistently failing on x86: https://github.com/libbpf/libbpf/pull/698#issuecomment-1590341200
|
||||||
bpf_nf/xdp-ct # test consistently failing on x86: https://github.com/libbpf/libbpf/pull/698#issuecomment-1590341200
|
bpf_nf/xdp-ct # test consistently failing on x86: https://github.com/libbpf/libbpf/pull/698#issuecomment-1590341200
|
||||||
kprobe_multi_bench_attach # suspected to cause crashes in CI
|
kprobe_multi_bench_attach # suspected to cause crashes in CI
|
||||||
find_vma # test consistently fails on latest kernel, see https://github.com/libbpf/libbpf/issues/754 for details
|
|
||||||
bpf_cookie/perf_event
|
|
||||||
send_signal/send_signal_nmi
|
|
||||||
send_signal/send_signal_nmi_thread
|
|
||||||
|
|
||||||
lwt_reroute # crashes kernel, fix pending upstream
|
|
||||||
tc_links_ingress # fails, same fix is pending upstream
|
|
||||||
tc_redirect # enough is enough, banned for life for flakiness
|
|
||||||
|
|||||||
@@ -2,16 +2,3 @@
|
|||||||
sockmap_listen/sockhash VSOCK test_vsock_redir
|
sockmap_listen/sockhash VSOCK test_vsock_redir
|
||||||
usdt/basic # failing verifier due to bounds check after LLVM update
|
usdt/basic # failing verifier due to bounds check after LLVM update
|
||||||
usdt/multispec # same as above
|
usdt/multispec # same as above
|
||||||
|
|
||||||
deny_namespace # not yet in bpf denylist
|
|
||||||
tc_redirect/tc_redirect_dtime # very flaky
|
|
||||||
lru_bug # not yet in bpf-next denylist
|
|
||||||
|
|
||||||
# Disabled temporarily for a crash.
|
|
||||||
# https://lore.kernel.org/bpf/c9923c1d-971d-4022-8dc8-1364e929d34c@gmail.com/
|
|
||||||
dummy_st_ops/dummy_init_ptr_arg
|
|
||||||
fexit_bpf2bpf
|
|
||||||
tailcalls
|
|
||||||
trace_ext
|
|
||||||
xdp_bpf2bpf
|
|
||||||
xdp_metadata
|
|
||||||
|
|||||||
@@ -1,37 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# This file is sourced by libbpf/ci/run-vmtest Github Action scripts.
|
|
||||||
|
|
||||||
# $SELFTESTS_BPF and $VMTEST_CONFIGS are set in the workflow, before
|
|
||||||
# libbpf/ci/run-vmtest action is called
|
|
||||||
# See .github/workflows/kernel-test.yml
|
|
||||||
|
|
||||||
ALLOWLIST_FILES=(
|
|
||||||
"${SELFTESTS_BPF}/ALLOWLIST"
|
|
||||||
"${SELFTESTS_BPF}/ALLOWLIST.${ARCH}"
|
|
||||||
"${VMTEST_CONFIGS}/ALLOWLIST"
|
|
||||||
"${VMTEST_CONFIGS}/ALLOWLIST-${KERNEL}"
|
|
||||||
"${VMTEST_CONFIGS}/ALLOWLIST-${KERNEL}.${ARCH}"
|
|
||||||
)
|
|
||||||
|
|
||||||
DENYLIST_FILES=(
|
|
||||||
"${SELFTESTS_BPF}/DENYLIST"
|
|
||||||
"${SELFTESTS_BPF}/DENYLIST.${ARCH}"
|
|
||||||
"${VMTEST_CONFIGS}/DENYLIST"
|
|
||||||
"${VMTEST_CONFIGS}/DENYLIST-${KERNEL}"
|
|
||||||
"${VMTEST_CONFIGS}/DENYLIST-${KERNEL}.${ARCH}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Export pipe-separated strings, because bash doesn't support array export
|
|
||||||
export SELFTESTS_BPF_ALLOWLIST_FILES=$(IFS="|"; echo "${ALLOWLIST_FILES[*]}")
|
|
||||||
export SELFTESTS_BPF_DENYLIST_FILES=$(IFS="|"; echo "${DENYLIST_FILES[*]}")
|
|
||||||
|
|
||||||
if [[ "${LLVM_VERSION}" -lt 18 ]]; then
|
|
||||||
echo "KERNEL_TEST=test_progs test_progs_no_alu32 test_maps test_verifier" >> $GITHUB_ENV
|
|
||||||
else # all
|
|
||||||
echo "KERNEL_TEST=test_progs test_progs_cpuv4 test_progs_no_alu32 test_maps test_verifier" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "cp -R ${SELFTESTS_BPF} ${GITHUB_WORKSPACE}/selftests"
|
|
||||||
mkdir -p "${GITHUB_WORKSPACE}/selftests"
|
|
||||||
cp -R "${SELFTESTS_BPF}" "${GITHUB_WORKSPACE}/selftests"
|
|
||||||
38
ci/vmtest/helpers.sh
Executable file
38
ci/vmtest/helpers.sh
Executable file
@@ -0,0 +1,38 @@
|
|||||||
|
# shellcheck shell=bash
|
||||||
|
|
||||||
|
# $1 - start or end
|
||||||
|
# $2 - fold identifier, no spaces
|
||||||
|
# $3 - fold section description
|
||||||
|
foldable() {
|
||||||
|
local YELLOW='\033[1;33m'
|
||||||
|
local NOCOLOR='\033[0m'
|
||||||
|
if [ $1 = "start" ]; then
|
||||||
|
line="::group::$2"
|
||||||
|
if [ ! -z "${3:-}" ]; then
|
||||||
|
line="$line - ${YELLOW}$3${NOCOLOR}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
line="::endgroup::"
|
||||||
|
fi
|
||||||
|
echo -e "$line"
|
||||||
|
}
|
||||||
|
|
||||||
|
__print() {
|
||||||
|
local TITLE=""
|
||||||
|
if [[ -n $2 ]]; then
|
||||||
|
TITLE=" title=$2"
|
||||||
|
fi
|
||||||
|
echo "::$1${TITLE}::$3"
|
||||||
|
}
|
||||||
|
|
||||||
|
# $1 - title
|
||||||
|
# $2 - message
|
||||||
|
print_error() {
|
||||||
|
__print error $1 $2
|
||||||
|
}
|
||||||
|
|
||||||
|
# $1 - title
|
||||||
|
# $2 - message
|
||||||
|
print_notice() {
|
||||||
|
__print notice $1 $2
|
||||||
|
}
|
||||||
94
ci/vmtest/run_selftests.sh
Executable file
94
ci/vmtest/run_selftests.sh
Executable file
@@ -0,0 +1,94 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
source $(cd $(dirname $0) && pwd)/helpers.sh
|
||||||
|
|
||||||
|
ARCH=$(uname -m)
|
||||||
|
|
||||||
|
STATUS_FILE=/exitstatus
|
||||||
|
|
||||||
|
read_lists() {
|
||||||
|
(for path in "$@"; do
|
||||||
|
if [[ -s "$path" ]]; then
|
||||||
|
cat "$path"
|
||||||
|
fi;
|
||||||
|
done) | cut -d'#' -f1 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | tr -s '\n' ','
|
||||||
|
}
|
||||||
|
|
||||||
|
test_progs() {
|
||||||
|
if [[ "${KERNEL}" != '4.9.0' ]]; then
|
||||||
|
foldable start test_progs "Testing test_progs"
|
||||||
|
# "&& true" does not change the return code (it is not executed
|
||||||
|
# if the Python script fails), but it prevents exiting on a
|
||||||
|
# failure due to the "set -e".
|
||||||
|
./test_progs ${DENYLIST:+-d"$DENYLIST"} ${ALLOWLIST:+-a"$ALLOWLIST"} && true
|
||||||
|
echo "test_progs:$?" >> "${STATUS_FILE}"
|
||||||
|
foldable end test_progs
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
test_progs_no_alu32() {
|
||||||
|
foldable start test_progs-no_alu32 "Testing test_progs-no_alu32"
|
||||||
|
./test_progs-no_alu32 ${DENYLIST:+-d"$DENYLIST"} ${ALLOWLIST:+-a"$ALLOWLIST"} && true
|
||||||
|
echo "test_progs-no_alu32:$?" >> "${STATUS_FILE}"
|
||||||
|
foldable end test_progs-no_alu32
|
||||||
|
}
|
||||||
|
|
||||||
|
test_maps() {
|
||||||
|
if [[ "${KERNEL}" == 'latest' ]]; then
|
||||||
|
foldable start test_maps "Testing test_maps"
|
||||||
|
./test_maps && true
|
||||||
|
echo "test_maps:$?" >> "${STATUS_FILE}"
|
||||||
|
foldable end test_maps
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
test_verifier() {
|
||||||
|
if [[ "${KERNEL}" == 'latest' ]]; then
|
||||||
|
foldable start test_verifier "Testing test_verifier"
|
||||||
|
./test_verifier && true
|
||||||
|
echo "test_verifier:$?" >> "${STATUS_FILE}"
|
||||||
|
foldable end test_verifier
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
foldable end vm_init
|
||||||
|
|
||||||
|
foldable start kernel_config "Kconfig"
|
||||||
|
|
||||||
|
zcat /proc/config.gz
|
||||||
|
|
||||||
|
foldable end kernel_config
|
||||||
|
|
||||||
|
|
||||||
|
configs_path=/${PROJECT_NAME}/selftests/bpf
|
||||||
|
local_configs_path=${PROJECT_NAME}/vmtest/configs
|
||||||
|
DENYLIST=$(read_lists \
|
||||||
|
"$configs_path/DENYLIST" \
|
||||||
|
"$configs_path/DENYLIST.${ARCH}" \
|
||||||
|
"$local_configs_path/DENYLIST-${KERNEL}" \
|
||||||
|
"$local_configs_path/DENYLIST-${KERNEL}.${ARCH}" \
|
||||||
|
)
|
||||||
|
ALLOWLIST=$(read_lists \
|
||||||
|
"$configs_path/ALLOWLIST" \
|
||||||
|
"$configs_path/ALLOWLIST.${ARCH}" \
|
||||||
|
"$local_configs_path/ALLOWLIST-${KERNEL}" \
|
||||||
|
"$local_configs_path/ALLOWLIST-${KERNEL}.${ARCH}" \
|
||||||
|
)
|
||||||
|
|
||||||
|
echo "DENYLIST: ${DENYLIST}"
|
||||||
|
echo "ALLOWLIST: ${ALLOWLIST}"
|
||||||
|
|
||||||
|
cd ${PROJECT_NAME}/selftests/bpf
|
||||||
|
|
||||||
|
if [ $# -eq 0 ]; then
|
||||||
|
test_progs
|
||||||
|
test_progs_no_alu32
|
||||||
|
# test_maps
|
||||||
|
test_verifier
|
||||||
|
else
|
||||||
|
for test_name in "$@"; do
|
||||||
|
"${test_name}"
|
||||||
|
done
|
||||||
|
fi
|
||||||
@@ -18,7 +18,6 @@ extensions = [
|
|||||||
'sphinx.ext.viewcode',
|
'sphinx.ext.viewcode',
|
||||||
'sphinx.ext.imgmath',
|
'sphinx.ext.imgmath',
|
||||||
'sphinx.ext.todo',
|
'sphinx.ext.todo',
|
||||||
'sphinx_rtd_theme',
|
|
||||||
'breathe',
|
'breathe',
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|||||||
@@ -219,14 +219,6 @@ compilation and skeleton generation. Using Libbpf-rs will make building user
|
|||||||
space part of the BPF application easier. Note that the BPF program themselves
|
space part of the BPF application easier. Note that the BPF program themselves
|
||||||
must still be written in plain C.
|
must still be written in plain C.
|
||||||
|
|
||||||
libbpf logging
|
|
||||||
==============
|
|
||||||
|
|
||||||
By default, libbpf logs informational and warning messages to stderr. The
|
|
||||||
verbosity of these messages can be controlled by setting the environment
|
|
||||||
variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log
|
|
||||||
callback can be set using ``libbpf_set_print()``.
|
|
||||||
|
|
||||||
Additional Documentation
|
Additional Documentation
|
||||||
========================
|
========================
|
||||||
|
|
||||||
|
|||||||
@@ -56,16 +56,6 @@ described in more detail in the footnotes.
|
|||||||
| | ``BPF_CGROUP_UDP6_RECVMSG`` | ``cgroup/recvmsg6`` | |
|
| | ``BPF_CGROUP_UDP6_RECVMSG`` | ``cgroup/recvmsg6`` | |
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
+ +----------------------------------------+----------------------------------+-----------+
|
||||||
| | ``BPF_CGROUP_UDP6_SENDMSG`` | ``cgroup/sendmsg6`` | |
|
| | ``BPF_CGROUP_UDP6_SENDMSG`` | ``cgroup/sendmsg6`` | |
|
||||||
| +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_CGROUP_UNIX_CONNECT`` | ``cgroup/connect_unix`` | |
|
|
||||||
| +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_CGROUP_UNIX_SENDMSG`` | ``cgroup/sendmsg_unix`` | |
|
|
||||||
| +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_CGROUP_UNIX_RECVMSG`` | ``cgroup/recvmsg_unix`` | |
|
|
||||||
| +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_CGROUP_UNIX_GETPEERNAME`` | ``cgroup/getpeername_unix`` | |
|
|
||||||
| +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_CGROUP_UNIX_GETSOCKNAME`` | ``cgroup/getsockname_unix`` | |
|
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_CGROUP_SOCK`` | ``BPF_CGROUP_INET4_POST_BIND`` | ``cgroup/post_bind4`` | |
|
| ``BPF_PROG_TYPE_CGROUP_SOCK`` | ``BPF_CGROUP_INET4_POST_BIND`` | ``cgroup/post_bind4`` | |
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
+ +----------------------------------------+----------------------------------+-----------+
|
||||||
@@ -121,8 +111,6 @@ described in more detail in the footnotes.
|
|||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | |
|
| ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | |
|
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
|
||||||
| ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | |
|
| ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | |
|
| ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | |
|
||||||
@@ -133,23 +121,11 @@ described in more detail in the footnotes.
|
|||||||
+ + +----------------------------------+-----------+
|
+ + +----------------------------------+-----------+
|
||||||
| | | ``raw_tracepoint+`` | |
|
| | | ``raw_tracepoint+`` | |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | |
|
| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | |
|
| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | |
|
||||||
+ + +----------------------------------+-----------+
|
+ + +----------------------------------+-----------+
|
||||||
| | | ``tc`` [#tc_legacy]_ | |
|
| | | ``tc`` | |
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | |
|
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | |
|
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | |
|
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | |
|
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | |
|
|
||||||
+ +----------------------------------------+----------------------------------+-----------+
|
|
||||||
| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | |
|
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | |
|
| ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
@@ -169,9 +145,7 @@ described in more detail in the footnotes.
|
|||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | |
|
| ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | |
|
| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | |
|
||||||
+ + +----------------------------------+-----------+
|
|
||||||
| | | ``struct_ops.s+`` [#struct_ops]_ | Yes |
|
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
| ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes |
|
| ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes |
|
||||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||||
@@ -225,11 +199,5 @@ described in more detail in the footnotes.
|
|||||||
``a-zA-Z0-9_.*?``.
|
``a-zA-Z0-9_.*?``.
|
||||||
.. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``.
|
.. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``.
|
||||||
.. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
|
.. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
|
||||||
.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use
|
|
||||||
``tcx/*`` instead.
|
|
||||||
.. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/<name>`` convention,
|
|
||||||
but ``name`` is ignored and it is recommended to just use plain
|
|
||||||
``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer
|
|
||||||
that is tagged with ``SEC(".struct_ops[.link]")``.
|
|
||||||
.. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
|
.. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
|
||||||
.. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
|
.. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
|
||||||
|
|||||||
@@ -1,2 +1 @@
|
|||||||
breathe
|
breathe
|
||||||
sphinx_rtd_theme
|
|
||||||
@@ -37,14 +37,6 @@
|
|||||||
.off = 0, \
|
.off = 0, \
|
||||||
.imm = IMM })
|
.imm = IMM })
|
||||||
|
|
||||||
#define BPF_CALL_REL(DST) \
|
|
||||||
((struct bpf_insn) { \
|
|
||||||
.code = BPF_JMP | BPF_CALL, \
|
|
||||||
.dst_reg = 0, \
|
|
||||||
.src_reg = BPF_PSEUDO_CALL, \
|
|
||||||
.off = 0, \
|
|
||||||
.imm = DST })
|
|
||||||
|
|
||||||
#define BPF_EXIT_INSN() \
|
#define BPF_EXIT_INSN() \
|
||||||
((struct bpf_insn) { \
|
((struct bpf_insn) { \
|
||||||
.code = BPF_JMP | BPF_EXIT, \
|
.code = BPF_JMP | BPF_EXIT, \
|
||||||
|
|||||||
@@ -3,8 +3,6 @@
|
|||||||
#ifndef __LINUX_KERNEL_H
|
#ifndef __LINUX_KERNEL_H
|
||||||
#define __LINUX_KERNEL_H
|
#define __LINUX_KERNEL_H
|
||||||
|
|
||||||
#include <linux/compiler.h>
|
|
||||||
|
|
||||||
#ifndef offsetof
|
#ifndef offsetof
|
||||||
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
|
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
|
||||||
#endif
|
#endif
|
||||||
|
|||||||
@@ -19,7 +19,6 @@
|
|||||||
|
|
||||||
/* ld/ldx fields */
|
/* ld/ldx fields */
|
||||||
#define BPF_DW 0x18 /* double word (64-bit) */
|
#define BPF_DW 0x18 /* double word (64-bit) */
|
||||||
#define BPF_MEMSX 0x80 /* load with sign extension */
|
|
||||||
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
|
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
|
||||||
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
|
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
|
||||||
|
|
||||||
@@ -42,7 +41,6 @@
|
|||||||
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
|
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
|
||||||
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
|
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
|
||||||
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
|
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
|
||||||
#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
|
|
||||||
#define BPF_CALL 0x80 /* function call */
|
#define BPF_CALL 0x80 /* function call */
|
||||||
#define BPF_EXIT 0x90 /* function return */
|
#define BPF_EXIT 0x90 /* function return */
|
||||||
|
|
||||||
@@ -51,13 +49,6 @@
|
|||||||
#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
|
#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
|
||||||
#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
|
#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
|
||||||
|
|
||||||
#define BPF_LOAD_ACQ 0x100 /* load-acquire */
|
|
||||||
#define BPF_STORE_REL 0x110 /* store-release */
|
|
||||||
|
|
||||||
enum bpf_cond_pseudo_jmp {
|
|
||||||
BPF_MAY_GOTO = 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Register numbers */
|
/* Register numbers */
|
||||||
enum {
|
enum {
|
||||||
BPF_REG_0 = 0,
|
BPF_REG_0 = 0,
|
||||||
@@ -85,29 +76,12 @@ struct bpf_insn {
|
|||||||
__s32 imm; /* signed immediate constant */
|
__s32 imm; /* signed immediate constant */
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
|
/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
|
||||||
* byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
|
|
||||||
* the trailing flexible array member) instead.
|
|
||||||
*/
|
|
||||||
struct bpf_lpm_trie_key {
|
struct bpf_lpm_trie_key {
|
||||||
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
|
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
|
||||||
__u8 data[0]; /* Arbitrary size */
|
__u8 data[0]; /* Arbitrary size */
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Header for bpf_lpm_trie_key structs */
|
|
||||||
struct bpf_lpm_trie_key_hdr {
|
|
||||||
__u32 prefixlen;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
|
|
||||||
struct bpf_lpm_trie_key_u8 {
|
|
||||||
union {
|
|
||||||
struct bpf_lpm_trie_key_hdr hdr;
|
|
||||||
__u32 prefixlen;
|
|
||||||
};
|
|
||||||
__u8 data[]; /* Arbitrary size */
|
|
||||||
};
|
|
||||||
|
|
||||||
struct bpf_cgroup_storage_key {
|
struct bpf_cgroup_storage_key {
|
||||||
__u64 cgroup_inode_id; /* cgroup inode id */
|
__u64 cgroup_inode_id; /* cgroup inode id */
|
||||||
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
|
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
|
||||||
@@ -642,11 +616,7 @@ union bpf_iter_link_info {
|
|||||||
* to NULL to begin the batched operation. After each subsequent
|
* to NULL to begin the batched operation. After each subsequent
|
||||||
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
|
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
|
||||||
* *out_batch* as the *in_batch* for the next operation to
|
* *out_batch* as the *in_batch* for the next operation to
|
||||||
* continue iteration from the current point. Both *in_batch* and
|
* continue iteration from the current point.
|
||||||
* *out_batch* must point to memory large enough to hold a key,
|
|
||||||
* except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
|
|
||||||
* LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
|
|
||||||
* must be at least 4 bytes wide regardless of key size.
|
|
||||||
*
|
*
|
||||||
* The *keys* and *values* are output parameters which must point
|
* The *keys* and *values* are output parameters which must point
|
||||||
* to memory large enough to hold *count* items based on the key
|
* to memory large enough to hold *count* items based on the key
|
||||||
@@ -876,36 +846,6 @@ union bpf_iter_link_info {
|
|||||||
* Returns zero on success. On error, -1 is returned and *errno*
|
* Returns zero on success. On error, -1 is returned and *errno*
|
||||||
* is set appropriately.
|
* is set appropriately.
|
||||||
*
|
*
|
||||||
* BPF_TOKEN_CREATE
|
|
||||||
* Description
|
|
||||||
* Create BPF token with embedded information about what
|
|
||||||
* BPF-related functionality it allows:
|
|
||||||
* - a set of allowed bpf() syscall commands;
|
|
||||||
* - a set of allowed BPF map types to be created with
|
|
||||||
* BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
|
|
||||||
* - a set of allowed BPF program types and BPF program attach
|
|
||||||
* types to be loaded with BPF_PROG_LOAD command, if
|
|
||||||
* BPF_PROG_LOAD itself is allowed.
|
|
||||||
*
|
|
||||||
* BPF token is created (derived) from an instance of BPF FS,
|
|
||||||
* assuming it has necessary delegation mount options specified.
|
|
||||||
* This BPF token can be passed as an extra parameter to various
|
|
||||||
* bpf() syscall commands to grant BPF subsystem functionality to
|
|
||||||
* unprivileged processes.
|
|
||||||
*
|
|
||||||
* When created, BPF token is "associated" with the owning
|
|
||||||
* user namespace of BPF FS instance (super block) that it was
|
|
||||||
* derived from, and subsequent BPF operations performed with
|
|
||||||
* BPF token would be performing capabilities checks (i.e.,
|
|
||||||
* CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
|
|
||||||
* that user namespace. Without BPF token, such capabilities
|
|
||||||
* have to be granted in init user namespace, making bpf()
|
|
||||||
* syscall incompatible with user namespace, for the most part.
|
|
||||||
*
|
|
||||||
* Return
|
|
||||||
* A new file descriptor (a nonnegative integer), or -1 if an
|
|
||||||
* error occurred (in which case, *errno* is set appropriately).
|
|
||||||
*
|
|
||||||
* NOTES
|
* NOTES
|
||||||
* eBPF objects (maps and programs) can be shared between processes.
|
* eBPF objects (maps and programs) can be shared between processes.
|
||||||
*
|
*
|
||||||
@@ -960,8 +900,6 @@ enum bpf_cmd {
|
|||||||
BPF_ITER_CREATE,
|
BPF_ITER_CREATE,
|
||||||
BPF_LINK_DETACH,
|
BPF_LINK_DETACH,
|
||||||
BPF_PROG_BIND_MAP,
|
BPF_PROG_BIND_MAP,
|
||||||
BPF_TOKEN_CREATE,
|
|
||||||
__MAX_BPF_CMD,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum bpf_map_type {
|
enum bpf_map_type {
|
||||||
@@ -993,14 +931,7 @@ enum bpf_map_type {
|
|||||||
*/
|
*/
|
||||||
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
|
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
|
||||||
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
|
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
|
||||||
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
|
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
|
||||||
/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
|
|
||||||
* attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
|
|
||||||
* local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
|
|
||||||
* functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
|
|
||||||
* deprecated.
|
|
||||||
*/
|
|
||||||
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
|
|
||||||
BPF_MAP_TYPE_QUEUE,
|
BPF_MAP_TYPE_QUEUE,
|
||||||
BPF_MAP_TYPE_STACK,
|
BPF_MAP_TYPE_STACK,
|
||||||
BPF_MAP_TYPE_SK_STORAGE,
|
BPF_MAP_TYPE_SK_STORAGE,
|
||||||
@@ -1012,8 +943,6 @@ enum bpf_map_type {
|
|||||||
BPF_MAP_TYPE_BLOOM_FILTER,
|
BPF_MAP_TYPE_BLOOM_FILTER,
|
||||||
BPF_MAP_TYPE_USER_RINGBUF,
|
BPF_MAP_TYPE_USER_RINGBUF,
|
||||||
BPF_MAP_TYPE_CGRP_STORAGE,
|
BPF_MAP_TYPE_CGRP_STORAGE,
|
||||||
BPF_MAP_TYPE_ARENA,
|
|
||||||
__MAX_BPF_MAP_TYPE
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Note that tracing related programs such as
|
/* Note that tracing related programs such as
|
||||||
@@ -1058,7 +987,6 @@ enum bpf_prog_type {
|
|||||||
BPF_PROG_TYPE_SK_LOOKUP,
|
BPF_PROG_TYPE_SK_LOOKUP,
|
||||||
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
|
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
|
||||||
BPF_PROG_TYPE_NETFILTER,
|
BPF_PROG_TYPE_NETFILTER,
|
||||||
__MAX_BPF_PROG_TYPE
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum bpf_attach_type {
|
enum bpf_attach_type {
|
||||||
@@ -1108,26 +1036,11 @@ enum bpf_attach_type {
|
|||||||
BPF_LSM_CGROUP,
|
BPF_LSM_CGROUP,
|
||||||
BPF_STRUCT_OPS,
|
BPF_STRUCT_OPS,
|
||||||
BPF_NETFILTER,
|
BPF_NETFILTER,
|
||||||
BPF_TCX_INGRESS,
|
|
||||||
BPF_TCX_EGRESS,
|
|
||||||
BPF_TRACE_UPROBE_MULTI,
|
|
||||||
BPF_CGROUP_UNIX_CONNECT,
|
|
||||||
BPF_CGROUP_UNIX_SENDMSG,
|
|
||||||
BPF_CGROUP_UNIX_RECVMSG,
|
|
||||||
BPF_CGROUP_UNIX_GETPEERNAME,
|
|
||||||
BPF_CGROUP_UNIX_GETSOCKNAME,
|
|
||||||
BPF_NETKIT_PRIMARY,
|
|
||||||
BPF_NETKIT_PEER,
|
|
||||||
BPF_TRACE_KPROBE_SESSION,
|
|
||||||
BPF_TRACE_UPROBE_SESSION,
|
|
||||||
__MAX_BPF_ATTACH_TYPE
|
__MAX_BPF_ATTACH_TYPE
|
||||||
};
|
};
|
||||||
|
|
||||||
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
|
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
|
||||||
|
|
||||||
/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
|
|
||||||
* in sync with the definitions below.
|
|
||||||
*/
|
|
||||||
enum bpf_link_type {
|
enum bpf_link_type {
|
||||||
BPF_LINK_TYPE_UNSPEC = 0,
|
BPF_LINK_TYPE_UNSPEC = 0,
|
||||||
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
|
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
|
||||||
@@ -1140,23 +1053,8 @@ enum bpf_link_type {
|
|||||||
BPF_LINK_TYPE_KPROBE_MULTI = 8,
|
BPF_LINK_TYPE_KPROBE_MULTI = 8,
|
||||||
BPF_LINK_TYPE_STRUCT_OPS = 9,
|
BPF_LINK_TYPE_STRUCT_OPS = 9,
|
||||||
BPF_LINK_TYPE_NETFILTER = 10,
|
BPF_LINK_TYPE_NETFILTER = 10,
|
||||||
BPF_LINK_TYPE_TCX = 11,
|
|
||||||
BPF_LINK_TYPE_UPROBE_MULTI = 12,
|
|
||||||
BPF_LINK_TYPE_NETKIT = 13,
|
|
||||||
BPF_LINK_TYPE_SOCKMAP = 14,
|
|
||||||
__MAX_BPF_LINK_TYPE,
|
|
||||||
};
|
|
||||||
|
|
||||||
#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
|
MAX_BPF_LINK_TYPE,
|
||||||
|
|
||||||
enum bpf_perf_event_type {
|
|
||||||
BPF_PERF_EVENT_UNSPEC = 0,
|
|
||||||
BPF_PERF_EVENT_UPROBE = 1,
|
|
||||||
BPF_PERF_EVENT_URETPROBE = 2,
|
|
||||||
BPF_PERF_EVENT_KPROBE = 3,
|
|
||||||
BPF_PERF_EVENT_KRETPROBE = 4,
|
|
||||||
BPF_PERF_EVENT_TRACEPOINT = 5,
|
|
||||||
BPF_PERF_EVENT_EVENT = 6,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
|
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
|
||||||
@@ -1205,13 +1103,7 @@ enum bpf_perf_event_type {
|
|||||||
*/
|
*/
|
||||||
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
|
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
|
||||||
#define BPF_F_ALLOW_MULTI (1U << 1)
|
#define BPF_F_ALLOW_MULTI (1U << 1)
|
||||||
/* Generic attachment flags. */
|
|
||||||
#define BPF_F_REPLACE (1U << 2)
|
#define BPF_F_REPLACE (1U << 2)
|
||||||
#define BPF_F_BEFORE (1U << 3)
|
|
||||||
#define BPF_F_AFTER (1U << 4)
|
|
||||||
#define BPF_F_ID (1U << 5)
|
|
||||||
#define BPF_F_PREORDER (1U << 6)
|
|
||||||
#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
|
|
||||||
|
|
||||||
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
|
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
|
||||||
* verifier will perform strict alignment checking as if the kernel
|
* verifier will perform strict alignment checking as if the kernel
|
||||||
@@ -1273,27 +1165,10 @@ enum bpf_perf_event_type {
|
|||||||
*/
|
*/
|
||||||
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
|
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
|
||||||
|
|
||||||
/* The verifier internal test flag. Behavior is undefined */
|
|
||||||
#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
|
|
||||||
|
|
||||||
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
|
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
|
||||||
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
|
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
|
||||||
*/
|
*/
|
||||||
enum {
|
#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
|
||||||
BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
|
|
||||||
};
|
|
||||||
|
|
||||||
/* link_create.uprobe_multi.flags used in LINK_CREATE command for
|
|
||||||
* BPF_TRACE_UPROBE_MULTI attach type to create return probe.
|
|
||||||
*/
|
|
||||||
enum {
|
|
||||||
BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
|
|
||||||
};
|
|
||||||
|
|
||||||
/* link_create.netfilter.flags used in LINK_CREATE command for
|
|
||||||
* BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
|
|
||||||
*/
|
|
||||||
#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
|
|
||||||
|
|
||||||
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
|
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
|
||||||
* the following extensions:
|
* the following extensions:
|
||||||
@@ -1349,10 +1224,6 @@ enum {
|
|||||||
*/
|
*/
|
||||||
#define BPF_PSEUDO_KFUNC_CALL 2
|
#define BPF_PSEUDO_KFUNC_CALL 2
|
||||||
|
|
||||||
enum bpf_addr_space_cast {
|
|
||||||
BPF_ADDR_SPACE_CAST = 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
/* flags for BPF_MAP_UPDATE_ELEM command */
|
/* flags for BPF_MAP_UPDATE_ELEM command */
|
||||||
enum {
|
enum {
|
||||||
BPF_ANY = 0, /* create new element or update existing */
|
BPF_ANY = 0, /* create new element or update existing */
|
||||||
@@ -1405,18 +1276,6 @@ enum {
|
|||||||
|
|
||||||
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
|
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
|
||||||
BPF_F_PATH_FD = (1U << 14),
|
BPF_F_PATH_FD = (1U << 14),
|
||||||
|
|
||||||
/* Flag for value_type_btf_obj_fd, the fd is available */
|
|
||||||
BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
|
|
||||||
|
|
||||||
/* BPF token FD is passed in a corresponding command's token_fd field */
|
|
||||||
BPF_F_TOKEN_FD = (1U << 16),
|
|
||||||
|
|
||||||
/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
|
|
||||||
BPF_F_SEGV_ON_FAULT = (1U << 17),
|
|
||||||
|
|
||||||
/* Do not translate kernel bpf_arena pointers to user pointers */
|
|
||||||
BPF_F_NO_USER_CONV = (1U << 18),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Flags for BPF_PROG_QUERY. */
|
/* Flags for BPF_PROG_QUERY. */
|
||||||
@@ -1433,8 +1292,6 @@ enum {
|
|||||||
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
|
#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
|
||||||
/* If set, XDP frames will be transmitted after processing */
|
/* If set, XDP frames will be transmitted after processing */
|
||||||
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
|
#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
|
||||||
/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */
|
|
||||||
#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2)
|
|
||||||
|
|
||||||
/* type for BPF_ENABLE_STATS */
|
/* type for BPF_ENABLE_STATS */
|
||||||
enum bpf_stats_type {
|
enum bpf_stats_type {
|
||||||
@@ -1490,23 +1347,11 @@ union bpf_attr {
|
|||||||
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
|
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
|
||||||
* number of hash functions (if 0, the bloom filter will default
|
* number of hash functions (if 0, the bloom filter will default
|
||||||
* to using 5 hash functions).
|
* to using 5 hash functions).
|
||||||
*
|
|
||||||
* BPF_MAP_TYPE_ARENA - contains the address where user space
|
|
||||||
* is going to mmap() the arena. It has to be page aligned.
|
|
||||||
*/
|
*/
|
||||||
__u64 map_extra;
|
__u64 map_extra;
|
||||||
|
|
||||||
__s32 value_type_btf_obj_fd; /* fd pointing to a BTF
|
|
||||||
* type data for
|
|
||||||
* btf_vmlinux_value_type_id.
|
|
||||||
*/
|
|
||||||
/* BPF token FD to use with BPF_MAP_CREATE operation.
|
|
||||||
* If provided, map_flags should have BPF_F_TOKEN_FD flag set.
|
|
||||||
*/
|
|
||||||
__s32 map_token_fd;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_MAP_*_ELEM and BPF_MAP_FREEZE commands */
|
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
|
||||||
__u32 map_fd;
|
__u32 map_fd;
|
||||||
__aligned_u64 key;
|
__aligned_u64 key;
|
||||||
union {
|
union {
|
||||||
@@ -1573,20 +1418,6 @@ union bpf_attr {
|
|||||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||||
*/
|
*/
|
||||||
__u32 log_true_size;
|
__u32 log_true_size;
|
||||||
/* BPF token FD to use with BPF_PROG_LOAD operation.
|
|
||||||
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
|
|
||||||
*/
|
|
||||||
__s32 prog_token_fd;
|
|
||||||
/* The fd_array_cnt can be used to pass the length of the
|
|
||||||
* fd_array array. In this case all the [map] file descriptors
|
|
||||||
* passed in this array will be bound to the program, even if
|
|
||||||
* the maps are not referenced directly. The functionality is
|
|
||||||
* similar to the BPF_PROG_BIND_MAP syscall, but maps can be
|
|
||||||
* used by the verifier during the program load. If provided,
|
|
||||||
* then the fd_array[0,...,fd_array_cnt-1] is expected to be
|
|
||||||
* continuous.
|
|
||||||
*/
|
|
||||||
__u32 fd_array_cnt;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
||||||
@@ -1603,19 +1434,14 @@ union bpf_attr {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
|
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
|
||||||
union {
|
__u32 target_fd; /* container object to attach to */
|
||||||
__u32 target_fd; /* target object to attach to or ... */
|
__u32 attach_bpf_fd; /* eBPF program to attach */
|
||||||
__u32 target_ifindex; /* target ifindex */
|
|
||||||
};
|
|
||||||
__u32 attach_bpf_fd;
|
|
||||||
__u32 attach_type;
|
__u32 attach_type;
|
||||||
__u32 attach_flags;
|
__u32 attach_flags;
|
||||||
__u32 replace_bpf_fd;
|
__u32 replace_bpf_fd; /* previously attached eBPF
|
||||||
union {
|
* program to replace if
|
||||||
__u32 relative_fd;
|
* BPF_F_REPLACE is used
|
||||||
__u32 relative_id;
|
*/
|
||||||
};
|
|
||||||
__u64 expected_revision;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
|
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
|
||||||
@@ -1652,7 +1478,6 @@ union bpf_attr {
|
|||||||
};
|
};
|
||||||
__u32 next_id;
|
__u32 next_id;
|
||||||
__u32 open_flags;
|
__u32 open_flags;
|
||||||
__s32 fd_by_id_token_fd;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
|
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
|
||||||
@@ -1662,33 +1487,21 @@ union bpf_attr {
|
|||||||
} info;
|
} info;
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_PROG_QUERY command */
|
struct { /* anonymous struct used by BPF_PROG_QUERY command */
|
||||||
union {
|
__u32 target_fd; /* container object to query */
|
||||||
__u32 target_fd; /* target object to query or ... */
|
|
||||||
__u32 target_ifindex; /* target ifindex */
|
|
||||||
};
|
|
||||||
__u32 attach_type;
|
__u32 attach_type;
|
||||||
__u32 query_flags;
|
__u32 query_flags;
|
||||||
__u32 attach_flags;
|
__u32 attach_flags;
|
||||||
__aligned_u64 prog_ids;
|
__aligned_u64 prog_ids;
|
||||||
union {
|
__u32 prog_cnt;
|
||||||
__u32 prog_cnt;
|
|
||||||
__u32 count;
|
|
||||||
};
|
|
||||||
__u32 :32;
|
|
||||||
/* output: per-program attach_flags.
|
/* output: per-program attach_flags.
|
||||||
* not allowed to be set during effective query.
|
* not allowed to be set during effective query.
|
||||||
*/
|
*/
|
||||||
__aligned_u64 prog_attach_flags;
|
__aligned_u64 prog_attach_flags;
|
||||||
__aligned_u64 link_ids;
|
|
||||||
__aligned_u64 link_attach_flags;
|
|
||||||
__u64 revision;
|
|
||||||
} query;
|
} query;
|
||||||
|
|
||||||
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
|
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
|
||||||
__u64 name;
|
__u64 name;
|
||||||
__u32 prog_fd;
|
__u32 prog_fd;
|
||||||
__u32 :32;
|
|
||||||
__aligned_u64 cookie;
|
|
||||||
} raw_tracepoint;
|
} raw_tracepoint;
|
||||||
|
|
||||||
struct { /* anonymous struct for BPF_BTF_LOAD */
|
struct { /* anonymous struct for BPF_BTF_LOAD */
|
||||||
@@ -1702,11 +1515,6 @@ union bpf_attr {
|
|||||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||||
*/
|
*/
|
||||||
__u32 btf_log_true_size;
|
__u32 btf_log_true_size;
|
||||||
__u32 btf_flags;
|
|
||||||
/* BPF token FD to use with BPF_BTF_LOAD operation.
|
|
||||||
* If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
|
|
||||||
*/
|
|
||||||
__s32 btf_token_fd;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
@@ -1731,13 +1539,13 @@ union bpf_attr {
|
|||||||
__u32 map_fd; /* struct_ops to attach */
|
__u32 map_fd; /* struct_ops to attach */
|
||||||
};
|
};
|
||||||
union {
|
union {
|
||||||
__u32 target_fd; /* target object to attach to or ... */
|
__u32 target_fd; /* object to attach to */
|
||||||
__u32 target_ifindex; /* target ifindex */
|
__u32 target_ifindex; /* target ifindex */
|
||||||
};
|
};
|
||||||
__u32 attach_type; /* attach type */
|
__u32 attach_type; /* attach type */
|
||||||
__u32 flags; /* extra flags */
|
__u32 flags; /* extra flags */
|
||||||
union {
|
union {
|
||||||
__u32 target_btf_id; /* btf_id of target to attach to */
|
__u32 target_btf_id; /* btf_id of target to attach to */
|
||||||
struct {
|
struct {
|
||||||
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
|
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
|
||||||
__u32 iter_info_len; /* iter_info length */
|
__u32 iter_info_len; /* iter_info length */
|
||||||
@@ -1771,29 +1579,6 @@ union bpf_attr {
|
|||||||
__s32 priority;
|
__s32 priority;
|
||||||
__u32 flags;
|
__u32 flags;
|
||||||
} netfilter;
|
} netfilter;
|
||||||
struct {
|
|
||||||
union {
|
|
||||||
__u32 relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
};
|
|
||||||
__u64 expected_revision;
|
|
||||||
} tcx;
|
|
||||||
struct {
|
|
||||||
__aligned_u64 path;
|
|
||||||
__aligned_u64 offsets;
|
|
||||||
__aligned_u64 ref_ctr_offsets;
|
|
||||||
__aligned_u64 cookies;
|
|
||||||
__u32 cnt;
|
|
||||||
__u32 flags;
|
|
||||||
__u32 pid;
|
|
||||||
} uprobe_multi;
|
|
||||||
struct {
|
|
||||||
union {
|
|
||||||
__u32 relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
};
|
|
||||||
__u64 expected_revision;
|
|
||||||
} netkit;
|
|
||||||
};
|
};
|
||||||
} link_create;
|
} link_create;
|
||||||
|
|
||||||
@@ -1837,11 +1622,6 @@ union bpf_attr {
|
|||||||
__u32 flags; /* extra flags */
|
__u32 flags; /* extra flags */
|
||||||
} prog_bind_map;
|
} prog_bind_map;
|
||||||
|
|
||||||
struct { /* struct used by BPF_TOKEN_CREATE command */
|
|
||||||
__u32 flags;
|
|
||||||
__u32 bpffs_fd;
|
|
||||||
} token_create;
|
|
||||||
|
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
/* The description below is an attempt at providing documentation to eBPF
|
/* The description below is an attempt at providing documentation to eBPF
|
||||||
@@ -1989,21 +1769,15 @@ union bpf_attr {
|
|||||||
* program.
|
* program.
|
||||||
* Return
|
* Return
|
||||||
* The SMP id of the processor running the program.
|
* The SMP id of the processor running the program.
|
||||||
* Attributes
|
|
||||||
* __bpf_fastcall
|
|
||||||
*
|
*
|
||||||
* long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
|
* long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
* Store *len* bytes from address *from* into the packet
|
* Store *len* bytes from address *from* into the packet
|
||||||
* associated to *skb*, at *offset*. The *flags* are a combination
|
* associated to *skb*, at *offset*. *flags* are a combination of
|
||||||
* of the following values:
|
* **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
|
||||||
*
|
* checksum for the packet after storing the bytes) and
|
||||||
* **BPF_F_RECOMPUTE_CSUM**
|
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
|
||||||
* Automatically update *skb*\ **->csum** after storing the
|
* **->swhash** and *skb*\ **->l4hash** to 0).
|
||||||
* bytes.
|
|
||||||
* **BPF_F_INVALIDATE_HASH**
|
|
||||||
* Set *skb*\ **->hash**, *skb*\ **->swhash** and *skb*\
|
|
||||||
* **->l4hash** to 0.
|
|
||||||
*
|
*
|
||||||
* A call to this helper is susceptible to change the underlying
|
* A call to this helper is susceptible to change the underlying
|
||||||
* packet buffer. Therefore, at load time, all checks on pointers
|
* packet buffer. Therefore, at load time, all checks on pointers
|
||||||
@@ -2055,7 +1829,7 @@ union bpf_attr {
|
|||||||
* untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
|
* untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
|
||||||
* for updates resulting in a null checksum the value is set to
|
* for updates resulting in a null checksum the value is set to
|
||||||
* **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
|
* **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
|
||||||
* that the modified header field is part of the pseudo-header.
|
* the checksum is to be computed against a pseudo-header.
|
||||||
*
|
*
|
||||||
* This helper works in combination with **bpf_csum_diff**\ (),
|
* This helper works in combination with **bpf_csum_diff**\ (),
|
||||||
* which does not update the checksum in-place, but offers more
|
* which does not update the checksum in-place, but offers more
|
||||||
@@ -2123,9 +1897,7 @@ union bpf_attr {
|
|||||||
* performed again, if the helper is used in combination with
|
* performed again, if the helper is used in combination with
|
||||||
* direct packet access.
|
* direct packet access.
|
||||||
* Return
|
* Return
|
||||||
* 0 on success, or a negative error in case of failure. Positive
|
* 0 on success, or a negative error in case of failure.
|
||||||
* error indicates a potential drop or congestion in the target
|
|
||||||
* device. The particular positive error codes are not defined.
|
|
||||||
*
|
*
|
||||||
* u64 bpf_get_current_pid_tgid(void)
|
* u64 bpf_get_current_pid_tgid(void)
|
||||||
* Description
|
* Description
|
||||||
@@ -2858,8 +2630,8 @@ union bpf_attr {
|
|||||||
* *bpf_socket* should be one of the following:
|
* *bpf_socket* should be one of the following:
|
||||||
*
|
*
|
||||||
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
|
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
|
||||||
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
|
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
|
||||||
* **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
|
* and **BPF_CGROUP_INET6_CONNECT**.
|
||||||
*
|
*
|
||||||
* This helper actually implements a subset of **setsockopt()**.
|
* This helper actually implements a subset of **setsockopt()**.
|
||||||
* It supports the following *level*\ s:
|
* It supports the following *level*\ s:
|
||||||
@@ -2876,7 +2648,7 @@ union bpf_attr {
|
|||||||
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
|
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
|
||||||
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
|
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
|
||||||
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
|
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
|
||||||
* **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
|
* **TCP_BPF_RTO_MIN**.
|
||||||
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
|
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
|
||||||
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
|
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
|
||||||
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
|
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
|
||||||
@@ -3097,8 +2869,8 @@ union bpf_attr {
|
|||||||
* *bpf_socket* should be one of the following:
|
* *bpf_socket* should be one of the following:
|
||||||
*
|
*
|
||||||
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
|
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
|
||||||
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
|
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
|
||||||
* **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
|
* and **BPF_CGROUP_INET6_CONNECT**.
|
||||||
*
|
*
|
||||||
* This helper actually implements a subset of **getsockopt()**.
|
* This helper actually implements a subset of **getsockopt()**.
|
||||||
* It supports the same set of *optname*\ s that is supported by
|
* It supports the same set of *optname*\ s that is supported by
|
||||||
@@ -3126,6 +2898,10 @@ union bpf_attr {
|
|||||||
* with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
|
* with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
|
||||||
* option, and in this case it only works on functions tagged with
|
* option, and in this case it only works on functions tagged with
|
||||||
* **ALLOW_ERROR_INJECTION** in the kernel code.
|
* **ALLOW_ERROR_INJECTION** in the kernel code.
|
||||||
|
*
|
||||||
|
* Also, the helper is only available for the architectures having
|
||||||
|
* the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
|
||||||
|
* x86 architecture is the only one to support this feature.
|
||||||
* Return
|
* Return
|
||||||
* 0
|
* 0
|
||||||
*
|
*
|
||||||
@@ -3414,15 +3190,6 @@ union bpf_attr {
|
|||||||
* and *params*->smac will not be set as output. A common
|
* and *params*->smac will not be set as output. A common
|
||||||
* use case is to call **bpf_redirect_neigh**\ () after
|
* use case is to call **bpf_redirect_neigh**\ () after
|
||||||
* doing **bpf_fib_lookup**\ ().
|
* doing **bpf_fib_lookup**\ ().
|
||||||
* **BPF_FIB_LOOKUP_SRC**
|
|
||||||
* Derive and set source IP addr in *params*->ipv{4,6}_src
|
|
||||||
* for the nexthop. If the src addr cannot be derived,
|
|
||||||
* **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
|
|
||||||
* case, *params*->dmac and *params*->smac are not set either.
|
|
||||||
* **BPF_FIB_LOOKUP_MARK**
|
|
||||||
* Use the mark present in *params*->mark for the fib lookup.
|
|
||||||
* This option should not be used with BPF_FIB_LOOKUP_DIRECT,
|
|
||||||
* as it only has meaning for full lookups.
|
|
||||||
*
|
*
|
||||||
* *ctx* is either **struct xdp_md** for XDP programs or
|
* *ctx* is either **struct xdp_md** for XDP programs or
|
||||||
* **struct sk_buff** tc cls_act programs.
|
* **struct sk_buff** tc cls_act programs.
|
||||||
@@ -4392,6 +4159,9 @@ union bpf_attr {
|
|||||||
* **-EOPNOTSUPP** if the operation is not supported, for example
|
* **-EOPNOTSUPP** if the operation is not supported, for example
|
||||||
* a call from outside of TC ingress.
|
* a call from outside of TC ingress.
|
||||||
*
|
*
|
||||||
|
* **-ESOCKTNOSUPPORT** if the socket type is not supported
|
||||||
|
* (reuseport).
|
||||||
|
*
|
||||||
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
|
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
* Helper is overloaded depending on BPF program type. This
|
* Helper is overloaded depending on BPF program type. This
|
||||||
@@ -4656,8 +4426,6 @@ union bpf_attr {
|
|||||||
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
|
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
|
||||||
* Description
|
* Description
|
||||||
* Return a user or a kernel stack in bpf program provided buffer.
|
* Return a user or a kernel stack in bpf program provided buffer.
|
||||||
* Note: the user stack will only be populated if the *task* is
|
|
||||||
* the current task; all other tasks will return -EOPNOTSUPP.
|
|
||||||
* To achieve this, the helper needs *task*, which is a valid
|
* To achieve this, the helper needs *task*, which is a valid
|
||||||
* pointer to **struct task_struct**. To store the stacktrace, the
|
* pointer to **struct task_struct**. To store the stacktrace, the
|
||||||
* bpf program provides *buf* with a nonnegative *size*.
|
* bpf program provides *buf* with a nonnegative *size*.
|
||||||
@@ -4669,7 +4437,6 @@ union bpf_attr {
|
|||||||
*
|
*
|
||||||
* **BPF_F_USER_STACK**
|
* **BPF_F_USER_STACK**
|
||||||
* Collect a user space stack instead of a kernel stack.
|
* Collect a user space stack instead of a kernel stack.
|
||||||
* The *task* must be the current task.
|
|
||||||
* **BPF_F_USER_BUILD_ID**
|
* **BPF_F_USER_BUILD_ID**
|
||||||
* Collect buildid+offset instead of ips for user stack,
|
* Collect buildid+offset instead of ips for user stack,
|
||||||
* only valid if **BPF_F_USER_STACK** is also specified.
|
* only valid if **BPF_F_USER_STACK** is also specified.
|
||||||
@@ -4973,9 +4740,9 @@ union bpf_attr {
|
|||||||
* going through the CPU's backlog queue.
|
* going through the CPU's backlog queue.
|
||||||
*
|
*
|
||||||
* The *flags* argument is reserved and must be 0. The helper is
|
* The *flags* argument is reserved and must be 0. The helper is
|
||||||
* currently only supported for tc BPF program types at the
|
* currently only supported for tc BPF program types at the ingress
|
||||||
* ingress hook and for veth and netkit target device types. The
|
* hook and for veth device types. The peer device must reside in a
|
||||||
* peer device must reside in a different network namespace.
|
* different network namespace.
|
||||||
* Return
|
* Return
|
||||||
* The helper returns **TC_ACT_REDIRECT** on success or
|
* The helper returns **TC_ACT_REDIRECT** on success or
|
||||||
* **TC_ACT_SHOT** on error.
|
* **TC_ACT_SHOT** on error.
|
||||||
@@ -5051,7 +4818,7 @@ union bpf_attr {
|
|||||||
* bytes will be copied to *dst*
|
* bytes will be copied to *dst*
|
||||||
* Return
|
* Return
|
||||||
* The **hash_algo** is returned on success,
|
* The **hash_algo** is returned on success,
|
||||||
* **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
|
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
|
||||||
* invalid arguments are passed.
|
* invalid arguments are passed.
|
||||||
*
|
*
|
||||||
* struct socket *bpf_sock_from_file(struct file *file)
|
* struct socket *bpf_sock_from_file(struct file *file)
|
||||||
@@ -5258,8 +5025,6 @@ union bpf_attr {
|
|||||||
* **BPF_F_TIMER_ABS**
|
* **BPF_F_TIMER_ABS**
|
||||||
* Start the timer in absolute expire value instead of the
|
* Start the timer in absolute expire value instead of the
|
||||||
* default relative one.
|
* default relative one.
|
||||||
* **BPF_F_TIMER_CPU_PIN**
|
|
||||||
* Timer will be pinned to the CPU of the caller.
|
|
||||||
*
|
*
|
||||||
* Return
|
* Return
|
||||||
* 0 on success.
|
* 0 on success.
|
||||||
@@ -5279,14 +5044,9 @@ union bpf_attr {
|
|||||||
* u64 bpf_get_func_ip(void *ctx)
|
* u64 bpf_get_func_ip(void *ctx)
|
||||||
* Description
|
* Description
|
||||||
* Get address of the traced function (for tracing and kprobe programs).
|
* Get address of the traced function (for tracing and kprobe programs).
|
||||||
*
|
|
||||||
* When called for kprobe program attached as uprobe it returns
|
|
||||||
* probe address for both entry and return uprobe.
|
|
||||||
*
|
|
||||||
* Return
|
* Return
|
||||||
* Address of the traced function for kprobe.
|
* Address of the traced function.
|
||||||
* 0 for kprobes placed within the function (not at the entry).
|
* 0 for kprobes placed within the function (not at the entry).
|
||||||
* Address of the probe for uprobe and return uprobe.
|
|
||||||
*
|
*
|
||||||
* u64 bpf_get_attach_cookie(void *ctx)
|
* u64 bpf_get_attach_cookie(void *ctx)
|
||||||
* Description
|
* Description
|
||||||
@@ -5390,7 +5150,7 @@ union bpf_attr {
|
|||||||
* Currently, the **flags** must be 0. Currently, nr_loops is
|
* Currently, the **flags** must be 0. Currently, nr_loops is
|
||||||
* limited to 1 << 23 (~8 million) loops.
|
* limited to 1 << 23 (~8 million) loops.
|
||||||
*
|
*
|
||||||
* long (\*callback_fn)(u64 index, void \*ctx);
|
* long (\*callback_fn)(u32 index, void \*ctx);
|
||||||
*
|
*
|
||||||
* where **index** is the current index in the loop. The index
|
* where **index** is the current index in the loop. The index
|
||||||
* is zero-indexed.
|
* is zero-indexed.
|
||||||
@@ -5537,15 +5297,14 @@ union bpf_attr {
|
|||||||
* bytes will be copied to *dst*
|
* bytes will be copied to *dst*
|
||||||
* Return
|
* Return
|
||||||
* The **hash_algo** is returned on success,
|
* The **hash_algo** is returned on success,
|
||||||
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
|
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
|
||||||
* invalid arguments are passed.
|
* invalid arguments are passed.
|
||||||
*
|
*
|
||||||
* void *bpf_kptr_xchg(void *dst, void *ptr)
|
* void *bpf_kptr_xchg(void *map_value, void *ptr)
|
||||||
* Description
|
* Description
|
||||||
* Exchange kptr at pointer *dst* with *ptr*, and return the old value.
|
* Exchange kptr at pointer *map_value* with *ptr*, and return the
|
||||||
* *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
|
* old value. *ptr* can be NULL, otherwise it must be a referenced
|
||||||
* it must be a referenced pointer which will be released when this helper
|
* pointer which will be released when this helper is called.
|
||||||
* is called.
|
|
||||||
* Return
|
* Return
|
||||||
* The old value of kptr (which can be NULL). The returned pointer
|
* The old value of kptr (which can be NULL). The returned pointer
|
||||||
* if not NULL, is a reference which must be released using its
|
* if not NULL, is a reference which must be released using its
|
||||||
@@ -6028,10 +5787,7 @@ union bpf_attr {
|
|||||||
FN(user_ringbuf_drain, 209, ##ctx) \
|
FN(user_ringbuf_drain, 209, ##ctx) \
|
||||||
FN(cgrp_storage_get, 210, ##ctx) \
|
FN(cgrp_storage_get, 210, ##ctx) \
|
||||||
FN(cgrp_storage_delete, 211, ##ctx) \
|
FN(cgrp_storage_delete, 211, ##ctx) \
|
||||||
/* This helper list is effectively frozen. If you are trying to \
|
/* */
|
||||||
* add a new helper, you should add a kfunc instead which has \
|
|
||||||
* less stability guarantees. See Documentation/bpf/kfuncs.rst \
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
|
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
|
||||||
* know or care about integer value that is now passed as second argument
|
* know or care about integer value that is now passed as second argument
|
||||||
@@ -6071,6 +5827,11 @@ enum {
|
|||||||
BPF_F_MARK_ENFORCE = (1ULL << 6),
|
BPF_F_MARK_ENFORCE = (1ULL << 6),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
|
||||||
|
enum {
|
||||||
|
BPF_F_INGRESS = (1ULL << 0),
|
||||||
|
};
|
||||||
|
|
||||||
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
|
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
|
||||||
enum {
|
enum {
|
||||||
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
|
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
|
||||||
@@ -6217,12 +5978,10 @@ enum {
|
|||||||
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
|
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Flags for bpf_redirect and bpf_redirect_map helpers */
|
/* Flags for bpf_redirect_map helper */
|
||||||
enum {
|
enum {
|
||||||
BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
|
BPF_F_BROADCAST = (1ULL << 3),
|
||||||
BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
|
BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
|
||||||
BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
|
|
||||||
#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#define __bpf_md_ptr(type, name) \
|
#define __bpf_md_ptr(type, name) \
|
||||||
@@ -6231,17 +5990,12 @@ union { \
|
|||||||
__u64 :64; \
|
__u64 :64; \
|
||||||
} __attribute__((aligned(8)))
|
} __attribute__((aligned(8)))
|
||||||
|
|
||||||
/* The enum used in skb->tstamp_type. It specifies the clock type
|
|
||||||
* of the time stored in the skb->tstamp.
|
|
||||||
*/
|
|
||||||
enum {
|
enum {
|
||||||
BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */
|
BPF_SKB_TSTAMP_UNSPEC,
|
||||||
BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */
|
BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
|
||||||
BPF_SKB_CLOCK_REALTIME = 0,
|
/* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
|
||||||
BPF_SKB_CLOCK_MONOTONIC = 1,
|
* the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
|
||||||
BPF_SKB_CLOCK_TAI = 2,
|
* and try to deduce it by ingress, egress or skb->sk->sk_clockid.
|
||||||
/* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
|
|
||||||
* the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
|
|
||||||
*/
|
*/
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -6433,19 +6187,6 @@ struct bpf_sock_tuple {
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
/* (Simplified) user return codes for tcx prog type.
|
|
||||||
* A valid tcx program must return one of these defined values. All other
|
|
||||||
* return codes are reserved for future use. Must remain compatible with
|
|
||||||
* their TC_ACT_* counter-parts. For compatibility in behavior, unknown
|
|
||||||
* return codes are mapped to TCX_NEXT.
|
|
||||||
*/
|
|
||||||
enum tcx_action_base {
|
|
||||||
TCX_NEXT = -1,
|
|
||||||
TCX_PASS = 0,
|
|
||||||
TCX_DROP = 2,
|
|
||||||
TCX_REDIRECT = 7,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct bpf_xdp_sock {
|
struct bpf_xdp_sock {
|
||||||
__u32 queue_id;
|
__u32 queue_id;
|
||||||
};
|
};
|
||||||
@@ -6627,7 +6368,7 @@ struct bpf_map_info {
|
|||||||
__u32 btf_id;
|
__u32 btf_id;
|
||||||
__u32 btf_key_type_id;
|
__u32 btf_key_type_id;
|
||||||
__u32 btf_value_type_id;
|
__u32 btf_value_type_id;
|
||||||
__u32 btf_vmlinux_id;
|
__u32 :32; /* alignment pad */
|
||||||
__u64 map_extra;
|
__u64 map_extra;
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
@@ -6698,68 +6439,6 @@ struct bpf_link_info {
|
|||||||
__s32 priority;
|
__s32 priority;
|
||||||
__u32 flags;
|
__u32 flags;
|
||||||
} netfilter;
|
} netfilter;
|
||||||
struct {
|
|
||||||
__aligned_u64 addrs;
|
|
||||||
__u32 count; /* in/out: kprobe_multi function count */
|
|
||||||
__u32 flags;
|
|
||||||
__u64 missed;
|
|
||||||
__aligned_u64 cookies;
|
|
||||||
} kprobe_multi;
|
|
||||||
struct {
|
|
||||||
__aligned_u64 path;
|
|
||||||
__aligned_u64 offsets;
|
|
||||||
__aligned_u64 ref_ctr_offsets;
|
|
||||||
__aligned_u64 cookies;
|
|
||||||
__u32 path_size; /* in/out: real path size on success, including zero byte */
|
|
||||||
__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
|
|
||||||
__u32 flags;
|
|
||||||
__u32 pid;
|
|
||||||
} uprobe_multi;
|
|
||||||
struct {
|
|
||||||
__u32 type; /* enum bpf_perf_event_type */
|
|
||||||
__u32 :32;
|
|
||||||
union {
|
|
||||||
struct {
|
|
||||||
__aligned_u64 file_name; /* in/out */
|
|
||||||
__u32 name_len;
|
|
||||||
__u32 offset; /* offset from file_name */
|
|
||||||
__u64 cookie;
|
|
||||||
__u64 ref_ctr_offset;
|
|
||||||
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
|
|
||||||
struct {
|
|
||||||
__aligned_u64 func_name; /* in/out */
|
|
||||||
__u32 name_len;
|
|
||||||
__u32 offset; /* offset from func_name */
|
|
||||||
__u64 addr;
|
|
||||||
__u64 missed;
|
|
||||||
__u64 cookie;
|
|
||||||
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
|
|
||||||
struct {
|
|
||||||
__aligned_u64 tp_name; /* in/out */
|
|
||||||
__u32 name_len;
|
|
||||||
__u32 :32;
|
|
||||||
__u64 cookie;
|
|
||||||
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
|
|
||||||
struct {
|
|
||||||
__u64 config;
|
|
||||||
__u32 type;
|
|
||||||
__u32 :32;
|
|
||||||
__u64 cookie;
|
|
||||||
} event; /* BPF_PERF_EVENT_EVENT */
|
|
||||||
};
|
|
||||||
} perf_event;
|
|
||||||
struct {
|
|
||||||
__u32 ifindex;
|
|
||||||
__u32 attach_type;
|
|
||||||
} tcx;
|
|
||||||
struct {
|
|
||||||
__u32 ifindex;
|
|
||||||
__u32 attach_type;
|
|
||||||
} netkit;
|
|
||||||
struct {
|
|
||||||
__u32 map_id;
|
|
||||||
__u32 attach_type;
|
|
||||||
} sockmap;
|
|
||||||
};
|
};
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
@@ -6926,12 +6605,6 @@ enum {
|
|||||||
BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
|
BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
|
||||||
SK_BPF_CB_TX_TIMESTAMPING = 1<<0,
|
|
||||||
SK_BPF_CB_MASK = (SK_BPF_CB_TX_TIMESTAMPING - 1) |
|
|
||||||
SK_BPF_CB_TX_TIMESTAMPING
|
|
||||||
};
|
|
||||||
|
|
||||||
/* List of known BPF sock_ops operators.
|
/* List of known BPF sock_ops operators.
|
||||||
* New entries can only be added at the end
|
* New entries can only be added at the end
|
||||||
*/
|
*/
|
||||||
@@ -6984,8 +6657,6 @@ enum {
|
|||||||
* socket transition to LISTEN state.
|
* socket transition to LISTEN state.
|
||||||
*/
|
*/
|
||||||
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
|
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
|
||||||
* Arg1: measured RTT input (mrtt)
|
|
||||||
* Arg2: updated srtt
|
|
||||||
*/
|
*/
|
||||||
BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
|
BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
|
||||||
* It will be called to handle
|
* It will be called to handle
|
||||||
@@ -7044,29 +6715,6 @@ enum {
|
|||||||
* by the kernel or the
|
* by the kernel or the
|
||||||
* earlier bpf-progs.
|
* earlier bpf-progs.
|
||||||
*/
|
*/
|
||||||
BPF_SOCK_OPS_TSTAMP_SCHED_CB, /* Called when skb is passing
|
|
||||||
* through dev layer when
|
|
||||||
* SK_BPF_CB_TX_TIMESTAMPING
|
|
||||||
* feature is on.
|
|
||||||
*/
|
|
||||||
BPF_SOCK_OPS_TSTAMP_SND_SW_CB, /* Called when skb is about to send
|
|
||||||
* to the nic when SK_BPF_CB_TX_TIMESTAMPING
|
|
||||||
* feature is on.
|
|
||||||
*/
|
|
||||||
BPF_SOCK_OPS_TSTAMP_SND_HW_CB, /* Called in hardware phase when
|
|
||||||
* SK_BPF_CB_TX_TIMESTAMPING feature
|
|
||||||
* is on.
|
|
||||||
*/
|
|
||||||
BPF_SOCK_OPS_TSTAMP_ACK_CB, /* Called when all the skbs in the
|
|
||||||
* same sendmsg call are acked
|
|
||||||
* when SK_BPF_CB_TX_TIMESTAMPING
|
|
||||||
* feature is on.
|
|
||||||
*/
|
|
||||||
BPF_SOCK_OPS_TSTAMP_SENDMSG_CB, /* Called when every sendmsg syscall
|
|
||||||
* is triggered. It's used to correlate
|
|
||||||
* sendmsg timestamp with corresponding
|
|
||||||
* tskey.
|
|
||||||
*/
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
|
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
|
||||||
@@ -7087,7 +6735,6 @@ enum {
|
|||||||
BPF_TCP_LISTEN,
|
BPF_TCP_LISTEN,
|
||||||
BPF_TCP_CLOSING, /* Now a valid state */
|
BPF_TCP_CLOSING, /* Now a valid state */
|
||||||
BPF_TCP_NEW_SYN_RECV,
|
BPF_TCP_NEW_SYN_RECV,
|
||||||
BPF_TCP_BOUND_INACTIVE,
|
|
||||||
|
|
||||||
BPF_TCP_MAX_STATES /* Leave at the end! */
|
BPF_TCP_MAX_STATES /* Leave at the end! */
|
||||||
};
|
};
|
||||||
@@ -7132,8 +6779,6 @@ enum {
|
|||||||
TCP_BPF_SYN = 1005, /* Copy the TCP header */
|
TCP_BPF_SYN = 1005, /* Copy the TCP header */
|
||||||
TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
|
TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
|
||||||
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
|
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
|
||||||
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
|
|
||||||
SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
@@ -7192,8 +6837,6 @@ enum {
|
|||||||
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
|
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
|
||||||
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
|
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
|
||||||
BPF_FIB_LOOKUP_TBID = (1U << 3),
|
BPF_FIB_LOOKUP_TBID = (1U << 3),
|
||||||
BPF_FIB_LOOKUP_SRC = (1U << 4),
|
|
||||||
BPF_FIB_LOOKUP_MARK = (1U << 5),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
@@ -7206,7 +6849,6 @@ enum {
|
|||||||
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
|
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
|
||||||
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
|
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
|
||||||
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
|
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
|
||||||
BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_fib_lookup {
|
struct bpf_fib_lookup {
|
||||||
@@ -7226,7 +6868,7 @@ struct bpf_fib_lookup {
|
|||||||
|
|
||||||
/* output: MTU value */
|
/* output: MTU value */
|
||||||
__u16 mtu_result;
|
__u16 mtu_result;
|
||||||
} __attribute__((packed, aligned(2)));
|
};
|
||||||
/* input: L3 device index for lookup
|
/* input: L3 device index for lookup
|
||||||
* output: device index from FIB lookup
|
* output: device index from FIB lookup
|
||||||
*/
|
*/
|
||||||
@@ -7241,9 +6883,6 @@ struct bpf_fib_lookup {
|
|||||||
__u32 rt_metric;
|
__u32 rt_metric;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* input: source address to consider for lookup
|
|
||||||
* output: source address result from lookup
|
|
||||||
*/
|
|
||||||
union {
|
union {
|
||||||
__be32 ipv4_src;
|
__be32 ipv4_src;
|
||||||
__u32 ipv6_src[4]; /* in6_addr; network order */
|
__u32 ipv6_src[4]; /* in6_addr; network order */
|
||||||
@@ -7271,19 +6910,8 @@ struct bpf_fib_lookup {
|
|||||||
__u32 tbid;
|
__u32 tbid;
|
||||||
};
|
};
|
||||||
|
|
||||||
union {
|
__u8 smac[6]; /* ETH_ALEN */
|
||||||
/* input */
|
__u8 dmac[6]; /* ETH_ALEN */
|
||||||
struct {
|
|
||||||
__u32 mark; /* policy routing */
|
|
||||||
/* 2 4-byte holes for input */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* output: source and dest mac */
|
|
||||||
struct {
|
|
||||||
__u8 smac[6]; /* ETH_ALEN */
|
|
||||||
__u8 dmac[6]; /* ETH_ALEN */
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_redir_neigh {
|
struct bpf_redir_neigh {
|
||||||
@@ -7367,35 +6995,38 @@ struct bpf_spin_lock {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_timer {
|
struct bpf_timer {
|
||||||
__u64 __opaque[2];
|
__u64 :64;
|
||||||
} __attribute__((aligned(8)));
|
__u64 :64;
|
||||||
|
|
||||||
struct bpf_wq {
|
|
||||||
__u64 __opaque[2];
|
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_dynptr {
|
struct bpf_dynptr {
|
||||||
__u64 __opaque[2];
|
__u64 :64;
|
||||||
|
__u64 :64;
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_list_head {
|
struct bpf_list_head {
|
||||||
__u64 __opaque[2];
|
__u64 :64;
|
||||||
|
__u64 :64;
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_list_node {
|
struct bpf_list_node {
|
||||||
__u64 __opaque[3];
|
__u64 :64;
|
||||||
|
__u64 :64;
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_rb_root {
|
struct bpf_rb_root {
|
||||||
__u64 __opaque[2];
|
__u64 :64;
|
||||||
|
__u64 :64;
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_rb_node {
|
struct bpf_rb_node {
|
||||||
__u64 __opaque[4];
|
__u64 :64;
|
||||||
|
__u64 :64;
|
||||||
|
__u64 :64;
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_refcount {
|
struct bpf_refcount {
|
||||||
__u32 __opaque[1];
|
__u32 :32;
|
||||||
} __attribute__((aligned(4)));
|
} __attribute__((aligned(4)));
|
||||||
|
|
||||||
struct bpf_sysctl {
|
struct bpf_sysctl {
|
||||||
@@ -7551,11 +7182,9 @@ struct bpf_core_relo {
|
|||||||
* Flags to control bpf_timer_start() behaviour.
|
* Flags to control bpf_timer_start() behaviour.
|
||||||
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
|
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
|
||||||
* relative to current time.
|
* relative to current time.
|
||||||
* - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
|
|
||||||
*/
|
*/
|
||||||
enum {
|
enum {
|
||||||
BPF_F_TIMER_ABS = (1ULL << 0),
|
BPF_F_TIMER_ABS = (1ULL << 0),
|
||||||
BPF_F_TIMER_CPU_PIN = (1ULL << 1),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* BPF numbers iterator state */
|
/* BPF numbers iterator state */
|
||||||
@@ -7566,13 +7195,4 @@ struct bpf_iter_num {
|
|||||||
__u64 __opaque[1];
|
__u64 __opaque[1];
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
/*
|
|
||||||
* Flags to control BPF kfunc behaviour.
|
|
||||||
* - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
|
|
||||||
* helper documentation for details.)
|
|
||||||
*/
|
|
||||||
enum bpf_kfunc_flags {
|
|
||||||
BPF_F_PAD_ZEROS = (1ULL << 0),
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _UAPI__LINUX_BPF_H__ */
|
#endif /* _UAPI__LINUX_BPF_H__ */
|
||||||
|
|||||||
@@ -36,8 +36,7 @@ struct btf_type {
|
|||||||
* bits 24-28: kind (e.g. int, ptr, array...etc)
|
* bits 24-28: kind (e.g. int, ptr, array...etc)
|
||||||
* bits 29-30: unused
|
* bits 29-30: unused
|
||||||
* bit 31: kind_flag, currently used by
|
* bit 31: kind_flag, currently used by
|
||||||
* struct, union, enum, fwd, enum64,
|
* struct, union, enum, fwd and enum64
|
||||||
* decl_tag and type_tag
|
|
||||||
*/
|
*/
|
||||||
__u32 info;
|
__u32 info;
|
||||||
/* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
|
/* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
|
||||||
|
|||||||
115
include/uapi/linux/fcntl.h
Normal file
115
include/uapi/linux/fcntl.h
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||||
|
#ifndef _UAPI_LINUX_FCNTL_H
|
||||||
|
#define _UAPI_LINUX_FCNTL_H
|
||||||
|
|
||||||
|
#include <asm/fcntl.h>
|
||||||
|
#include <linux/openat2.h>
|
||||||
|
|
||||||
|
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
|
||||||
|
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Cancel a blocking posix lock; internal use only until we expose an
|
||||||
|
* asynchronous lock api to userspace:
|
||||||
|
*/
|
||||||
|
#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5)
|
||||||
|
|
||||||
|
/* Create a file descriptor with FD_CLOEXEC set. */
|
||||||
|
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Request nofications on a directory.
|
||||||
|
* See below for events that may be notified.
|
||||||
|
*/
|
||||||
|
#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set and get of pipe page size array
|
||||||
|
*/
|
||||||
|
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
|
||||||
|
#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set/Get seals
|
||||||
|
*/
|
||||||
|
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
|
||||||
|
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Types of seals
|
||||||
|
*/
|
||||||
|
#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
|
||||||
|
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
|
||||||
|
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
|
||||||
|
#define F_SEAL_WRITE 0x0008 /* prevent writes */
|
||||||
|
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
|
||||||
|
#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */
|
||||||
|
/* (1U << 31) is reserved for signed error codes */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set/Get write life time hints. {GET,SET}_RW_HINT operate on the
|
||||||
|
* underlying inode, while {GET,SET}_FILE_RW_HINT operate only on
|
||||||
|
* the specific file.
|
||||||
|
*/
|
||||||
|
#define F_GET_RW_HINT (F_LINUX_SPECIFIC_BASE + 11)
|
||||||
|
#define F_SET_RW_HINT (F_LINUX_SPECIFIC_BASE + 12)
|
||||||
|
#define F_GET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 13)
|
||||||
|
#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
|
||||||
|
* used to clear any hints previously set.
|
||||||
|
*/
|
||||||
|
#define RWH_WRITE_LIFE_NOT_SET 0
|
||||||
|
#define RWH_WRITE_LIFE_NONE 1
|
||||||
|
#define RWH_WRITE_LIFE_SHORT 2
|
||||||
|
#define RWH_WRITE_LIFE_MEDIUM 3
|
||||||
|
#define RWH_WRITE_LIFE_LONG 4
|
||||||
|
#define RWH_WRITE_LIFE_EXTREME 5
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The originally introduced spelling is remained from the first
|
||||||
|
* versions of the patch set that introduced the feature, see commit
|
||||||
|
* v4.13-rc1~212^2~51.
|
||||||
|
*/
|
||||||
|
#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Types of directory notifications that may be requested.
|
||||||
|
*/
|
||||||
|
#define DN_ACCESS 0x00000001 /* File accessed */
|
||||||
|
#define DN_MODIFY 0x00000002 /* File modified */
|
||||||
|
#define DN_CREATE 0x00000004 /* File created */
|
||||||
|
#define DN_DELETE 0x00000008 /* File removed */
|
||||||
|
#define DN_RENAME 0x00000010 /* File renamed */
|
||||||
|
#define DN_ATTRIB 0x00000020 /* File changed attibutes */
|
||||||
|
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
|
||||||
|
* meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
|
||||||
|
* unlinkat. The two functions do completely different things and therefore,
|
||||||
|
* the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to
|
||||||
|
* faccessat would be undefined behavior and thus treating it equivalent to
|
||||||
|
* AT_EACCESS is valid undefined behavior.
|
||||||
|
*/
|
||||||
|
#define AT_FDCWD -100 /* Special value used to indicate
|
||||||
|
openat should use the current
|
||||||
|
working directory. */
|
||||||
|
#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
|
||||||
|
#define AT_EACCESS 0x200 /* Test access permitted for
|
||||||
|
effective IDs, not real IDs. */
|
||||||
|
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
|
||||||
|
unlinking file. */
|
||||||
|
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
|
||||||
|
#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
|
||||||
|
#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
|
||||||
|
|
||||||
|
#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
|
||||||
|
#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
|
||||||
|
#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
|
||||||
|
#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
|
||||||
|
|
||||||
|
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
|
||||||
|
|
||||||
|
#endif /* _UAPI_LINUX_FCNTL_H */
|
||||||
@@ -211,9 +211,6 @@ struct rtnl_link_stats {
|
|||||||
* @rx_nohandler: Number of packets received on the interface
|
* @rx_nohandler: Number of packets received on the interface
|
||||||
* but dropped by the networking stack because the device is
|
* but dropped by the networking stack because the device is
|
||||||
* not designated to receive packets (e.g. backup link in a bond).
|
* not designated to receive packets (e.g. backup link in a bond).
|
||||||
*
|
|
||||||
* @rx_otherhost_dropped: Number of packets dropped due to mismatch
|
|
||||||
* in destination MAC address.
|
|
||||||
*/
|
*/
|
||||||
struct rtnl_link_stats64 {
|
struct rtnl_link_stats64 {
|
||||||
__u64 rx_packets;
|
__u64 rx_packets;
|
||||||
@@ -246,23 +243,6 @@ struct rtnl_link_stats64 {
|
|||||||
__u64 rx_compressed;
|
__u64 rx_compressed;
|
||||||
__u64 tx_compressed;
|
__u64 tx_compressed;
|
||||||
__u64 rx_nohandler;
|
__u64 rx_nohandler;
|
||||||
|
|
||||||
__u64 rx_otherhost_dropped;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Subset of link stats useful for in-HW collection. Meaning of the fields is as
|
|
||||||
* for struct rtnl_link_stats64.
|
|
||||||
*/
|
|
||||||
struct rtnl_hw_stats64 {
|
|
||||||
__u64 rx_packets;
|
|
||||||
__u64 tx_packets;
|
|
||||||
__u64 rx_bytes;
|
|
||||||
__u64 tx_bytes;
|
|
||||||
__u64 rx_errors;
|
|
||||||
__u64 tx_errors;
|
|
||||||
__u64 rx_dropped;
|
|
||||||
__u64 tx_dropped;
|
|
||||||
__u64 multicast;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* The struct should be in sync with struct ifmap */
|
/* The struct should be in sync with struct ifmap */
|
||||||
@@ -370,14 +350,7 @@ enum {
|
|||||||
IFLA_GRO_MAX_SIZE,
|
IFLA_GRO_MAX_SIZE,
|
||||||
IFLA_TSO_MAX_SIZE,
|
IFLA_TSO_MAX_SIZE,
|
||||||
IFLA_TSO_MAX_SEGS,
|
IFLA_TSO_MAX_SEGS,
|
||||||
IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */
|
|
||||||
|
|
||||||
IFLA_DEVLINK_PORT,
|
|
||||||
|
|
||||||
IFLA_GSO_IPV4_MAX_SIZE,
|
|
||||||
IFLA_GRO_IPV4_MAX_SIZE,
|
|
||||||
IFLA_DPLL_PIN,
|
|
||||||
IFLA_MAX_PACING_OFFLOAD_HORIZON,
|
|
||||||
__IFLA_MAX
|
__IFLA_MAX
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -462,286 +435,6 @@ enum in6_addr_gen_mode {
|
|||||||
|
|
||||||
/* Bridge section */
|
/* Bridge section */
|
||||||
|
|
||||||
/**
|
|
||||||
* DOC: Bridge enum definition
|
|
||||||
*
|
|
||||||
* Please *note* that the timer values in the following section are expected
|
|
||||||
* in clock_t format, which is seconds multiplied by USER_HZ (generally
|
|
||||||
* defined as 100).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_FORWARD_DELAY
|
|
||||||
* The bridge forwarding delay is the time spent in LISTENING state
|
|
||||||
* (before moving to LEARNING) and in LEARNING state (before moving
|
|
||||||
* to FORWARDING). Only relevant if STP is enabled.
|
|
||||||
*
|
|
||||||
* The valid values are between (2 * USER_HZ) and (30 * USER_HZ).
|
|
||||||
* The default value is (15 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_HELLO_TIME
|
|
||||||
* The time between hello packets sent by the bridge, when it is a root
|
|
||||||
* bridge or a designated bridge. Only relevant if STP is enabled.
|
|
||||||
*
|
|
||||||
* The valid values are between (1 * USER_HZ) and (10 * USER_HZ).
|
|
||||||
* The default value is (2 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MAX_AGE
|
|
||||||
* The hello packet timeout is the time until another bridge in the
|
|
||||||
* spanning tree is assumed to be dead, after reception of its last hello
|
|
||||||
* message. Only relevant if STP is enabled.
|
|
||||||
*
|
|
||||||
* The valid values are between (6 * USER_HZ) and (40 * USER_HZ).
|
|
||||||
* The default value is (20 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_AGEING_TIME
|
|
||||||
* Configure the bridge's FDB entries aging time. It is the time a MAC
|
|
||||||
* address will be kept in the FDB after a packet has been received from
|
|
||||||
* that address. After this time has passed, entries are cleaned up.
|
|
||||||
* Allow values outside the 802.1 standard specification for special cases:
|
|
||||||
*
|
|
||||||
* * 0 - entry never ages (all permanent)
|
|
||||||
* * 1 - entry disappears (no persistence)
|
|
||||||
*
|
|
||||||
* The default value is (300 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_STP_STATE
|
|
||||||
* Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off
|
|
||||||
* (*IFLA_BR_STP_STATE* == 0) for this bridge.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_PRIORITY
|
|
||||||
* Set this bridge's spanning tree priority, used during STP root bridge
|
|
||||||
* election.
|
|
||||||
*
|
|
||||||
* The valid values are between 0 and 65535.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_VLAN_FILTERING
|
|
||||||
* Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off
|
|
||||||
* (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not
|
|
||||||
* consider the VLAN tag when handling packets.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_VLAN_PROTOCOL
|
|
||||||
* Set the protocol used for VLAN filtering.
|
|
||||||
*
|
|
||||||
* The valid values are 0x8100(802.1Q) or 0x88A8(802.1AD). The default value
|
|
||||||
* is 0x8100(802.1Q).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_GROUP_FWD_MASK
|
|
||||||
* The group forwarding mask. This is the bitmask that is applied to
|
|
||||||
* decide whether to forward incoming frames destined to link-local
|
|
||||||
* addresses (of the form 01:80:C2:00:00:0X).
|
|
||||||
*
|
|
||||||
* The default value is 0, which means the bridge does not forward any
|
|
||||||
* link-local frames coming on this port.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_ROOT_ID
|
|
||||||
* The bridge root id, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_BRIDGE_ID
|
|
||||||
* The bridge id, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_ROOT_PORT
|
|
||||||
* The bridge root port, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_ROOT_PATH_COST
|
|
||||||
* The bridge root path cost, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_TOPOLOGY_CHANGE
|
|
||||||
* The bridge topology change, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_TOPOLOGY_CHANGE_DETECTED
|
|
||||||
* The bridge topology change detected, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_HELLO_TIMER
|
|
||||||
* The bridge hello timer, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_TCN_TIMER
|
|
||||||
* The bridge tcn timer, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_TOPOLOGY_CHANGE_TIMER
|
|
||||||
* The bridge topology change timer, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_GC_TIMER
|
|
||||||
* The bridge gc timer, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_GROUP_ADDR
|
|
||||||
* Set the MAC address of the multicast group this bridge uses for STP.
|
|
||||||
* The address must be a link-local address in standard Ethernet MAC address
|
|
||||||
* format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f].
|
|
||||||
*
|
|
||||||
* The default value is 0.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_FDB_FLUSH
|
|
||||||
* Flush bridge's fdb dynamic entries.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_ROUTER
|
|
||||||
* Set bridge's multicast router if IGMP snooping is enabled.
|
|
||||||
* The valid values are:
|
|
||||||
*
|
|
||||||
* * 0 - disabled.
|
|
||||||
* * 1 - automatic (queried).
|
|
||||||
* * 2 - permanently enabled.
|
|
||||||
*
|
|
||||||
* The default value is 1.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_SNOOPING
|
|
||||||
* Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off
|
|
||||||
* (*IFLA_BR_MCAST_SNOOPING* == 0).
|
|
||||||
*
|
|
||||||
* The default value is 1.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_QUERY_USE_IFADDR
|
|
||||||
* If enabled use the bridge's own IP address as source address for IGMP
|
|
||||||
* queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of 0.0.0.0
|
|
||||||
* (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0).
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_QUERIER
|
|
||||||
* Enable (*IFLA_BR_MULTICAST_QUERIER* > 0) or disable
|
|
||||||
* (*IFLA_BR_MULTICAST_QUERIER* == 0) IGMP querier, ie sending of multicast
|
|
||||||
* queries by the bridge.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_HASH_ELASTICITY
|
|
||||||
* Set multicast database hash elasticity, It is the maximum chain length in
|
|
||||||
* the multicast hash table. This attribute is *deprecated* and the value
|
|
||||||
* is always 16.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_HASH_MAX
|
|
||||||
* Set maximum size of the multicast hash table
|
|
||||||
*
|
|
||||||
* The default value is 4096, the value must be a power of 2.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_LAST_MEMBER_CNT
|
|
||||||
* The Last Member Query Count is the number of Group-Specific Queries
|
|
||||||
* sent before the router assumes there are no local members. The Last
|
|
||||||
* Member Query Count is also the number of Group-and-Source-Specific
|
|
||||||
* Queries sent before the router assumes there are no listeners for a
|
|
||||||
* particular source.
|
|
||||||
*
|
|
||||||
* The default value is 2.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_STARTUP_QUERY_CNT
|
|
||||||
* The Startup Query Count is the number of Queries sent out on startup,
|
|
||||||
* separated by the Startup Query Interval.
|
|
||||||
*
|
|
||||||
* The default value is 2.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_LAST_MEMBER_INTVL
|
|
||||||
* The Last Member Query Interval is the Max Response Time inserted into
|
|
||||||
* Group-Specific Queries sent in response to Leave Group messages, and
|
|
||||||
* is also the amount of time between Group-Specific Query messages.
|
|
||||||
*
|
|
||||||
* The default value is (1 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_MEMBERSHIP_INTVL
|
|
||||||
* The interval after which the bridge will leave a group, if no membership
|
|
||||||
* reports for this group are received.
|
|
||||||
*
|
|
||||||
* The default value is (260 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_QUERIER_INTVL
|
|
||||||
* The interval between queries sent by other routers. if no queries are
|
|
||||||
* seen after this delay has passed, the bridge will start to send its own
|
|
||||||
* queries (as if *IFLA_BR_MCAST_QUERIER_INTVL* was enabled).
|
|
||||||
*
|
|
||||||
* The default value is (255 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_QUERY_INTVL
|
|
||||||
* The Query Interval is the interval between General Queries sent by
|
|
||||||
* the Querier.
|
|
||||||
*
|
|
||||||
* The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
|
|
||||||
* The Max Response Time used to calculate the Max Resp Code inserted
|
|
||||||
* into the periodic General Queries.
|
|
||||||
*
|
|
||||||
* The default value is (10 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_STARTUP_QUERY_INTVL
|
|
||||||
* The interval between queries in the startup phase.
|
|
||||||
*
|
|
||||||
* The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_NF_CALL_IPTABLES
|
|
||||||
* Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0)
|
|
||||||
* iptables hooks on the bridge.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_NF_CALL_IP6TABLES
|
|
||||||
* Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0)
|
|
||||||
* ip6tables hooks on the bridge.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_NF_CALL_ARPTABLES
|
|
||||||
* Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0)
|
|
||||||
* arptables hooks on the bridge.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_VLAN_DEFAULT_PVID
|
|
||||||
* VLAN ID applied to untagged and priority-tagged incoming packets.
|
|
||||||
*
|
|
||||||
* The default value is 1. Setting to the special value 0 makes all ports of
|
|
||||||
* this bridge not have a PVID by default, which means that they will
|
|
||||||
* not accept VLAN-untagged traffic.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_PAD
|
|
||||||
* Bridge attribute padding type for netlink message.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_VLAN_STATS_ENABLED
|
|
||||||
* Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable
|
|
||||||
* (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_STATS_ENABLED
|
|
||||||
* Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable
|
|
||||||
* (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats
|
|
||||||
* accounting.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_IGMP_VERSION
|
|
||||||
* Set the IGMP version.
|
|
||||||
*
|
|
||||||
* The valid values are 2 and 3. The default value is 2.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_MLD_VERSION
|
|
||||||
* Set the MLD version.
|
|
||||||
*
|
|
||||||
* The valid values are 1 and 2. The default value is 1.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_VLAN_STATS_PER_PORT
|
|
||||||
* Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable
|
|
||||||
* (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting.
|
|
||||||
* Can be changed only when there are no port VLANs configured.
|
|
||||||
*
|
|
||||||
* The default value is 0 (disabled).
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MULTI_BOOLOPT
|
|
||||||
* The multi_boolopt is used to control new boolean options to avoid adding
|
|
||||||
* new netlink attributes. You can look at ``enum br_boolopt_id`` for those
|
|
||||||
* options.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_MCAST_QUERIER_STATE
|
|
||||||
* Bridge mcast querier states, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_FDB_N_LEARNED
|
|
||||||
* The number of dynamically learned FDB entries for the current bridge,
|
|
||||||
* read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BR_FDB_MAX_LEARNED
|
|
||||||
* Set the number of max dynamically learned FDB entries for the current
|
|
||||||
* bridge.
|
|
||||||
*/
|
|
||||||
enum {
|
enum {
|
||||||
IFLA_BR_UNSPEC,
|
IFLA_BR_UNSPEC,
|
||||||
IFLA_BR_FORWARD_DELAY,
|
IFLA_BR_FORWARD_DELAY,
|
||||||
@@ -791,8 +484,6 @@ enum {
|
|||||||
IFLA_BR_VLAN_STATS_PER_PORT,
|
IFLA_BR_VLAN_STATS_PER_PORT,
|
||||||
IFLA_BR_MULTI_BOOLOPT,
|
IFLA_BR_MULTI_BOOLOPT,
|
||||||
IFLA_BR_MCAST_QUERIER_STATE,
|
IFLA_BR_MCAST_QUERIER_STATE,
|
||||||
IFLA_BR_FDB_N_LEARNED,
|
|
||||||
IFLA_BR_FDB_MAX_LEARNED,
|
|
||||||
__IFLA_BR_MAX,
|
__IFLA_BR_MAX,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -803,252 +494,11 @@ struct ifla_bridge_id {
|
|||||||
__u8 addr[6]; /* ETH_ALEN */
|
__u8 addr[6]; /* ETH_ALEN */
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
|
||||||
* DOC: Bridge mode enum definition
|
|
||||||
*
|
|
||||||
* @BRIDGE_MODE_HAIRPIN
|
|
||||||
* Controls whether traffic may be sent back out of the port on which it
|
|
||||||
* was received. This option is also called reflective relay mode, and is
|
|
||||||
* used to support basic VEPA (Virtual Ethernet Port Aggregator)
|
|
||||||
* capabilities. By default, this flag is turned off and the bridge will
|
|
||||||
* not forward traffic back out of the receiving port.
|
|
||||||
*/
|
|
||||||
enum {
|
enum {
|
||||||
BRIDGE_MODE_UNSPEC,
|
BRIDGE_MODE_UNSPEC,
|
||||||
BRIDGE_MODE_HAIRPIN,
|
BRIDGE_MODE_HAIRPIN,
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
|
||||||
* DOC: Bridge port enum definition
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_STATE
|
|
||||||
* The operation state of the port. Here are the valid values.
|
|
||||||
*
|
|
||||||
* * 0 - port is in STP *DISABLED* state. Make this port completely
|
|
||||||
* inactive for STP. This is also called BPDU filter and could be used
|
|
||||||
* to disable STP on an untrusted port, like a leaf virtual device.
|
|
||||||
* The traffic forwarding is also stopped on this port.
|
|
||||||
* * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled
|
|
||||||
* on the bridge. In this state the port listens for STP BPDUs and
|
|
||||||
* drops all other traffic frames.
|
|
||||||
* * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on
|
|
||||||
* the bridge. In this state the port will accept traffic only for the
|
|
||||||
* purpose of updating MAC address tables.
|
|
||||||
* * 3 - port is in STP *FORWARDING* state. Port is fully active.
|
|
||||||
* * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on
|
|
||||||
* the bridge. This state is used during the STP election process.
|
|
||||||
* In this state, port will only process STP BPDUs.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_PRIORITY
|
|
||||||
* The STP port priority. The valid values are between 0 and 255.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_COST
|
|
||||||
* The STP path cost of the port. The valid values are between 1 and 65535.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MODE
|
|
||||||
* Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_GUARD
|
|
||||||
* Controls whether STP BPDUs will be processed by the bridge port. By
|
|
||||||
* default, the flag is turned off to allow BPDU processing. Turning this
|
|
||||||
* flag on will disable the bridge port if a STP BPDU packet is received.
|
|
||||||
*
|
|
||||||
* If the bridge has Spanning Tree enabled, hostile devices on the network
|
|
||||||
* may send BPDU on a port and cause network failure. Setting *guard on*
|
|
||||||
* will detect and stop this by disabling the port. The port will be
|
|
||||||
* restarted if the link is brought down, or removed and reattached.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_PROTECT
|
|
||||||
* Controls whether a given port is allowed to become a root port or not.
|
|
||||||
* Only used when STP is enabled on the bridge. By default the flag is off.
|
|
||||||
*
|
|
||||||
* This feature is also called root port guard. If BPDU is received from a
|
|
||||||
* leaf (edge) port, it should not be elected as root port. This could
|
|
||||||
* be used if using STP on a bridge and the downstream bridges are not fully
|
|
||||||
* trusted; this prevents a hostile guest from rerouting traffic.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_FAST_LEAVE
|
|
||||||
* This flag allows the bridge to immediately stop multicast traffic
|
|
||||||
* forwarding on a port that receives an IGMP Leave message. It is only used
|
|
||||||
* when IGMP snooping is enabled on the bridge. By default the flag is off.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_LEARNING
|
|
||||||
* Controls whether a given port will learn *source* MAC addresses from
|
|
||||||
* received traffic or not. Also controls whether dynamic FDB entries
|
|
||||||
* (which can also be added by software) will be refreshed by incoming
|
|
||||||
* traffic. By default this flag is on.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_UNICAST_FLOOD
|
|
||||||
* Controls whether unicast traffic for which there is no FDB entry will
|
|
||||||
* be flooded towards this port. By default this flag is on.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_PROXYARP
|
|
||||||
* Enable proxy ARP on this port.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_LEARNING_SYNC
|
|
||||||
* Controls whether a given port will sync MAC addresses learned on device
|
|
||||||
* port to bridge FDB.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_PROXYARP_WIFI
|
|
||||||
* Enable proxy ARP on this port which meets extended requirements by
|
|
||||||
* IEEE 802.11 and Hotspot 2.0 specifications.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_ROOT_ID
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_BRIDGE_ID
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_DESIGNATED_PORT
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_DESIGNATED_COST
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_ID
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_NO
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_CONFIG_PENDING
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MESSAGE_AGE_TIMER
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_FORWARD_DELAY_TIMER
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_HOLD_TIMER
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_FLUSH
|
|
||||||
* Flush bridge ports' fdb dynamic entries.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MULTICAST_ROUTER
|
|
||||||
* Configure the port's multicast router presence. A port with
|
|
||||||
* a multicast router will receive all multicast traffic.
|
|
||||||
* The valid values are:
|
|
||||||
*
|
|
||||||
* * 0 disable multicast routers on this port
|
|
||||||
* * 1 let the system detect the presence of routers (default)
|
|
||||||
* * 2 permanently enable multicast traffic forwarding on this port
|
|
||||||
* * 3 enable multicast routers temporarily on this port, not depending
|
|
||||||
* on incoming queries.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_PAD
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MCAST_FLOOD
|
|
||||||
* Controls whether a given port will flood multicast traffic for which
|
|
||||||
* there is no MDB entry. By default this flag is on.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MCAST_TO_UCAST
|
|
||||||
* Controls whether a given port will replicate packets using unicast
|
|
||||||
* instead of multicast. By default this flag is off.
|
|
||||||
*
|
|
||||||
* This is done by copying the packet per host and changing the multicast
|
|
||||||
* destination MAC to a unicast one accordingly.
|
|
||||||
*
|
|
||||||
* *mcast_to_unicast* works on top of the multicast snooping feature of the
|
|
||||||
* bridge. Which means unicast copies are only delivered to hosts which
|
|
||||||
* are interested in unicast and signaled this via IGMP/MLD reports previously.
|
|
||||||
*
|
|
||||||
* This feature is intended for interface types which have a more reliable
|
|
||||||
* and/or efficient way to deliver unicast packets than broadcast ones
|
|
||||||
* (e.g. WiFi).
|
|
||||||
*
|
|
||||||
* However, it should only be enabled on interfaces where no IGMPv2/MLDv1
|
|
||||||
* report suppression takes place. IGMP/MLD report suppression issue is
|
|
||||||
* usually overcome by the network daemon (supplicant) enabling AP isolation
|
|
||||||
* and by that separating all STAs.
|
|
||||||
*
|
|
||||||
* Delivery of STA-to-STA IP multicast is made possible again by enabling
|
|
||||||
* and utilizing the bridge hairpin mode, which considers the incoming port
|
|
||||||
* as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option).
|
|
||||||
* Hairpin mode is performed after multicast snooping, therefore leading
|
|
||||||
* to only deliver reports to STAs running a multicast router.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_VLAN_TUNNEL
|
|
||||||
* Controls whether vlan to tunnel mapping is enabled on the port.
|
|
||||||
* By default this flag is off.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_BCAST_FLOOD
|
|
||||||
* Controls flooding of broadcast traffic on the given port. By default
|
|
||||||
* this flag is on.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_GROUP_FWD_MASK
|
|
||||||
* Set the group forward mask. This is a bitmask that is applied to
|
|
||||||
* decide whether to forward incoming frames destined to link-local
|
|
||||||
* addresses. The addresses of the form are 01:80:C2:00:00:0X (defaults
|
|
||||||
* to 0, which means the bridge does not forward any link-local frames
|
|
||||||
* coming on this port).
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_NEIGH_SUPPRESS
|
|
||||||
* Controls whether neighbor discovery (arp and nd) proxy and suppression
|
|
||||||
* is enabled on the port. By default this flag is off.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_ISOLATED
|
|
||||||
* Controls whether a given port will be isolated, which means it will be
|
|
||||||
* able to communicate with non-isolated ports only. By default this
|
|
||||||
* flag is off.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_BACKUP_PORT
|
|
||||||
* Set a backup port. If the port loses carrier all traffic will be
|
|
||||||
* redirected to the configured backup port. Set the value to 0 to disable
|
|
||||||
* it.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MRP_RING_OPEN
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MRP_IN_OPEN
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
|
|
||||||
* The number of per-port EHT hosts limit. The default value is 512.
|
|
||||||
* Setting to 0 is not allowed.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
|
|
||||||
* The current number of tracked hosts, read only.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_LOCKED
|
|
||||||
* Controls whether a port will be locked, meaning that hosts behind the
|
|
||||||
* port will not be able to communicate through the port unless an FDB
|
|
||||||
* entry with the unit's MAC address is in the FDB. The common use case is
|
|
||||||
* that hosts are allowed access through authentication with the IEEE 802.1X
|
|
||||||
* protocol or based on whitelists. By default this flag is off.
|
|
||||||
*
|
|
||||||
* Please note that secure 802.1X deployments should always use the
|
|
||||||
* *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its
|
|
||||||
* FDB based on link-local (EAPOL) traffic received on the port.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MAB
|
|
||||||
* Controls whether a port will use MAC Authentication Bypass (MAB), a
|
|
||||||
* technique through which select MAC addresses may be allowed on a locked
|
|
||||||
* port, without using 802.1X authentication. Packets with an unknown source
|
|
||||||
* MAC address generates a "locked" FDB entry on the incoming bridge port.
|
|
||||||
* The common use case is for user space to react to these bridge FDB
|
|
||||||
* notifications and optionally replace the locked FDB entry with a normal
|
|
||||||
* one, allowing traffic to pass for whitelisted MAC addresses.
|
|
||||||
*
|
|
||||||
* Setting this flag also requires *IFLA_BRPORT_LOCKED* and
|
|
||||||
* *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized
|
|
||||||
* data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic
|
|
||||||
* FDB entries installed by user space (as replacements for the locked FDB
|
|
||||||
* entries) to be refreshed and/or aged out.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MCAST_N_GROUPS
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_MCAST_MAX_GROUPS
|
|
||||||
* Sets the maximum number of MDB entries that can be registered for a
|
|
||||||
* given port. Attempts to register more MDB entries at the port than this
|
|
||||||
* limit allows will be rejected, whether they are done through netlink
|
|
||||||
* (e.g. the bridge tool), or IGMP or MLD membership reports. Setting a
|
|
||||||
* limit of 0 disables the limit. The default value is 0.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS
|
|
||||||
* Controls whether neighbor discovery (arp and nd) proxy and suppression is
|
|
||||||
* enabled for a given port. By default this flag is off.
|
|
||||||
*
|
|
||||||
* Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS*
|
|
||||||
* is enabled for a given port.
|
|
||||||
*
|
|
||||||
* @IFLA_BRPORT_BACKUP_NHID
|
|
||||||
* The FDB nexthop object ID to attach to packets being redirected to a
|
|
||||||
* backup port that has VLAN tunnel mapping enabled (via the
|
|
||||||
* *IFLA_BRPORT_VLAN_TUNNEL* option). Setting a value of 0 (default) has
|
|
||||||
* the effect of not attaching any ID.
|
|
||||||
*/
|
|
||||||
enum {
|
enum {
|
||||||
IFLA_BRPORT_UNSPEC,
|
IFLA_BRPORT_UNSPEC,
|
||||||
IFLA_BRPORT_STATE, /* Spanning tree state */
|
IFLA_BRPORT_STATE, /* Spanning tree state */
|
||||||
@@ -1089,12 +539,6 @@ enum {
|
|||||||
IFLA_BRPORT_MRP_IN_OPEN,
|
IFLA_BRPORT_MRP_IN_OPEN,
|
||||||
IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
|
IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
|
||||||
IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
|
IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
|
||||||
IFLA_BRPORT_LOCKED,
|
|
||||||
IFLA_BRPORT_MAB,
|
|
||||||
IFLA_BRPORT_MCAST_N_GROUPS,
|
|
||||||
IFLA_BRPORT_MCAST_MAX_GROUPS,
|
|
||||||
IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
|
|
||||||
IFLA_BRPORT_BACKUP_NHID,
|
|
||||||
__IFLA_BRPORT_MAX
|
__IFLA_BRPORT_MAX
|
||||||
};
|
};
|
||||||
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
|
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
|
||||||
@@ -1272,96 +716,7 @@ enum ipvlan_mode {
|
|||||||
#define IPVLAN_F_PRIVATE 0x01
|
#define IPVLAN_F_PRIVATE 0x01
|
||||||
#define IPVLAN_F_VEPA 0x02
|
#define IPVLAN_F_VEPA 0x02
|
||||||
|
|
||||||
/* Tunnel RTM header */
|
|
||||||
struct tunnel_msg {
|
|
||||||
__u8 family;
|
|
||||||
__u8 flags;
|
|
||||||
__u16 reserved2;
|
|
||||||
__u32 ifindex;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* netkit section */
|
|
||||||
enum netkit_action {
|
|
||||||
NETKIT_NEXT = -1,
|
|
||||||
NETKIT_PASS = 0,
|
|
||||||
NETKIT_DROP = 2,
|
|
||||||
NETKIT_REDIRECT = 7,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum netkit_mode {
|
|
||||||
NETKIT_L2,
|
|
||||||
NETKIT_L3,
|
|
||||||
};
|
|
||||||
|
|
||||||
/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to
|
|
||||||
* the BPF program if attached. This also means the latter can
|
|
||||||
* consume the two fields if they were populated earlier.
|
|
||||||
*
|
|
||||||
* NETKIT_SCRUB_DEFAULT zeroes skb->{mark,priority} fields before
|
|
||||||
* invoking the attached BPF program when the peer device resides
|
|
||||||
* in a different network namespace. This is the default behavior.
|
|
||||||
*/
|
|
||||||
enum netkit_scrub {
|
|
||||||
NETKIT_SCRUB_NONE,
|
|
||||||
NETKIT_SCRUB_DEFAULT,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
IFLA_NETKIT_UNSPEC,
|
|
||||||
IFLA_NETKIT_PEER_INFO,
|
|
||||||
IFLA_NETKIT_PRIMARY,
|
|
||||||
IFLA_NETKIT_POLICY,
|
|
||||||
IFLA_NETKIT_PEER_POLICY,
|
|
||||||
IFLA_NETKIT_MODE,
|
|
||||||
IFLA_NETKIT_SCRUB,
|
|
||||||
IFLA_NETKIT_PEER_SCRUB,
|
|
||||||
IFLA_NETKIT_HEADROOM,
|
|
||||||
IFLA_NETKIT_TAILROOM,
|
|
||||||
__IFLA_NETKIT_MAX,
|
|
||||||
};
|
|
||||||
#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
|
|
||||||
|
|
||||||
/* VXLAN section */
|
/* VXLAN section */
|
||||||
|
|
||||||
/* include statistics in the dump */
|
|
||||||
#define TUNNEL_MSG_FLAG_STATS 0x01
|
|
||||||
|
|
||||||
#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS
|
|
||||||
|
|
||||||
/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */
|
|
||||||
enum {
|
|
||||||
VNIFILTER_ENTRY_STATS_UNSPEC,
|
|
||||||
VNIFILTER_ENTRY_STATS_RX_BYTES,
|
|
||||||
VNIFILTER_ENTRY_STATS_RX_PKTS,
|
|
||||||
VNIFILTER_ENTRY_STATS_RX_DROPS,
|
|
||||||
VNIFILTER_ENTRY_STATS_RX_ERRORS,
|
|
||||||
VNIFILTER_ENTRY_STATS_TX_BYTES,
|
|
||||||
VNIFILTER_ENTRY_STATS_TX_PKTS,
|
|
||||||
VNIFILTER_ENTRY_STATS_TX_DROPS,
|
|
||||||
VNIFILTER_ENTRY_STATS_TX_ERRORS,
|
|
||||||
VNIFILTER_ENTRY_STATS_PAD,
|
|
||||||
__VNIFILTER_ENTRY_STATS_MAX
|
|
||||||
};
|
|
||||||
#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1)
|
|
||||||
|
|
||||||
enum {
|
|
||||||
VXLAN_VNIFILTER_ENTRY_UNSPEC,
|
|
||||||
VXLAN_VNIFILTER_ENTRY_START,
|
|
||||||
VXLAN_VNIFILTER_ENTRY_END,
|
|
||||||
VXLAN_VNIFILTER_ENTRY_GROUP,
|
|
||||||
VXLAN_VNIFILTER_ENTRY_GROUP6,
|
|
||||||
VXLAN_VNIFILTER_ENTRY_STATS,
|
|
||||||
__VXLAN_VNIFILTER_ENTRY_MAX
|
|
||||||
};
|
|
||||||
#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1)
|
|
||||||
|
|
||||||
enum {
|
|
||||||
VXLAN_VNIFILTER_UNSPEC,
|
|
||||||
VXLAN_VNIFILTER_ENTRY,
|
|
||||||
__VXLAN_VNIFILTER_MAX
|
|
||||||
};
|
|
||||||
#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1)
|
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
IFLA_VXLAN_UNSPEC,
|
IFLA_VXLAN_UNSPEC,
|
||||||
IFLA_VXLAN_ID,
|
IFLA_VXLAN_ID,
|
||||||
@@ -1393,9 +748,6 @@ enum {
|
|||||||
IFLA_VXLAN_GPE,
|
IFLA_VXLAN_GPE,
|
||||||
IFLA_VXLAN_TTL_INHERIT,
|
IFLA_VXLAN_TTL_INHERIT,
|
||||||
IFLA_VXLAN_DF,
|
IFLA_VXLAN_DF,
|
||||||
IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
|
|
||||||
IFLA_VXLAN_LOCALBYPASS,
|
|
||||||
IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
|
|
||||||
__IFLA_VXLAN_MAX
|
__IFLA_VXLAN_MAX
|
||||||
};
|
};
|
||||||
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
|
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
|
||||||
@@ -1413,13 +765,6 @@ enum ifla_vxlan_df {
|
|||||||
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
|
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum ifla_vxlan_label_policy {
|
|
||||||
VXLAN_LABEL_FIXED = 0,
|
|
||||||
VXLAN_LABEL_INHERIT = 1,
|
|
||||||
__VXLAN_LABEL_END,
|
|
||||||
VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
/* GENEVE section */
|
/* GENEVE section */
|
||||||
enum {
|
enum {
|
||||||
IFLA_GENEVE_UNSPEC,
|
IFLA_GENEVE_UNSPEC,
|
||||||
@@ -1436,7 +781,6 @@ enum {
|
|||||||
IFLA_GENEVE_LABEL,
|
IFLA_GENEVE_LABEL,
|
||||||
IFLA_GENEVE_TTL_INHERIT,
|
IFLA_GENEVE_TTL_INHERIT,
|
||||||
IFLA_GENEVE_DF,
|
IFLA_GENEVE_DF,
|
||||||
IFLA_GENEVE_INNER_PROTO_INHERIT,
|
|
||||||
__IFLA_GENEVE_MAX
|
__IFLA_GENEVE_MAX
|
||||||
};
|
};
|
||||||
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
|
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
|
||||||
@@ -1482,10 +826,6 @@ enum {
|
|||||||
IFLA_GTP_FD1,
|
IFLA_GTP_FD1,
|
||||||
IFLA_GTP_PDP_HASHSIZE,
|
IFLA_GTP_PDP_HASHSIZE,
|
||||||
IFLA_GTP_ROLE,
|
IFLA_GTP_ROLE,
|
||||||
IFLA_GTP_CREATE_SOCKETS,
|
|
||||||
IFLA_GTP_RESTART_COUNT,
|
|
||||||
IFLA_GTP_LOCAL,
|
|
||||||
IFLA_GTP_LOCAL6,
|
|
||||||
__IFLA_GTP_MAX,
|
__IFLA_GTP_MAX,
|
||||||
};
|
};
|
||||||
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
|
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
|
||||||
@@ -1525,7 +865,6 @@ enum {
|
|||||||
IFLA_BOND_AD_LACP_ACTIVE,
|
IFLA_BOND_AD_LACP_ACTIVE,
|
||||||
IFLA_BOND_MISSED_MAX,
|
IFLA_BOND_MISSED_MAX,
|
||||||
IFLA_BOND_NS_IP6_TARGET,
|
IFLA_BOND_NS_IP6_TARGET,
|
||||||
IFLA_BOND_COUPLED_CONTROL,
|
|
||||||
__IFLA_BOND_MAX,
|
__IFLA_BOND_MAX,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -1791,7 +1130,6 @@ enum {
|
|||||||
IFLA_HSR_PROTOCOL, /* Indicate different protocol than
|
IFLA_HSR_PROTOCOL, /* Indicate different protocol than
|
||||||
* HSR. For example PRP.
|
* HSR. For example PRP.
|
||||||
*/
|
*/
|
||||||
IFLA_HSR_INTERLINK, /* HSR interlink network device */
|
|
||||||
__IFLA_HSR_MAX,
|
__IFLA_HSR_MAX,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -1824,17 +1162,6 @@ enum {
|
|||||||
|
|
||||||
#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
|
#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
|
||||||
|
|
||||||
enum {
|
|
||||||
IFLA_STATS_GETSET_UNSPEC,
|
|
||||||
IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with
|
|
||||||
* a filter mask for the corresponding group.
|
|
||||||
*/
|
|
||||||
IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */
|
|
||||||
__IFLA_STATS_GETSET_MAX,
|
|
||||||
};
|
|
||||||
|
|
||||||
#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1)
|
|
||||||
|
|
||||||
/* These are embedded into IFLA_STATS_LINK_XSTATS:
|
/* These are embedded into IFLA_STATS_LINK_XSTATS:
|
||||||
* [IFLA_STATS_LINK_XSTATS]
|
* [IFLA_STATS_LINK_XSTATS]
|
||||||
* -> [LINK_XSTATS_TYPE_xxx]
|
* -> [LINK_XSTATS_TYPE_xxx]
|
||||||
@@ -1852,21 +1179,10 @@ enum {
|
|||||||
enum {
|
enum {
|
||||||
IFLA_OFFLOAD_XSTATS_UNSPEC,
|
IFLA_OFFLOAD_XSTATS_UNSPEC,
|
||||||
IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
|
IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
|
||||||
IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */
|
|
||||||
IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */
|
|
||||||
__IFLA_OFFLOAD_XSTATS_MAX
|
__IFLA_OFFLOAD_XSTATS_MAX
|
||||||
};
|
};
|
||||||
#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
|
#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
|
||||||
|
|
||||||
enum {
|
|
||||||
IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC,
|
|
||||||
IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */
|
|
||||||
IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */
|
|
||||||
__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX,
|
|
||||||
};
|
|
||||||
#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \
|
|
||||||
(__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1)
|
|
||||||
|
|
||||||
/* XDP section */
|
/* XDP section */
|
||||||
|
|
||||||
#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
|
#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
|
||||||
@@ -1965,16 +1281,4 @@ enum {
|
|||||||
|
|
||||||
#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
|
#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
|
||||||
|
|
||||||
/* DSA section */
|
|
||||||
|
|
||||||
enum {
|
|
||||||
IFLA_DSA_UNSPEC,
|
|
||||||
IFLA_DSA_CONDUIT,
|
|
||||||
/* Deprecated, use IFLA_DSA_CONDUIT instead */
|
|
||||||
IFLA_DSA_MASTER = IFLA_DSA_CONDUIT,
|
|
||||||
__IFLA_DSA_MAX,
|
|
||||||
};
|
|
||||||
|
|
||||||
#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1)
|
|
||||||
|
|
||||||
#endif /* _UAPI_LINUX_IF_LINK_H */
|
#endif /* _UAPI_LINUX_IF_LINK_H */
|
||||||
|
|||||||
@@ -25,25 +25,9 @@
|
|||||||
* application.
|
* application.
|
||||||
*/
|
*/
|
||||||
#define XDP_USE_NEED_WAKEUP (1 << 3)
|
#define XDP_USE_NEED_WAKEUP (1 << 3)
|
||||||
/* By setting this option, userspace application indicates that it can
|
|
||||||
* handle multiple descriptors per packet thus enabling AF_XDP to split
|
|
||||||
* multi-buffer XDP frames into multiple Rx descriptors. Without this set
|
|
||||||
* such frames will be dropped.
|
|
||||||
*/
|
|
||||||
#define XDP_USE_SG (1 << 4)
|
|
||||||
|
|
||||||
/* Flags for xsk_umem_config flags */
|
/* Flags for xsk_umem_config flags */
|
||||||
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
|
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
|
||||||
|
|
||||||
/* Force checksum calculation in software. Can be used for testing or
|
|
||||||
* working around potential HW issues. This option causes performance
|
|
||||||
* degradation and only works in XDP_COPY mode.
|
|
||||||
*/
|
|
||||||
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
|
|
||||||
|
|
||||||
/* Request to reserve tx_metadata_len bytes of per-chunk metadata.
|
|
||||||
*/
|
|
||||||
#define XDP_UMEM_TX_METADATA_LEN (1 << 2)
|
|
||||||
|
|
||||||
struct sockaddr_xdp {
|
struct sockaddr_xdp {
|
||||||
__u16 sxdp_family;
|
__u16 sxdp_family;
|
||||||
@@ -86,7 +70,6 @@ struct xdp_umem_reg {
|
|||||||
__u32 chunk_size;
|
__u32 chunk_size;
|
||||||
__u32 headroom;
|
__u32 headroom;
|
||||||
__u32 flags;
|
__u32 flags;
|
||||||
__u32 tx_metadata_len;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct xdp_statistics {
|
struct xdp_statistics {
|
||||||
@@ -116,51 +99,6 @@ struct xdp_options {
|
|||||||
#define XSK_UNALIGNED_BUF_ADDR_MASK \
|
#define XSK_UNALIGNED_BUF_ADDR_MASK \
|
||||||
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
|
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
|
||||||
|
|
||||||
/* Request transmit timestamp. Upon completion, put it into tx_timestamp
|
|
||||||
* field of struct xsk_tx_metadata.
|
|
||||||
*/
|
|
||||||
#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0)
|
|
||||||
|
|
||||||
/* Request transmit checksum offload. Checksum start position and offset
|
|
||||||
* are communicated via csum_start and csum_offset fields of struct
|
|
||||||
* xsk_tx_metadata.
|
|
||||||
*/
|
|
||||||
#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
|
|
||||||
|
|
||||||
/* Request launch time hardware offload. The device will schedule the packet for
|
|
||||||
* transmission at a pre-determined time called launch time. The value of
|
|
||||||
* launch time is communicated via launch_time field of struct xsk_tx_metadata.
|
|
||||||
*/
|
|
||||||
#define XDP_TXMD_FLAGS_LAUNCH_TIME (1 << 2)
|
|
||||||
|
|
||||||
/* AF_XDP offloads request. 'request' union member is consumed by the driver
|
|
||||||
* when the packet is being transmitted. 'completion' union member is
|
|
||||||
* filled by the driver when the transmit completion arrives.
|
|
||||||
*/
|
|
||||||
struct xsk_tx_metadata {
|
|
||||||
__u64 flags;
|
|
||||||
|
|
||||||
union {
|
|
||||||
struct {
|
|
||||||
/* XDP_TXMD_FLAGS_CHECKSUM */
|
|
||||||
|
|
||||||
/* Offset from desc->addr where checksumming should start. */
|
|
||||||
__u16 csum_start;
|
|
||||||
/* Offset from csum_start where checksum should be stored. */
|
|
||||||
__u16 csum_offset;
|
|
||||||
|
|
||||||
/* XDP_TXMD_FLAGS_LAUNCH_TIME */
|
|
||||||
/* Launch time in nanosecond against the PTP HW Clock */
|
|
||||||
__u64 launch_time;
|
|
||||||
} request;
|
|
||||||
|
|
||||||
struct {
|
|
||||||
/* XDP_TXMD_FLAGS_TIMESTAMP */
|
|
||||||
__u64 tx_timestamp;
|
|
||||||
} completion;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Rx/Tx descriptor */
|
/* Rx/Tx descriptor */
|
||||||
struct xdp_desc {
|
struct xdp_desc {
|
||||||
__u64 addr;
|
__u64 addr;
|
||||||
@@ -170,14 +108,4 @@ struct xdp_desc {
|
|||||||
|
|
||||||
/* UMEM descriptor is __u64 */
|
/* UMEM descriptor is __u64 */
|
||||||
|
|
||||||
/* Flag indicating that the packet continues with the buffer pointed out by the
|
|
||||||
* next frame in the ring. The end of the packet is signalled by setting this
|
|
||||||
* bit to zero. For single buffer packets, every descriptor has 'options' set
|
|
||||||
* to 0 and this maintains backward compatibility.
|
|
||||||
*/
|
|
||||||
#define XDP_PKT_CONTD (1 << 0)
|
|
||||||
|
|
||||||
/* TX packet carries valid metadata. */
|
|
||||||
#define XDP_TX_METADATA (1 << 1)
|
|
||||||
|
|
||||||
#endif /* _LINUX_IF_XDP_H */
|
#endif /* _LINUX_IF_XDP_H */
|
||||||
|
|||||||
@@ -11,7 +11,7 @@
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* enum netdev_xdp_act
|
* enum netdev_xdp_act
|
||||||
* @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
|
* @NETDEV_XDP_ACT_BASIC: XDP feautues set supported by all drivers
|
||||||
* (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
|
* (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
|
||||||
* @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
|
* @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
|
||||||
* @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
|
* @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
|
||||||
@@ -34,197 +34,28 @@ enum netdev_xdp_act {
|
|||||||
NETDEV_XDP_ACT_RX_SG = 32,
|
NETDEV_XDP_ACT_RX_SG = 32,
|
||||||
NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
|
NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
|
||||||
|
|
||||||
/* private: */
|
|
||||||
NETDEV_XDP_ACT_MASK = 127,
|
NETDEV_XDP_ACT_MASK = 127,
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
|
||||||
* enum netdev_xdp_rx_metadata
|
|
||||||
* @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
|
|
||||||
* timestamp via bpf_xdp_metadata_rx_timestamp().
|
|
||||||
* @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
|
|
||||||
* hash via bpf_xdp_metadata_rx_hash().
|
|
||||||
* @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive
|
|
||||||
* packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
|
|
||||||
*/
|
|
||||||
enum netdev_xdp_rx_metadata {
|
|
||||||
NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
|
|
||||||
NETDEV_XDP_RX_METADATA_HASH = 2,
|
|
||||||
NETDEV_XDP_RX_METADATA_VLAN_TAG = 4,
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* enum netdev_xsk_flags
|
|
||||||
* @NETDEV_XSK_FLAGS_TX_TIMESTAMP: HW timestamping egress packets is supported
|
|
||||||
* by the driver.
|
|
||||||
* @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the
|
|
||||||
* driver.
|
|
||||||
* @NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO: Launch time HW offload is supported
|
|
||||||
* by the driver.
|
|
||||||
*/
|
|
||||||
enum netdev_xsk_flags {
|
|
||||||
NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1,
|
|
||||||
NETDEV_XSK_FLAGS_TX_CHECKSUM = 2,
|
|
||||||
NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO = 4,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum netdev_queue_type {
|
|
||||||
NETDEV_QUEUE_TYPE_RX,
|
|
||||||
NETDEV_QUEUE_TYPE_TX,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum netdev_qstats_scope {
|
|
||||||
NETDEV_QSTATS_SCOPE_QUEUE = 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
NETDEV_A_DEV_IFINDEX = 1,
|
NETDEV_A_DEV_IFINDEX = 1,
|
||||||
NETDEV_A_DEV_PAD,
|
NETDEV_A_DEV_PAD,
|
||||||
NETDEV_A_DEV_XDP_FEATURES,
|
NETDEV_A_DEV_XDP_FEATURES,
|
||||||
NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
|
|
||||||
NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
|
|
||||||
NETDEV_A_DEV_XSK_FEATURES,
|
|
||||||
|
|
||||||
__NETDEV_A_DEV_MAX,
|
__NETDEV_A_DEV_MAX,
|
||||||
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
|
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
|
||||||
__NETDEV_A_IO_URING_PROVIDER_INFO_MAX,
|
|
||||||
NETDEV_A_IO_URING_PROVIDER_INFO_MAX = (__NETDEV_A_IO_URING_PROVIDER_INFO_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
NETDEV_A_PAGE_POOL_ID = 1,
|
|
||||||
NETDEV_A_PAGE_POOL_IFINDEX,
|
|
||||||
NETDEV_A_PAGE_POOL_NAPI_ID,
|
|
||||||
NETDEV_A_PAGE_POOL_INFLIGHT,
|
|
||||||
NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
|
|
||||||
NETDEV_A_PAGE_POOL_DETACH_TIME,
|
|
||||||
NETDEV_A_PAGE_POOL_DMABUF,
|
|
||||||
NETDEV_A_PAGE_POOL_IO_URING,
|
|
||||||
|
|
||||||
__NETDEV_A_PAGE_POOL_MAX,
|
|
||||||
NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_INFO = 1,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
|
|
||||||
|
|
||||||
__NETDEV_A_PAGE_POOL_STATS_MAX,
|
|
||||||
NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
NETDEV_A_NAPI_IFINDEX = 1,
|
|
||||||
NETDEV_A_NAPI_ID,
|
|
||||||
NETDEV_A_NAPI_IRQ,
|
|
||||||
NETDEV_A_NAPI_PID,
|
|
||||||
NETDEV_A_NAPI_DEFER_HARD_IRQS,
|
|
||||||
NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
|
|
||||||
NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
|
|
||||||
|
|
||||||
__NETDEV_A_NAPI_MAX,
|
|
||||||
NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
__NETDEV_A_XSK_INFO_MAX,
|
|
||||||
NETDEV_A_XSK_INFO_MAX = (__NETDEV_A_XSK_INFO_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
NETDEV_A_QUEUE_ID = 1,
|
|
||||||
NETDEV_A_QUEUE_IFINDEX,
|
|
||||||
NETDEV_A_QUEUE_TYPE,
|
|
||||||
NETDEV_A_QUEUE_NAPI_ID,
|
|
||||||
NETDEV_A_QUEUE_DMABUF,
|
|
||||||
NETDEV_A_QUEUE_IO_URING,
|
|
||||||
NETDEV_A_QUEUE_XSK,
|
|
||||||
|
|
||||||
__NETDEV_A_QUEUE_MAX,
|
|
||||||
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
NETDEV_A_QSTATS_IFINDEX = 1,
|
|
||||||
NETDEV_A_QSTATS_QUEUE_TYPE,
|
|
||||||
NETDEV_A_QSTATS_QUEUE_ID,
|
|
||||||
NETDEV_A_QSTATS_SCOPE,
|
|
||||||
NETDEV_A_QSTATS_RX_PACKETS = 8,
|
|
||||||
NETDEV_A_QSTATS_RX_BYTES,
|
|
||||||
NETDEV_A_QSTATS_TX_PACKETS,
|
|
||||||
NETDEV_A_QSTATS_TX_BYTES,
|
|
||||||
NETDEV_A_QSTATS_RX_ALLOC_FAIL,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_DROPS,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
|
|
||||||
NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
|
|
||||||
NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
|
|
||||||
NETDEV_A_QSTATS_RX_CSUM_NONE,
|
|
||||||
NETDEV_A_QSTATS_RX_CSUM_BAD,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_GRO_PACKETS,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_GRO_BYTES,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES,
|
|
||||||
NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_DROPS,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_DROP_ERRORS,
|
|
||||||
NETDEV_A_QSTATS_TX_CSUM_NONE,
|
|
||||||
NETDEV_A_QSTATS_TX_NEEDS_CSUM,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_GSO_PACKETS,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_GSO_BYTES,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
|
|
||||||
NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
|
|
||||||
NETDEV_A_QSTATS_TX_STOP,
|
|
||||||
NETDEV_A_QSTATS_TX_WAKE,
|
|
||||||
|
|
||||||
__NETDEV_A_QSTATS_MAX,
|
|
||||||
NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
NETDEV_A_DMABUF_IFINDEX = 1,
|
|
||||||
NETDEV_A_DMABUF_QUEUES,
|
|
||||||
NETDEV_A_DMABUF_FD,
|
|
||||||
NETDEV_A_DMABUF_ID,
|
|
||||||
|
|
||||||
__NETDEV_A_DMABUF_MAX,
|
|
||||||
NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1)
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
NETDEV_CMD_DEV_GET = 1,
|
NETDEV_CMD_DEV_GET = 1,
|
||||||
NETDEV_CMD_DEV_ADD_NTF,
|
NETDEV_CMD_DEV_ADD_NTF,
|
||||||
NETDEV_CMD_DEV_DEL_NTF,
|
NETDEV_CMD_DEV_DEL_NTF,
|
||||||
NETDEV_CMD_DEV_CHANGE_NTF,
|
NETDEV_CMD_DEV_CHANGE_NTF,
|
||||||
NETDEV_CMD_PAGE_POOL_GET,
|
|
||||||
NETDEV_CMD_PAGE_POOL_ADD_NTF,
|
|
||||||
NETDEV_CMD_PAGE_POOL_DEL_NTF,
|
|
||||||
NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
|
|
||||||
NETDEV_CMD_PAGE_POOL_STATS_GET,
|
|
||||||
NETDEV_CMD_QUEUE_GET,
|
|
||||||
NETDEV_CMD_NAPI_GET,
|
|
||||||
NETDEV_CMD_QSTATS_GET,
|
|
||||||
NETDEV_CMD_BIND_RX,
|
|
||||||
NETDEV_CMD_NAPI_SET,
|
|
||||||
|
|
||||||
__NETDEV_CMD_MAX,
|
__NETDEV_CMD_MAX,
|
||||||
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
|
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
|
||||||
};
|
};
|
||||||
|
|
||||||
#define NETDEV_MCGRP_MGMT "mgmt"
|
#define NETDEV_MCGRP_MGMT "mgmt"
|
||||||
#define NETDEV_MCGRP_PAGE_POOL "page-pool"
|
|
||||||
|
|
||||||
#endif /* _UAPI_LINUX_NETDEV_H */
|
#endif /* _UAPI_LINUX_NETDEV_H */
|
||||||
|
|||||||
43
include/uapi/linux/openat2.h
Normal file
43
include/uapi/linux/openat2.h
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||||
|
#ifndef _UAPI_LINUX_OPENAT2_H
|
||||||
|
#define _UAPI_LINUX_OPENAT2_H
|
||||||
|
|
||||||
|
#include <linux/types.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Arguments for how openat2(2) should open the target path. If only @flags and
|
||||||
|
* @mode are non-zero, then openat2(2) operates very similarly to openat(2).
|
||||||
|
*
|
||||||
|
* However, unlike openat(2), unknown or invalid bits in @flags result in
|
||||||
|
* -EINVAL rather than being silently ignored. @mode must be zero unless one of
|
||||||
|
* {O_CREAT, O_TMPFILE} are set.
|
||||||
|
*
|
||||||
|
* @flags: O_* flags.
|
||||||
|
* @mode: O_CREAT/O_TMPFILE file mode.
|
||||||
|
* @resolve: RESOLVE_* flags.
|
||||||
|
*/
|
||||||
|
struct open_how {
|
||||||
|
__u64 flags;
|
||||||
|
__u64 mode;
|
||||||
|
__u64 resolve;
|
||||||
|
};
|
||||||
|
|
||||||
|
/* how->resolve flags for openat2(2). */
|
||||||
|
#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings
|
||||||
|
(includes bind-mounts). */
|
||||||
|
#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style
|
||||||
|
"magic-links". */
|
||||||
|
#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks
|
||||||
|
(implies OEXT_NO_MAGICLINKS) */
|
||||||
|
#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like
|
||||||
|
"..", symlinks, and absolute
|
||||||
|
paths which escape the dirfd. */
|
||||||
|
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
|
||||||
|
be scoped inside the dirfd
|
||||||
|
(similar to chroot(2)). */
|
||||||
|
#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
|
||||||
|
completed through cached lookup. May
|
||||||
|
return -EAGAIN if that's not
|
||||||
|
possible. */
|
||||||
|
|
||||||
|
#endif /* _UAPI_LINUX_OPENAT2_H */
|
||||||
@@ -204,8 +204,6 @@ enum perf_branch_sample_type_shift {
|
|||||||
|
|
||||||
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
|
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
|
||||||
|
|
||||||
PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
|
|
||||||
|
|
||||||
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
|
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -237,8 +235,6 @@ enum perf_branch_sample_type {
|
|||||||
|
|
||||||
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
|
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
|
||||||
|
|
||||||
PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
|
|
||||||
|
|
||||||
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
|
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -385,8 +381,6 @@ enum perf_event_read_format {
|
|||||||
*
|
*
|
||||||
* @sample_max_stack: Max number of frame pointers in a callchain,
|
* @sample_max_stack: Max number of frame pointers in a callchain,
|
||||||
* should be < /proc/sys/kernel/perf_event_max_stack
|
* should be < /proc/sys/kernel/perf_event_max_stack
|
||||||
* Max number of entries of branch stack
|
|
||||||
* should be < hardware limit
|
|
||||||
*/
|
*/
|
||||||
struct perf_event_attr {
|
struct perf_event_attr {
|
||||||
|
|
||||||
@@ -513,16 +507,7 @@ struct perf_event_attr {
|
|||||||
__u16 sample_max_stack;
|
__u16 sample_max_stack;
|
||||||
__u16 __reserved_2;
|
__u16 __reserved_2;
|
||||||
__u32 aux_sample_size;
|
__u32 aux_sample_size;
|
||||||
|
__u32 __reserved_3;
|
||||||
union {
|
|
||||||
__u32 aux_action;
|
|
||||||
struct {
|
|
||||||
__u32 aux_start_paused : 1, /* start AUX area tracing paused */
|
|
||||||
aux_pause : 1, /* on overflow, pause AUX area tracing */
|
|
||||||
aux_resume : 1, /* on overflow, resume AUX area tracing */
|
|
||||||
__reserved_3 : 29;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* User provided data if sigtrap=1, passed back to user via
|
* User provided data if sigtrap=1, passed back to user via
|
||||||
@@ -997,12 +982,6 @@ enum perf_event_type {
|
|||||||
* { u64 nr;
|
* { u64 nr;
|
||||||
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
|
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
|
||||||
* { u64 from, to, flags } lbr[nr];
|
* { u64 from, to, flags } lbr[nr];
|
||||||
* #
|
|
||||||
* # The format of the counters is decided by the
|
|
||||||
* # "branch_counter_nr" and "branch_counter_width",
|
|
||||||
* # which are defined in the ABI.
|
|
||||||
* #
|
|
||||||
* { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
|
|
||||||
* } && PERF_SAMPLE_BRANCH_STACK
|
* } && PERF_SAMPLE_BRANCH_STACK
|
||||||
*
|
*
|
||||||
* { u64 abi; # enum perf_sample_regs_abi
|
* { u64 abi; # enum perf_sample_regs_abi
|
||||||
@@ -1360,14 +1339,11 @@ union perf_mem_data_src {
|
|||||||
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
|
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
|
||||||
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
|
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
|
||||||
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
|
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
|
||||||
#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
|
/* 5-0x8 available */
|
||||||
#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
|
|
||||||
/* 0x7 available */
|
|
||||||
#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
|
|
||||||
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
|
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
|
||||||
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
|
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
|
||||||
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
|
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
|
||||||
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
|
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
|
||||||
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
|
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
|
||||||
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
|
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
|
||||||
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
|
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
|
||||||
@@ -1450,9 +1426,6 @@ struct perf_branch_entry {
|
|||||||
reserved:31;
|
reserved:31;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Size of used info bits in struct perf_branch_entry */
|
|
||||||
#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
|
|
||||||
|
|
||||||
union perf_sample_weight {
|
union perf_sample_weight {
|
||||||
__u64 full;
|
__u64 full;
|
||||||
#if defined(__LITTLE_ENDIAN_BITFIELD)
|
#if defined(__LITTLE_ENDIAN_BITFIELD)
|
||||||
|
|||||||
@@ -204,6 +204,37 @@ struct tc_u32_pcnt {
|
|||||||
|
|
||||||
#define TC_U32_MAXDEPTH 8
|
#define TC_U32_MAXDEPTH 8
|
||||||
|
|
||||||
|
|
||||||
|
/* RSVP filter */
|
||||||
|
|
||||||
|
enum {
|
||||||
|
TCA_RSVP_UNSPEC,
|
||||||
|
TCA_RSVP_CLASSID,
|
||||||
|
TCA_RSVP_DST,
|
||||||
|
TCA_RSVP_SRC,
|
||||||
|
TCA_RSVP_PINFO,
|
||||||
|
TCA_RSVP_POLICE,
|
||||||
|
TCA_RSVP_ACT,
|
||||||
|
__TCA_RSVP_MAX
|
||||||
|
};
|
||||||
|
|
||||||
|
#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
|
||||||
|
|
||||||
|
struct tc_rsvp_gpi {
|
||||||
|
__u32 key;
|
||||||
|
__u32 mask;
|
||||||
|
int offset;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct tc_rsvp_pinfo {
|
||||||
|
struct tc_rsvp_gpi dpi;
|
||||||
|
struct tc_rsvp_gpi spi;
|
||||||
|
__u8 protocol;
|
||||||
|
__u8 tunnelid;
|
||||||
|
__u8 tunnelhdr;
|
||||||
|
__u8 pad;
|
||||||
|
};
|
||||||
|
|
||||||
/* ROUTE filter */
|
/* ROUTE filter */
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
@@ -234,6 +265,22 @@ enum {
|
|||||||
|
|
||||||
#define TCA_FW_MAX (__TCA_FW_MAX - 1)
|
#define TCA_FW_MAX (__TCA_FW_MAX - 1)
|
||||||
|
|
||||||
|
/* TC index filter */
|
||||||
|
|
||||||
|
enum {
|
||||||
|
TCA_TCINDEX_UNSPEC,
|
||||||
|
TCA_TCINDEX_HASH,
|
||||||
|
TCA_TCINDEX_MASK,
|
||||||
|
TCA_TCINDEX_SHIFT,
|
||||||
|
TCA_TCINDEX_FALL_THROUGH,
|
||||||
|
TCA_TCINDEX_CLASSID,
|
||||||
|
TCA_TCINDEX_POLICE,
|
||||||
|
TCA_TCINDEX_ACT,
|
||||||
|
__TCA_TCINDEX_MAX
|
||||||
|
};
|
||||||
|
|
||||||
|
#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
|
||||||
|
|
||||||
/* Flow filter */
|
/* Flow filter */
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
|
|||||||
@@ -457,6 +457,115 @@ enum {
|
|||||||
|
|
||||||
#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
|
#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
|
||||||
|
|
||||||
|
|
||||||
|
/* CBQ section */
|
||||||
|
|
||||||
|
#define TC_CBQ_MAXPRIO 8
|
||||||
|
#define TC_CBQ_MAXLEVEL 8
|
||||||
|
#define TC_CBQ_DEF_EWMA 5
|
||||||
|
|
||||||
|
struct tc_cbq_lssopt {
|
||||||
|
unsigned char change;
|
||||||
|
unsigned char flags;
|
||||||
|
#define TCF_CBQ_LSS_BOUNDED 1
|
||||||
|
#define TCF_CBQ_LSS_ISOLATED 2
|
||||||
|
unsigned char ewma_log;
|
||||||
|
unsigned char level;
|
||||||
|
#define TCF_CBQ_LSS_FLAGS 1
|
||||||
|
#define TCF_CBQ_LSS_EWMA 2
|
||||||
|
#define TCF_CBQ_LSS_MAXIDLE 4
|
||||||
|
#define TCF_CBQ_LSS_MINIDLE 8
|
||||||
|
#define TCF_CBQ_LSS_OFFTIME 0x10
|
||||||
|
#define TCF_CBQ_LSS_AVPKT 0x20
|
||||||
|
__u32 maxidle;
|
||||||
|
__u32 minidle;
|
||||||
|
__u32 offtime;
|
||||||
|
__u32 avpkt;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct tc_cbq_wrropt {
|
||||||
|
unsigned char flags;
|
||||||
|
unsigned char priority;
|
||||||
|
unsigned char cpriority;
|
||||||
|
unsigned char __reserved;
|
||||||
|
__u32 allot;
|
||||||
|
__u32 weight;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct tc_cbq_ovl {
|
||||||
|
unsigned char strategy;
|
||||||
|
#define TC_CBQ_OVL_CLASSIC 0
|
||||||
|
#define TC_CBQ_OVL_DELAY 1
|
||||||
|
#define TC_CBQ_OVL_LOWPRIO 2
|
||||||
|
#define TC_CBQ_OVL_DROP 3
|
||||||
|
#define TC_CBQ_OVL_RCLASSIC 4
|
||||||
|
unsigned char priority2;
|
||||||
|
__u16 pad;
|
||||||
|
__u32 penalty;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct tc_cbq_police {
|
||||||
|
unsigned char police;
|
||||||
|
unsigned char __res1;
|
||||||
|
unsigned short __res2;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct tc_cbq_fopt {
|
||||||
|
__u32 split;
|
||||||
|
__u32 defmap;
|
||||||
|
__u32 defchange;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct tc_cbq_xstats {
|
||||||
|
__u32 borrows;
|
||||||
|
__u32 overactions;
|
||||||
|
__s32 avgidle;
|
||||||
|
__s32 undertime;
|
||||||
|
};
|
||||||
|
|
||||||
|
enum {
|
||||||
|
TCA_CBQ_UNSPEC,
|
||||||
|
TCA_CBQ_LSSOPT,
|
||||||
|
TCA_CBQ_WRROPT,
|
||||||
|
TCA_CBQ_FOPT,
|
||||||
|
TCA_CBQ_OVL_STRATEGY,
|
||||||
|
TCA_CBQ_RATE,
|
||||||
|
TCA_CBQ_RTAB,
|
||||||
|
TCA_CBQ_POLICE,
|
||||||
|
__TCA_CBQ_MAX,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
|
||||||
|
|
||||||
|
/* dsmark section */
|
||||||
|
|
||||||
|
enum {
|
||||||
|
TCA_DSMARK_UNSPEC,
|
||||||
|
TCA_DSMARK_INDICES,
|
||||||
|
TCA_DSMARK_DEFAULT_INDEX,
|
||||||
|
TCA_DSMARK_SET_TC_INDEX,
|
||||||
|
TCA_DSMARK_MASK,
|
||||||
|
TCA_DSMARK_VALUE,
|
||||||
|
__TCA_DSMARK_MAX,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
|
||||||
|
|
||||||
|
/* ATM section */
|
||||||
|
|
||||||
|
enum {
|
||||||
|
TCA_ATM_UNSPEC,
|
||||||
|
TCA_ATM_FD, /* file/socket descriptor */
|
||||||
|
TCA_ATM_PTR, /* pointer to descriptor - later */
|
||||||
|
TCA_ATM_HDR, /* LL header */
|
||||||
|
TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
|
||||||
|
TCA_ATM_ADDR, /* PVC address (for output only) */
|
||||||
|
TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
|
||||||
|
__TCA_ATM_MAX,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
|
||||||
|
|
||||||
/* Network emulator */
|
/* Network emulator */
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ fi
|
|||||||
# due to https://bugs.gentoo.org/794601) so let's just point the script to
|
# due to https://bugs.gentoo.org/794601) so let's just point the script to
|
||||||
# commits referring to versions of libelf that actually can be built
|
# commits referring to versions of libelf that actually can be built
|
||||||
rm -rf elfutils
|
rm -rf elfutils
|
||||||
git clone https://sourceware.org/git/elfutils.git
|
git clone git://sourceware.org/git/elfutils.git
|
||||||
(
|
(
|
||||||
cd elfutils
|
cd elfutils
|
||||||
git checkout 67a187d4c1790058fc7fd218317851cb68bb087c
|
git checkout 67a187d4c1790058fc7fd218317851cb68bb087c
|
||||||
|
|||||||
@@ -1,37 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
usage () {
|
|
||||||
echo "USAGE: ./mailmap-update.sh <libbpf-repo> <linux-repo>"
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
LIBBPF_REPO="${1-""}"
|
|
||||||
LINUX_REPO="${2-""}"
|
|
||||||
|
|
||||||
if [ -z "${LIBBPF_REPO}" ] || [ -z "${LINUX_REPO}" ]; then
|
|
||||||
echo "Error: libbpf or linux repos are not specified"
|
|
||||||
usage
|
|
||||||
fi
|
|
||||||
|
|
||||||
LIBBPF_MAILMAP="${LIBBPF_REPO}/.mailmap"
|
|
||||||
LINUX_MAILMAP="${LINUX_REPO}/.mailmap"
|
|
||||||
|
|
||||||
tmpfile="$(mktemp)"
|
|
||||||
cleanup() {
|
|
||||||
rm -f "${tmpfile}"
|
|
||||||
}
|
|
||||||
trap cleanup EXIT
|
|
||||||
|
|
||||||
grep_lines() {
|
|
||||||
local pattern="$1"
|
|
||||||
local file="$2"
|
|
||||||
grep "${pattern}" "${file}" || true
|
|
||||||
}
|
|
||||||
|
|
||||||
while read -r email; do
|
|
||||||
grep_lines "${email}$" "${LINUX_MAILMAP}" >> "${tmpfile}"
|
|
||||||
done < <(git log --format='<%ae>' | sort -u)
|
|
||||||
|
|
||||||
sort -u "${tmpfile}" > "${LIBBPF_MAILMAP}"
|
|
||||||
@@ -295,22 +295,6 @@ Latest changes to BPF helper definitions.
|
|||||||
" -- src/bpf_helper_defs.h
|
" -- src/bpf_helper_defs.h
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Regenerating .mailmap..."
|
|
||||||
cd_to "${LINUX_REPO}"
|
|
||||||
git checkout "${TIP_SYM_REF}"
|
|
||||||
cd_to "${LIBBPF_REPO}"
|
|
||||||
"${LIBBPF_REPO}"/scripts/mailmap-update.sh "${LIBBPF_REPO}" "${LINUX_REPO}"
|
|
||||||
# if anything changed, commit it
|
|
||||||
mailmap_changes=$(git status --porcelain .mailmap | wc -l)
|
|
||||||
if ((${mailmap_changes} == 1)); then
|
|
||||||
git add .mailmap
|
|
||||||
git commit -s -m "sync: update .mailmap
|
|
||||||
|
|
||||||
Update .mailmap based on libbpf's list of contributors and on the latest
|
|
||||||
.mailmap version in the upstream repository.
|
|
||||||
" -- .mailmap
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Use generated cover-letter as a template for "sync commit" with
|
# Use generated cover-letter as a template for "sync commit" with
|
||||||
# baseline and checkpoint commits from kernel repo (and leave summary
|
# baseline and checkpoint commits from kernel repo (and leave summary
|
||||||
# from cover letter intact, of course)
|
# from cover letter intact, of course)
|
||||||
|
|||||||
18
src/Makefile
18
src/Makefile
@@ -9,7 +9,7 @@ else
|
|||||||
endif
|
endif
|
||||||
|
|
||||||
LIBBPF_MAJOR_VERSION := 1
|
LIBBPF_MAJOR_VERSION := 1
|
||||||
LIBBPF_MINOR_VERSION := 6
|
LIBBPF_MINOR_VERSION := 3
|
||||||
LIBBPF_PATCH_VERSION := 0
|
LIBBPF_PATCH_VERSION := 0
|
||||||
LIBBPF_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).$(LIBBPF_PATCH_VERSION)
|
LIBBPF_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).$(LIBBPF_PATCH_VERSION)
|
||||||
LIBBPF_MAJMIN_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).0
|
LIBBPF_MAJMIN_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).0
|
||||||
@@ -26,7 +26,6 @@ endef
|
|||||||
|
|
||||||
$(call allow-override,CC,$(CROSS_COMPILE)cc)
|
$(call allow-override,CC,$(CROSS_COMPILE)cc)
|
||||||
$(call allow-override,LD,$(CROSS_COMPILE)ld)
|
$(call allow-override,LD,$(CROSS_COMPILE)ld)
|
||||||
PKG_CONFIG ?= pkg-config
|
|
||||||
|
|
||||||
TOPDIR = ..
|
TOPDIR = ..
|
||||||
|
|
||||||
@@ -36,18 +35,13 @@ ALL_CFLAGS := $(INCLUDES)
|
|||||||
SHARED_CFLAGS += -fPIC -fvisibility=hidden -DSHARED
|
SHARED_CFLAGS += -fPIC -fvisibility=hidden -DSHARED
|
||||||
|
|
||||||
CFLAGS ?= -g -O2 -Werror -Wall -std=gnu89
|
CFLAGS ?= -g -O2 -Werror -Wall -std=gnu89
|
||||||
ALL_CFLAGS += $(CFLAGS) \
|
ALL_CFLAGS += $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 $(EXTRA_CFLAGS)
|
||||||
-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 \
|
|
||||||
-Wno-unknown-warning-option -Wno-format-overflow \
|
|
||||||
$(EXTRA_CFLAGS)
|
|
||||||
ALL_LDFLAGS += $(LDFLAGS) $(EXTRA_LDFLAGS)
|
ALL_LDFLAGS += $(LDFLAGS) $(EXTRA_LDFLAGS)
|
||||||
|
|
||||||
ifeq ($(shell command -v $(PKG_CONFIG) 2> /dev/null),)
|
|
||||||
NO_PKG_CONFIG := 1
|
|
||||||
endif
|
|
||||||
ifdef NO_PKG_CONFIG
|
ifdef NO_PKG_CONFIG
|
||||||
ALL_LDFLAGS += -lelf -lz
|
ALL_LDFLAGS += -lelf -lz
|
||||||
else
|
else
|
||||||
|
PKG_CONFIG ?= pkg-config
|
||||||
ALL_CFLAGS += $(shell $(PKG_CONFIG) --cflags libelf zlib)
|
ALL_CFLAGS += $(shell $(PKG_CONFIG) --cflags libelf zlib)
|
||||||
ALL_LDFLAGS += $(shell $(PKG_CONFIG) --libs libelf zlib)
|
ALL_LDFLAGS += $(shell $(PKG_CONFIG) --libs libelf zlib)
|
||||||
endif
|
endif
|
||||||
@@ -58,7 +52,7 @@ STATIC_OBJDIR := $(OBJDIR)/staticobjs
|
|||||||
OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \
|
OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \
|
||||||
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \
|
nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \
|
||||||
btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \
|
btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \
|
||||||
relo_core.o usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
|
relo_core.o usdt.o zip.o
|
||||||
SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS))
|
SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS))
|
||||||
STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS))
|
STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS))
|
||||||
|
|
||||||
@@ -122,13 +116,13 @@ $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION): $(SHARED_OBJS)
|
|||||||
-Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
|
-Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
|
||||||
$^ $(ALL_LDFLAGS) -o $@
|
$^ $(ALL_LDFLAGS) -o $@
|
||||||
|
|
||||||
$(OBJDIR)/libbpf.pc: force | $(OBJDIR)
|
$(OBJDIR)/libbpf.pc: force
|
||||||
$(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \
|
$(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \
|
||||||
-e "s|@LIBDIR@|$(LIBDIR_PC)|" \
|
-e "s|@LIBDIR@|$(LIBDIR_PC)|" \
|
||||||
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
|
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
|
||||||
< libbpf.pc.template > $@
|
< libbpf.pc.template > $@
|
||||||
|
|
||||||
$(OBJDIR) $(STATIC_OBJDIR) $(SHARED_OBJDIR):
|
$(STATIC_OBJDIR) $(SHARED_OBJDIR):
|
||||||
$(call msg,MKDIR,$@)
|
$(call msg,MKDIR,$@)
|
||||||
$(Q)mkdir -p $@
|
$(Q)mkdir -p $@
|
||||||
|
|
||||||
|
|||||||
220
src/bpf.c
220
src/bpf.c
@@ -103,9 +103,9 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
|
|||||||
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
|
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
|
||||||
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
|
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
|
||||||
*/
|
*/
|
||||||
int probe_memcg_account(int token_fd)
|
int probe_memcg_account(void)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
|
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
|
||||||
struct bpf_insn insns[] = {
|
struct bpf_insn insns[] = {
|
||||||
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
|
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
|
||||||
BPF_EXIT_INSN(),
|
BPF_EXIT_INSN(),
|
||||||
@@ -120,9 +120,6 @@ int probe_memcg_account(int token_fd)
|
|||||||
attr.insns = ptr_to_u64(insns);
|
attr.insns = ptr_to_u64(insns);
|
||||||
attr.insn_cnt = insn_cnt;
|
attr.insn_cnt = insn_cnt;
|
||||||
attr.license = ptr_to_u64("GPL");
|
attr.license = ptr_to_u64("GPL");
|
||||||
attr.prog_token_fd = token_fd;
|
|
||||||
if (token_fd)
|
|
||||||
attr.prog_flags |= BPF_F_TOKEN_FD;
|
|
||||||
|
|
||||||
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
|
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
|
||||||
if (prog_fd >= 0) {
|
if (prog_fd >= 0) {
|
||||||
@@ -149,7 +146,7 @@ int bump_rlimit_memlock(void)
|
|||||||
struct rlimit rlim;
|
struct rlimit rlim;
|
||||||
|
|
||||||
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
|
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
|
||||||
if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
|
if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
memlock_bumped = true;
|
memlock_bumped = true;
|
||||||
@@ -172,7 +169,7 @@ int bpf_map_create(enum bpf_map_type map_type,
|
|||||||
__u32 max_entries,
|
__u32 max_entries,
|
||||||
const struct bpf_map_create_opts *opts)
|
const struct bpf_map_create_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
|
const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
int fd;
|
int fd;
|
||||||
|
|
||||||
@@ -184,7 +181,7 @@ int bpf_map_create(enum bpf_map_type map_type,
|
|||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
|
|
||||||
attr.map_type = map_type;
|
attr.map_type = map_type;
|
||||||
if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
|
if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
|
||||||
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
||||||
attr.key_size = key_size;
|
attr.key_size = key_size;
|
||||||
attr.value_size = value_size;
|
attr.value_size = value_size;
|
||||||
@@ -194,7 +191,6 @@ int bpf_map_create(enum bpf_map_type map_type,
|
|||||||
attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
|
attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
|
||||||
attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
|
attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
|
||||||
attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
|
attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
|
||||||
attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
|
|
||||||
|
|
||||||
attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
|
attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
|
||||||
attr.map_flags = OPTS_GET(opts, map_flags, 0);
|
attr.map_flags = OPTS_GET(opts, map_flags, 0);
|
||||||
@@ -202,8 +198,6 @@ int bpf_map_create(enum bpf_map_type map_type,
|
|||||||
attr.numa_node = OPTS_GET(opts, numa_node, 0);
|
attr.numa_node = OPTS_GET(opts, numa_node, 0);
|
||||||
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
|
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
|
||||||
|
|
||||||
attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
|
|
||||||
|
|
||||||
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
|
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
|
||||||
return libbpf_err_errno(fd);
|
return libbpf_err_errno(fd);
|
||||||
}
|
}
|
||||||
@@ -238,7 +232,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
|||||||
const struct bpf_insn *insns, size_t insn_cnt,
|
const struct bpf_insn *insns, size_t insn_cnt,
|
||||||
struct bpf_prog_load_opts *opts)
|
struct bpf_prog_load_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt);
|
const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
|
||||||
void *finfo = NULL, *linfo = NULL;
|
void *finfo = NULL, *linfo = NULL;
|
||||||
const char *func_info, *line_info;
|
const char *func_info, *line_info;
|
||||||
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
|
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
|
||||||
@@ -267,9 +261,8 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
|||||||
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
|
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
|
||||||
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
|
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
|
||||||
attr.kern_version = OPTS_GET(opts, kern_version, 0);
|
attr.kern_version = OPTS_GET(opts, kern_version, 0);
|
||||||
attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
|
|
||||||
|
|
||||||
if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
|
if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
|
||||||
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
||||||
attr.license = ptr_to_u64(license);
|
attr.license = ptr_to_u64(license);
|
||||||
|
|
||||||
@@ -311,7 +304,6 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
|||||||
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
|
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
|
||||||
|
|
||||||
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
|
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
|
||||||
attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);
|
|
||||||
|
|
||||||
if (log_level) {
|
if (log_level) {
|
||||||
attr.log_buf = ptr_to_u64(log_buf);
|
attr.log_buf = ptr_to_u64(log_buf);
|
||||||
@@ -637,89 +629,55 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
|
|||||||
return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
|
return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type,
|
int bpf_prog_attach_opts(int prog_fd, int target_fd,
|
||||||
const struct bpf_prog_attach_opts *opts)
|
enum bpf_attach_type type,
|
||||||
|
const struct bpf_prog_attach_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
|
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
|
||||||
__u32 relative_id, flags;
|
|
||||||
int ret, relative_fd;
|
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
|
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
|
||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
|
|
||||||
relative_id = OPTS_GET(opts, relative_id, 0);
|
|
||||||
relative_fd = OPTS_GET(opts, relative_fd, 0);
|
|
||||||
flags = OPTS_GET(opts, flags, 0);
|
|
||||||
|
|
||||||
/* validate we don't have unexpected combinations of non-zero fields */
|
|
||||||
if (relative_fd && relative_id)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
memset(&attr, 0, attr_sz);
|
memset(&attr, 0, attr_sz);
|
||||||
attr.target_fd = target;
|
attr.target_fd = target_fd;
|
||||||
attr.attach_bpf_fd = prog_fd;
|
attr.attach_bpf_fd = prog_fd;
|
||||||
attr.attach_type = type;
|
attr.attach_type = type;
|
||||||
attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0);
|
attr.attach_flags = OPTS_GET(opts, flags, 0);
|
||||||
attr.expected_revision = OPTS_GET(opts, expected_revision, 0);
|
attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
|
||||||
|
|
||||||
if (relative_id) {
|
|
||||||
attr.attach_flags = flags | BPF_F_ID;
|
|
||||||
attr.relative_id = relative_id;
|
|
||||||
} else {
|
|
||||||
attr.attach_flags = flags;
|
|
||||||
attr.relative_fd = relative_fd;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
|
ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
|
||||||
return libbpf_err_errno(ret);
|
return libbpf_err_errno(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type,
|
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
|
||||||
const struct bpf_prog_detach_opts *opts)
|
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
|
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
|
||||||
__u32 relative_id, flags;
|
|
||||||
int ret, relative_fd;
|
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
int ret;
|
||||||
if (!OPTS_VALID(opts, bpf_prog_detach_opts))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
relative_id = OPTS_GET(opts, relative_id, 0);
|
|
||||||
relative_fd = OPTS_GET(opts, relative_fd, 0);
|
|
||||||
flags = OPTS_GET(opts, flags, 0);
|
|
||||||
|
|
||||||
/* validate we don't have unexpected combinations of non-zero fields */
|
|
||||||
if (relative_fd && relative_id)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
memset(&attr, 0, attr_sz);
|
memset(&attr, 0, attr_sz);
|
||||||
attr.target_fd = target;
|
attr.target_fd = target_fd;
|
||||||
attr.attach_bpf_fd = prog_fd;
|
attr.attach_type = type;
|
||||||
attr.attach_type = type;
|
|
||||||
attr.expected_revision = OPTS_GET(opts, expected_revision, 0);
|
|
||||||
|
|
||||||
if (relative_id) {
|
|
||||||
attr.attach_flags = flags | BPF_F_ID;
|
|
||||||
attr.relative_id = relative_id;
|
|
||||||
} else {
|
|
||||||
attr.attach_flags = flags;
|
|
||||||
attr.relative_fd = relative_fd;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
|
ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
|
||||||
return libbpf_err_errno(ret);
|
return libbpf_err_errno(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
|
|
||||||
{
|
|
||||||
return bpf_prog_detach_opts(0, target_fd, type, NULL);
|
|
||||||
}
|
|
||||||
|
|
||||||
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
|
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
|
||||||
{
|
{
|
||||||
return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
|
const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
|
||||||
|
union bpf_attr attr;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
memset(&attr, 0, attr_sz);
|
||||||
|
attr.target_fd = target_fd;
|
||||||
|
attr.attach_bpf_fd = prog_fd;
|
||||||
|
attr.attach_type = type;
|
||||||
|
|
||||||
|
ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
|
||||||
|
return libbpf_err_errno(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_link_create(int prog_fd, int target_fd,
|
int bpf_link_create(int prog_fd, int target_fd,
|
||||||
@@ -727,9 +685,9 @@ int bpf_link_create(int prog_fd, int target_fd,
|
|||||||
const struct bpf_link_create_opts *opts)
|
const struct bpf_link_create_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, link_create);
|
const size_t attr_sz = offsetofend(union bpf_attr, link_create);
|
||||||
__u32 target_btf_id, iter_info_len, relative_id;
|
__u32 target_btf_id, iter_info_len;
|
||||||
int fd, err, relative_fd;
|
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
int fd, err;
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_link_create_opts))
|
if (!OPTS_VALID(opts, bpf_link_create_opts))
|
||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
@@ -767,7 +725,6 @@ int bpf_link_create(int prog_fd, int target_fd,
|
|||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
break;
|
break;
|
||||||
case BPF_TRACE_KPROBE_MULTI:
|
case BPF_TRACE_KPROBE_MULTI:
|
||||||
case BPF_TRACE_KPROBE_SESSION:
|
|
||||||
attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
|
attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
|
||||||
attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
|
attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
|
||||||
attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
|
attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
|
||||||
@@ -776,19 +733,6 @@ int bpf_link_create(int prog_fd, int target_fd,
|
|||||||
if (!OPTS_ZEROED(opts, kprobe_multi))
|
if (!OPTS_ZEROED(opts, kprobe_multi))
|
||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
break;
|
break;
|
||||||
case BPF_TRACE_UPROBE_MULTI:
|
|
||||||
case BPF_TRACE_UPROBE_SESSION:
|
|
||||||
attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
|
|
||||||
attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
|
|
||||||
attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
|
|
||||||
attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
|
|
||||||
attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
|
|
||||||
attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
|
|
||||||
attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
|
|
||||||
if (!OPTS_ZEROED(opts, uprobe_multi))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
break;
|
|
||||||
case BPF_TRACE_RAW_TP:
|
|
||||||
case BPF_TRACE_FENTRY:
|
case BPF_TRACE_FENTRY:
|
||||||
case BPF_TRACE_FEXIT:
|
case BPF_TRACE_FEXIT:
|
||||||
case BPF_MODIFY_RETURN:
|
case BPF_MODIFY_RETURN:
|
||||||
@@ -805,38 +749,6 @@ int bpf_link_create(int prog_fd, int target_fd,
|
|||||||
if (!OPTS_ZEROED(opts, netfilter))
|
if (!OPTS_ZEROED(opts, netfilter))
|
||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
break;
|
break;
|
||||||
case BPF_TCX_INGRESS:
|
|
||||||
case BPF_TCX_EGRESS:
|
|
||||||
relative_fd = OPTS_GET(opts, tcx.relative_fd, 0);
|
|
||||||
relative_id = OPTS_GET(opts, tcx.relative_id, 0);
|
|
||||||
if (relative_fd && relative_id)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
if (relative_id) {
|
|
||||||
attr.link_create.tcx.relative_id = relative_id;
|
|
||||||
attr.link_create.flags |= BPF_F_ID;
|
|
||||||
} else {
|
|
||||||
attr.link_create.tcx.relative_fd = relative_fd;
|
|
||||||
}
|
|
||||||
attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0);
|
|
||||||
if (!OPTS_ZEROED(opts, tcx))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
break;
|
|
||||||
case BPF_NETKIT_PRIMARY:
|
|
||||||
case BPF_NETKIT_PEER:
|
|
||||||
relative_fd = OPTS_GET(opts, netkit.relative_fd, 0);
|
|
||||||
relative_id = OPTS_GET(opts, netkit.relative_id, 0);
|
|
||||||
if (relative_fd && relative_id)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
if (relative_id) {
|
|
||||||
attr.link_create.netkit.relative_id = relative_id;
|
|
||||||
attr.link_create.flags |= BPF_F_ID;
|
|
||||||
} else {
|
|
||||||
attr.link_create.netkit.relative_fd = relative_fd;
|
|
||||||
}
|
|
||||||
attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0);
|
|
||||||
if (!OPTS_ZEROED(opts, netkit))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
break;
|
|
||||||
default:
|
default:
|
||||||
if (!OPTS_ZEROED(opts, flags))
|
if (!OPTS_ZEROED(opts, flags))
|
||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
@@ -929,7 +841,8 @@ int bpf_iter_create(int link_fd)
|
|||||||
return libbpf_err_errno(fd);
|
return libbpf_err_errno(fd);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_prog_query_opts(int target, enum bpf_attach_type type,
|
int bpf_prog_query_opts(int target_fd,
|
||||||
|
enum bpf_attach_type type,
|
||||||
struct bpf_prog_query_opts *opts)
|
struct bpf_prog_query_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, query);
|
const size_t attr_sz = offsetofend(union bpf_attr, query);
|
||||||
@@ -940,20 +853,18 @@ int bpf_prog_query_opts(int target, enum bpf_attach_type type,
|
|||||||
return libbpf_err(-EINVAL);
|
return libbpf_err(-EINVAL);
|
||||||
|
|
||||||
memset(&attr, 0, attr_sz);
|
memset(&attr, 0, attr_sz);
|
||||||
attr.query.target_fd = target;
|
|
||||||
attr.query.attach_type = type;
|
attr.query.target_fd = target_fd;
|
||||||
attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
|
attr.query.attach_type = type;
|
||||||
attr.query.count = OPTS_GET(opts, count, 0);
|
attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
|
||||||
attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
|
attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
|
||||||
attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL));
|
attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
|
||||||
attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
|
attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
|
||||||
attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL));
|
|
||||||
|
|
||||||
ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
|
ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
|
||||||
|
|
||||||
OPTS_SET(opts, attach_flags, attr.query.attach_flags);
|
OPTS_SET(opts, attach_flags, attr.query.attach_flags);
|
||||||
OPTS_SET(opts, revision, attr.query.revision);
|
OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
|
||||||
OPTS_SET(opts, count, attr.query.count);
|
|
||||||
|
|
||||||
return libbpf_err_errno(ret);
|
return libbpf_err_errno(ret);
|
||||||
}
|
}
|
||||||
@@ -1097,7 +1008,7 @@ int bpf_map_get_fd_by_id(__u32 id)
|
|||||||
int bpf_btf_get_fd_by_id_opts(__u32 id,
|
int bpf_btf_get_fd_by_id_opts(__u32 id,
|
||||||
const struct bpf_get_fd_by_id_opts *opts)
|
const struct bpf_get_fd_by_id_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, fd_by_id_token_fd);
|
const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
int fd;
|
int fd;
|
||||||
|
|
||||||
@@ -1107,7 +1018,6 @@ int bpf_btf_get_fd_by_id_opts(__u32 id,
|
|||||||
memset(&attr, 0, attr_sz);
|
memset(&attr, 0, attr_sz);
|
||||||
attr.btf_id = id;
|
attr.btf_id = id;
|
||||||
attr.open_flags = OPTS_GET(opts, open_flags, 0);
|
attr.open_flags = OPTS_GET(opts, open_flags, 0);
|
||||||
attr.fd_by_id_token_fd = OPTS_GET(opts, token_fd, 0);
|
|
||||||
|
|
||||||
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
|
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
|
||||||
return libbpf_err_errno(fd);
|
return libbpf_err_errno(fd);
|
||||||
@@ -1178,34 +1088,23 @@ int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info
|
|||||||
return bpf_obj_get_info_by_fd(link_fd, info, info_len);
|
return bpf_obj_get_info_by_fd(link_fd, info, info_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts)
|
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
|
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
int fd;
|
int fd;
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_raw_tp_opts))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
memset(&attr, 0, attr_sz);
|
memset(&attr, 0, attr_sz);
|
||||||
|
attr.raw_tracepoint.name = ptr_to_u64(name);
|
||||||
attr.raw_tracepoint.prog_fd = prog_fd;
|
attr.raw_tracepoint.prog_fd = prog_fd;
|
||||||
attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL));
|
|
||||||
attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0);
|
|
||||||
|
|
||||||
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
|
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
|
||||||
return libbpf_err_errno(fd);
|
return libbpf_err_errno(fd);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
|
|
||||||
{
|
|
||||||
LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name);
|
|
||||||
|
|
||||||
return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
|
|
||||||
}
|
|
||||||
|
|
||||||
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
|
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
|
||||||
{
|
{
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
|
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
char *log_buf;
|
char *log_buf;
|
||||||
size_t log_size;
|
size_t log_size;
|
||||||
@@ -1230,10 +1129,6 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts
|
|||||||
|
|
||||||
attr.btf = ptr_to_u64(btf_data);
|
attr.btf = ptr_to_u64(btf_data);
|
||||||
attr.btf_size = btf_size;
|
attr.btf_size = btf_size;
|
||||||
|
|
||||||
attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
|
|
||||||
attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
|
|
||||||
|
|
||||||
/* log_level == 0 and log_buf != NULL means "try loading without
|
/* log_level == 0 and log_buf != NULL means "try loading without
|
||||||
* log_buf, but retry with log_buf and log_level=1 on error", which is
|
* log_buf, but retry with log_buf and log_level=1 on error", which is
|
||||||
* consistent across low-level and high-level BTF and program loading
|
* consistent across low-level and high-level BTF and program loading
|
||||||
@@ -1314,20 +1209,3 @@ int bpf_prog_bind_map(int prog_fd, int map_fd,
|
|||||||
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
|
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
|
||||||
return libbpf_err_errno(ret);
|
return libbpf_err_errno(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
|
|
||||||
{
|
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, token_create);
|
|
||||||
union bpf_attr attr;
|
|
||||||
int fd;
|
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_token_create_opts))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
memset(&attr, 0, attr_sz);
|
|
||||||
attr.token_create.bpffs_fd = bpffs_fd;
|
|
||||||
attr.token_create.flags = OPTS_GET(opts, flags, 0);
|
|
||||||
|
|
||||||
fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
|
|
||||||
return libbpf_err_errno(fd);
|
|
||||||
}
|
|
||||||
|
|||||||
211
src/bpf.h
211
src/bpf.h
@@ -35,7 +35,7 @@
|
|||||||
extern "C" {
|
extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
|
int libbpf_set_memlock_rlim(size_t memlock_bytes);
|
||||||
|
|
||||||
struct bpf_map_create_opts {
|
struct bpf_map_create_opts {
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||||
@@ -51,12 +51,8 @@ struct bpf_map_create_opts {
|
|||||||
|
|
||||||
__u32 numa_node;
|
__u32 numa_node;
|
||||||
__u32 map_ifindex;
|
__u32 map_ifindex;
|
||||||
__s32 value_type_btf_obj_fd;
|
|
||||||
|
|
||||||
__u32 token_fd;
|
|
||||||
size_t :0;
|
|
||||||
};
|
};
|
||||||
#define bpf_map_create_opts__last_field token_fd
|
#define bpf_map_create_opts__last_field map_ifindex
|
||||||
|
|
||||||
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
|
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
|
||||||
const char *map_name,
|
const char *map_name,
|
||||||
@@ -100,19 +96,15 @@ struct bpf_prog_load_opts {
|
|||||||
__u32 log_level;
|
__u32 log_level;
|
||||||
__u32 log_size;
|
__u32 log_size;
|
||||||
char *log_buf;
|
char *log_buf;
|
||||||
/* output: actual total log contents size (including terminating zero).
|
/* output: actual total log contents size (including termintaing zero).
|
||||||
* It could be both larger than original log_size (if log was
|
* It could be both larger than original log_size (if log was
|
||||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||||
*/
|
*/
|
||||||
__u32 log_true_size;
|
__u32 log_true_size;
|
||||||
__u32 token_fd;
|
|
||||||
|
|
||||||
/* if set, provides the length of fd_array */
|
|
||||||
__u32 fd_array_cnt;
|
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
#define bpf_prog_load_opts__last_field fd_array_cnt
|
#define bpf_prog_load_opts__last_field log_true_size
|
||||||
|
|
||||||
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
|
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||||
const char *prog_name, const char *license,
|
const char *prog_name, const char *license,
|
||||||
@@ -132,18 +124,15 @@ struct bpf_btf_load_opts {
|
|||||||
char *log_buf;
|
char *log_buf;
|
||||||
__u32 log_level;
|
__u32 log_level;
|
||||||
__u32 log_size;
|
__u32 log_size;
|
||||||
/* output: actual total log contents size (including terminating zero).
|
/* output: actual total log contents size (including termintaing zero).
|
||||||
* It could be both larger than original log_size (if log was
|
* It could be both larger than original log_size (if log was
|
||||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||||
*/
|
*/
|
||||||
__u32 log_true_size;
|
__u32 log_true_size;
|
||||||
|
|
||||||
__u32 btf_flags;
|
|
||||||
__u32 token_fd;
|
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
#define bpf_btf_load_opts__last_field token_fd
|
#define bpf_btf_load_opts__last_field log_true_size
|
||||||
|
|
||||||
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
|
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
|
||||||
struct bpf_btf_load_opts *opts);
|
struct bpf_btf_load_opts *opts);
|
||||||
@@ -193,14 +182,10 @@ LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
|
|||||||
/**
|
/**
|
||||||
* @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
|
* @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
|
||||||
*
|
*
|
||||||
* The parameter *in_batch* is the address of the first element in the batch to
|
* The parameter *in_batch* is the address of the first element in the batch to read.
|
||||||
* read. *out_batch* is an output parameter that should be passed as *in_batch*
|
* *out_batch* is an output parameter that should be passed as *in_batch* to subsequent
|
||||||
* to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for
|
* calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate
|
||||||
* *in_batch* to indicate that the batched lookup starts from the beginning of
|
* that the batched lookup starts from the beginning of the map.
|
||||||
* the map. Both *in_batch* and *out_batch* must point to memory large enough to
|
|
||||||
* hold a single key, except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
|
|
||||||
* LRU_HASH, LRU_PERCPU_HASH}**, for which the memory size must be at
|
|
||||||
* least 4 bytes wide regardless of key size.
|
|
||||||
*
|
*
|
||||||
* The *keys* and *values* are output parameters which must point to memory large enough to
|
* The *keys* and *values* are output parameters which must point to memory large enough to
|
||||||
* hold *count* items based on the key and value size of the map *map_fd*. The *keys*
|
* hold *count* items based on the key and value size of the map *map_fd*. The *keys*
|
||||||
@@ -233,10 +218,7 @@ LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
|
|||||||
*
|
*
|
||||||
* @param fd BPF map file descriptor
|
* @param fd BPF map file descriptor
|
||||||
* @param in_batch address of the first element in batch to read, can pass NULL to
|
* @param in_batch address of the first element in batch to read, can pass NULL to
|
||||||
* get address of the first element in *out_batch*. If not NULL, must be large
|
* get address of the first element in *out_batch*
|
||||||
* enough to hold a key. For **BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH,
|
|
||||||
* LRU_PERCPU_HASH}**, the memory size must be at least 4 bytes wide regardless
|
|
||||||
* of key size.
|
|
||||||
* @param out_batch output parameter that should be passed to next call as *in_batch*
|
* @param out_batch output parameter that should be passed to next call as *in_batch*
|
||||||
* @param keys pointer to an array of *count* keys
|
* @param keys pointer to an array of *count* keys
|
||||||
* @param values pointer to an array large enough for *count* values
|
* @param values pointer to an array large enough for *count* values
|
||||||
@@ -330,68 +312,22 @@ LIBBPF_API int bpf_obj_get(const char *pathname);
|
|||||||
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
|
LIBBPF_API int bpf_obj_get_opts(const char *pathname,
|
||||||
const struct bpf_obj_get_opts *opts);
|
const struct bpf_obj_get_opts *opts);
|
||||||
|
|
||||||
|
struct bpf_prog_attach_opts {
|
||||||
|
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||||
|
unsigned int flags;
|
||||||
|
int replace_prog_fd;
|
||||||
|
};
|
||||||
|
#define bpf_prog_attach_opts__last_field replace_prog_fd
|
||||||
|
|
||||||
LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
|
LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
|
||||||
enum bpf_attach_type type, unsigned int flags);
|
enum bpf_attach_type type, unsigned int flags);
|
||||||
|
LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd,
|
||||||
|
enum bpf_attach_type type,
|
||||||
|
const struct bpf_prog_attach_opts *opts);
|
||||||
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
|
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
|
||||||
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
|
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
|
||||||
enum bpf_attach_type type);
|
enum bpf_attach_type type);
|
||||||
|
|
||||||
struct bpf_prog_attach_opts {
|
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
|
||||||
__u32 flags;
|
|
||||||
union {
|
|
||||||
int replace_prog_fd;
|
|
||||||
int replace_fd;
|
|
||||||
};
|
|
||||||
int relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
__u64 expected_revision;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_prog_attach_opts__last_field expected_revision
|
|
||||||
|
|
||||||
struct bpf_prog_detach_opts {
|
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
|
||||||
__u32 flags;
|
|
||||||
int relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
__u64 expected_revision;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_prog_detach_opts__last_field expected_revision
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_prog_attach_opts()** attaches the BPF program corresponding to
|
|
||||||
* *prog_fd* to a *target* which can represent a file descriptor or netdevice
|
|
||||||
* ifindex.
|
|
||||||
*
|
|
||||||
* @param prog_fd BPF program file descriptor
|
|
||||||
* @param target attach location file descriptor or ifindex
|
|
||||||
* @param type attach type for the BPF program
|
|
||||||
* @param opts options for configuring the attachment
|
|
||||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
|
||||||
* the error code)
|
|
||||||
*/
|
|
||||||
LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int target,
|
|
||||||
enum bpf_attach_type type,
|
|
||||||
const struct bpf_prog_attach_opts *opts);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_prog_detach_opts()** detaches the BPF program corresponding to
|
|
||||||
* *prog_fd* from a *target* which can represent a file descriptor or netdevice
|
|
||||||
* ifindex.
|
|
||||||
*
|
|
||||||
* @param prog_fd BPF program file descriptor
|
|
||||||
* @param target detach location file descriptor or ifindex
|
|
||||||
* @param type detach type for the BPF program
|
|
||||||
* @param opts options for configuring the detachment
|
|
||||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
|
||||||
* the error code)
|
|
||||||
*/
|
|
||||||
LIBBPF_API int bpf_prog_detach_opts(int prog_fd, int target,
|
|
||||||
enum bpf_attach_type type,
|
|
||||||
const struct bpf_prog_detach_opts *opts);
|
|
||||||
|
|
||||||
union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
|
union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
|
||||||
struct bpf_link_create_opts {
|
struct bpf_link_create_opts {
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||||
@@ -410,15 +346,6 @@ struct bpf_link_create_opts {
|
|||||||
const unsigned long *addrs;
|
const unsigned long *addrs;
|
||||||
const __u64 *cookies;
|
const __u64 *cookies;
|
||||||
} kprobe_multi;
|
} kprobe_multi;
|
||||||
struct {
|
|
||||||
__u32 flags;
|
|
||||||
__u32 cnt;
|
|
||||||
const char *path;
|
|
||||||
const unsigned long *offsets;
|
|
||||||
const unsigned long *ref_ctr_offsets;
|
|
||||||
const __u64 *cookies;
|
|
||||||
__u32 pid;
|
|
||||||
} uprobe_multi;
|
|
||||||
struct {
|
struct {
|
||||||
__u64 cookie;
|
__u64 cookie;
|
||||||
} tracing;
|
} tracing;
|
||||||
@@ -428,20 +355,10 @@ struct bpf_link_create_opts {
|
|||||||
__s32 priority;
|
__s32 priority;
|
||||||
__u32 flags;
|
__u32 flags;
|
||||||
} netfilter;
|
} netfilter;
|
||||||
struct {
|
|
||||||
__u32 relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
__u64 expected_revision;
|
|
||||||
} tcx;
|
|
||||||
struct {
|
|
||||||
__u32 relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
__u64 expected_revision;
|
|
||||||
} netkit;
|
|
||||||
};
|
};
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
#define bpf_link_create_opts__last_field uprobe_multi.pid
|
#define bpf_link_create_opts__last_field kprobe_multi.cookies
|
||||||
|
|
||||||
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
|
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
|
||||||
enum bpf_attach_type attach_type,
|
enum bpf_attach_type attach_type,
|
||||||
@@ -487,10 +404,9 @@ LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);
|
|||||||
struct bpf_get_fd_by_id_opts {
|
struct bpf_get_fd_by_id_opts {
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||||
__u32 open_flags; /* permissions requested for the operation on fd */
|
__u32 open_flags; /* permissions requested for the operation on fd */
|
||||||
__u32 token_fd;
|
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
#define bpf_get_fd_by_id_opts__last_field token_fd
|
#define bpf_get_fd_by_id_opts__last_field open_flags
|
||||||
|
|
||||||
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
|
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
|
||||||
LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
|
LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
|
||||||
@@ -511,10 +427,7 @@ LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
|
|||||||
* program corresponding to *prog_fd*.
|
* program corresponding to *prog_fd*.
|
||||||
*
|
*
|
||||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||||
* actual number of bytes written to *info*. Note that *info* should be
|
* actual number of bytes written to *info*.
|
||||||
* zero-initialized or initialized as expected by the requested *info*
|
|
||||||
* type. Failing to (zero-)initialize *info* under certain circumstances can
|
|
||||||
* result in this helper returning an error.
|
|
||||||
*
|
*
|
||||||
* @param prog_fd BPF program file descriptor
|
* @param prog_fd BPF program file descriptor
|
||||||
* @param info pointer to **struct bpf_prog_info** that will be populated with
|
* @param info pointer to **struct bpf_prog_info** that will be populated with
|
||||||
@@ -531,10 +444,7 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
|
|||||||
* map corresponding to *map_fd*.
|
* map corresponding to *map_fd*.
|
||||||
*
|
*
|
||||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||||
* actual number of bytes written to *info*. Note that *info* should be
|
* actual number of bytes written to *info*.
|
||||||
* zero-initialized or initialized as expected by the requested *info*
|
|
||||||
* type. Failing to (zero-)initialize *info* under certain circumstances can
|
|
||||||
* result in this helper returning an error.
|
|
||||||
*
|
*
|
||||||
* @param map_fd BPF map file descriptor
|
* @param map_fd BPF map file descriptor
|
||||||
* @param info pointer to **struct bpf_map_info** that will be populated with
|
* @param info pointer to **struct bpf_map_info** that will be populated with
|
||||||
@@ -547,14 +457,11 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
|
|||||||
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
|
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **bpf_btf_get_info_by_fd()** obtains information about the
|
* @brief **bpf_btf_get_info_by_fd()** obtains information about the
|
||||||
* BTF object corresponding to *btf_fd*.
|
* BTF object corresponding to *btf_fd*.
|
||||||
*
|
*
|
||||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||||
* actual number of bytes written to *info*. Note that *info* should be
|
* actual number of bytes written to *info*.
|
||||||
* zero-initialized or initialized as expected by the requested *info*
|
|
||||||
* type. Failing to (zero-)initialize *info* under certain circumstances can
|
|
||||||
* result in this helper returning an error.
|
|
||||||
*
|
*
|
||||||
* @param btf_fd BTF object file descriptor
|
* @param btf_fd BTF object file descriptor
|
||||||
* @param info pointer to **struct bpf_btf_info** that will be populated with
|
* @param info pointer to **struct bpf_btf_info** that will be populated with
|
||||||
@@ -571,10 +478,7 @@ LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u
|
|||||||
* link corresponding to *link_fd*.
|
* link corresponding to *link_fd*.
|
||||||
*
|
*
|
||||||
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
|
||||||
* actual number of bytes written to *info*. Note that *info* should be
|
* actual number of bytes written to *info*.
|
||||||
* zero-initialized or initialized as expected by the requested *info*
|
|
||||||
* type. Failing to (zero-)initialize *info* under certain circumstances can
|
|
||||||
* result in this helper returning an error.
|
|
||||||
*
|
*
|
||||||
* @param link_fd BPF link file descriptor
|
* @param link_fd BPF link file descriptor
|
||||||
* @param info pointer to **struct bpf_link_info** that will be populated with
|
* @param info pointer to **struct bpf_link_info** that will be populated with
|
||||||
@@ -591,45 +495,18 @@ struct bpf_prog_query_opts {
|
|||||||
__u32 query_flags;
|
__u32 query_flags;
|
||||||
__u32 attach_flags; /* output argument */
|
__u32 attach_flags; /* output argument */
|
||||||
__u32 *prog_ids;
|
__u32 *prog_ids;
|
||||||
union {
|
__u32 prog_cnt; /* input+output argument */
|
||||||
/* input+output argument */
|
|
||||||
__u32 prog_cnt;
|
|
||||||
__u32 count;
|
|
||||||
};
|
|
||||||
__u32 *prog_attach_flags;
|
__u32 *prog_attach_flags;
|
||||||
__u32 *link_ids;
|
|
||||||
__u32 *link_attach_flags;
|
|
||||||
__u64 revision;
|
|
||||||
size_t :0;
|
|
||||||
};
|
};
|
||||||
#define bpf_prog_query_opts__last_field revision
|
#define bpf_prog_query_opts__last_field prog_attach_flags
|
||||||
|
|
||||||
/**
|
LIBBPF_API int bpf_prog_query_opts(int target_fd,
|
||||||
* @brief **bpf_prog_query_opts()** queries the BPF programs and BPF links
|
enum bpf_attach_type type,
|
||||||
* which are attached to *target* which can represent a file descriptor or
|
|
||||||
* netdevice ifindex.
|
|
||||||
*
|
|
||||||
* @param target query location file descriptor or ifindex
|
|
||||||
* @param type attach type for the BPF program
|
|
||||||
* @param opts options for configuring the query
|
|
||||||
* @return 0, on success; negative error code, otherwise (errno is also set to
|
|
||||||
* the error code)
|
|
||||||
*/
|
|
||||||
LIBBPF_API int bpf_prog_query_opts(int target, enum bpf_attach_type type,
|
|
||||||
struct bpf_prog_query_opts *opts);
|
struct bpf_prog_query_opts *opts);
|
||||||
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
|
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
|
||||||
__u32 query_flags, __u32 *attach_flags,
|
__u32 query_flags, __u32 *attach_flags,
|
||||||
__u32 *prog_ids, __u32 *prog_cnt);
|
__u32 *prog_ids, __u32 *prog_cnt);
|
||||||
|
|
||||||
struct bpf_raw_tp_opts {
|
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
|
||||||
const char *tp_name;
|
|
||||||
__u64 cookie;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_raw_tp_opts__last_field cookie
|
|
||||||
|
|
||||||
LIBBPF_API int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts);
|
|
||||||
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
|
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
|
||||||
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
|
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
|
||||||
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
|
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
|
||||||
@@ -680,30 +557,6 @@ struct bpf_test_run_opts {
|
|||||||
LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
|
LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
|
||||||
struct bpf_test_run_opts *opts);
|
struct bpf_test_run_opts *opts);
|
||||||
|
|
||||||
struct bpf_token_create_opts {
|
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
|
||||||
__u32 flags;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_token_create_opts__last_field flags
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_token_create()** creates a new instance of BPF token derived
|
|
||||||
* from specified BPF FS mount point.
|
|
||||||
*
|
|
||||||
* BPF token created with this API can be passed to bpf() syscall for
|
|
||||||
* commands like BPF_PROG_LOAD, BPF_MAP_CREATE, etc.
|
|
||||||
*
|
|
||||||
* @param bpffs_fd FD for BPF FS instance from which to derive a BPF token
|
|
||||||
* instance.
|
|
||||||
* @param opts optional BPF token creation options, can be NULL
|
|
||||||
*
|
|
||||||
* @return BPF token FD > 0, on success; negative error code, otherwise (errno
|
|
||||||
* is also set to the error code)
|
|
||||||
*/
|
|
||||||
LIBBPF_API int bpf_token_create(int bpffs_fd,
|
|
||||||
struct bpf_token_create_opts *opts);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
} /* extern "C" */
|
} /* extern "C" */
|
||||||
#endif
|
#endif
|
||||||
|
|||||||
@@ -2,8 +2,6 @@
|
|||||||
#ifndef __BPF_CORE_READ_H__
|
#ifndef __BPF_CORE_READ_H__
|
||||||
#define __BPF_CORE_READ_H__
|
#define __BPF_CORE_READ_H__
|
||||||
|
|
||||||
#include "bpf_helpers.h"
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* enum bpf_field_info_kind is passed as a second argument into
|
* enum bpf_field_info_kind is passed as a second argument into
|
||||||
* __builtin_preserve_field_info() built-in to get a specific aspect of
|
* __builtin_preserve_field_info() built-in to get a specific aspect of
|
||||||
@@ -46,7 +44,7 @@ enum bpf_enum_value_kind {
|
|||||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||||
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
|
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
|
||||||
bpf_probe_read_kernel( \
|
bpf_probe_read_kernel( \
|
||||||
(void *)dst, \
|
(void *)dst, \
|
||||||
__CORE_RELO(src, fld, BYTE_SIZE), \
|
__CORE_RELO(src, fld, BYTE_SIZE), \
|
||||||
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
|
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
|
||||||
#else
|
#else
|
||||||
@@ -104,7 +102,6 @@ enum bpf_enum_value_kind {
|
|||||||
case 2: val = *(const unsigned short *)p; break; \
|
case 2: val = *(const unsigned short *)p; break; \
|
||||||
case 4: val = *(const unsigned int *)p; break; \
|
case 4: val = *(const unsigned int *)p; break; \
|
||||||
case 8: val = *(const unsigned long long *)p; break; \
|
case 8: val = *(const unsigned long long *)p; break; \
|
||||||
default: val = 0; break; \
|
|
||||||
} \
|
} \
|
||||||
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
|
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
|
||||||
if (__CORE_RELO(s, field, SIGNED)) \
|
if (__CORE_RELO(s, field, SIGNED)) \
|
||||||
@@ -114,61 +111,8 @@ enum bpf_enum_value_kind {
|
|||||||
val; \
|
val; \
|
||||||
})
|
})
|
||||||
|
|
||||||
/*
|
|
||||||
* Write to a bitfield, identified by s->field.
|
|
||||||
* This is the inverse of BPF_CORE_WRITE_BITFIELD().
|
|
||||||
*/
|
|
||||||
#define BPF_CORE_WRITE_BITFIELD(s, field, new_val) ({ \
|
|
||||||
void *p = (void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
|
|
||||||
unsigned int byte_size = __CORE_RELO(s, field, BYTE_SIZE); \
|
|
||||||
unsigned int lshift = __CORE_RELO(s, field, LSHIFT_U64); \
|
|
||||||
unsigned int rshift = __CORE_RELO(s, field, RSHIFT_U64); \
|
|
||||||
unsigned long long mask, val, nval = new_val; \
|
|
||||||
unsigned int rpad = rshift - lshift; \
|
|
||||||
\
|
|
||||||
asm volatile("" : "+r"(p)); \
|
|
||||||
\
|
|
||||||
switch (byte_size) { \
|
|
||||||
case 1: val = *(unsigned char *)p; break; \
|
|
||||||
case 2: val = *(unsigned short *)p; break; \
|
|
||||||
case 4: val = *(unsigned int *)p; break; \
|
|
||||||
case 8: val = *(unsigned long long *)p; break; \
|
|
||||||
} \
|
|
||||||
\
|
|
||||||
mask = (~0ULL << rshift) >> lshift; \
|
|
||||||
val = (val & ~mask) | ((nval << rpad) & mask); \
|
|
||||||
\
|
|
||||||
switch (byte_size) { \
|
|
||||||
case 1: *(unsigned char *)p = val; break; \
|
|
||||||
case 2: *(unsigned short *)p = val; break; \
|
|
||||||
case 4: *(unsigned int *)p = val; break; \
|
|
||||||
case 8: *(unsigned long long *)p = val; break; \
|
|
||||||
} \
|
|
||||||
})
|
|
||||||
|
|
||||||
/* Differentiator between compilers builtin implementations. This is a
|
|
||||||
* requirement due to the compiler parsing differences where GCC optimizes
|
|
||||||
* early in parsing those constructs of type pointers to the builtin specific
|
|
||||||
* type, resulting in not being possible to collect the required type
|
|
||||||
* information in the builtin expansion.
|
|
||||||
*/
|
|
||||||
#ifdef __clang__
|
|
||||||
#define ___bpf_typeof(type) ((typeof(type) *) 0)
|
|
||||||
#else
|
|
||||||
#define ___bpf_typeof1(type, NR) ({ \
|
|
||||||
extern typeof(type) *___concat(bpf_type_tmp_, NR); \
|
|
||||||
___concat(bpf_type_tmp_, NR); \
|
|
||||||
})
|
|
||||||
#define ___bpf_typeof(type) ___bpf_typeof1(type, __COUNTER__)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __clang__
|
|
||||||
#define ___bpf_field_ref1(field) (field)
|
#define ___bpf_field_ref1(field) (field)
|
||||||
#define ___bpf_field_ref2(type, field) (___bpf_typeof(type)->field)
|
#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
|
||||||
#else
|
|
||||||
#define ___bpf_field_ref1(field) (&(field))
|
|
||||||
#define ___bpf_field_ref2(type, field) (&(___bpf_typeof(type)->field))
|
|
||||||
#endif
|
|
||||||
#define ___bpf_field_ref(args...) \
|
#define ___bpf_field_ref(args...) \
|
||||||
___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
|
___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
@@ -218,7 +162,7 @@ enum bpf_enum_value_kind {
|
|||||||
* BTF. Always succeeds.
|
* BTF. Always succeeds.
|
||||||
*/
|
*/
|
||||||
#define bpf_core_type_id_local(type) \
|
#define bpf_core_type_id_local(type) \
|
||||||
__builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_LOCAL)
|
__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convenience macro to get BTF type ID of a target kernel's type that matches
|
* Convenience macro to get BTF type ID of a target kernel's type that matches
|
||||||
@@ -228,7 +172,7 @@ enum bpf_enum_value_kind {
|
|||||||
* - 0, if no matching type was found in a target kernel BTF.
|
* - 0, if no matching type was found in a target kernel BTF.
|
||||||
*/
|
*/
|
||||||
#define bpf_core_type_id_kernel(type) \
|
#define bpf_core_type_id_kernel(type) \
|
||||||
__builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_TARGET)
|
__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convenience macro to check that provided named type
|
* Convenience macro to check that provided named type
|
||||||
@@ -238,7 +182,7 @@ enum bpf_enum_value_kind {
|
|||||||
* 0, if no matching type is found.
|
* 0, if no matching type is found.
|
||||||
*/
|
*/
|
||||||
#define bpf_core_type_exists(type) \
|
#define bpf_core_type_exists(type) \
|
||||||
__builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_EXISTS)
|
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convenience macro to check that provided named type
|
* Convenience macro to check that provided named type
|
||||||
@@ -248,7 +192,7 @@ enum bpf_enum_value_kind {
|
|||||||
* 0, if the type does not match any in the target kernel
|
* 0, if the type does not match any in the target kernel
|
||||||
*/
|
*/
|
||||||
#define bpf_core_type_matches(type) \
|
#define bpf_core_type_matches(type) \
|
||||||
__builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_MATCHES)
|
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convenience macro to get the byte size of a provided named type
|
* Convenience macro to get the byte size of a provided named type
|
||||||
@@ -258,7 +202,7 @@ enum bpf_enum_value_kind {
|
|||||||
* 0, if no matching type is found.
|
* 0, if no matching type is found.
|
||||||
*/
|
*/
|
||||||
#define bpf_core_type_size(type) \
|
#define bpf_core_type_size(type) \
|
||||||
__builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_SIZE)
|
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convenience macro to check that provided enumerator value is defined in
|
* Convenience macro to check that provided enumerator value is defined in
|
||||||
@@ -268,13 +212,8 @@ enum bpf_enum_value_kind {
|
|||||||
* kernel's BTF;
|
* kernel's BTF;
|
||||||
* 0, if no matching enum and/or enum value within that enum is found.
|
* 0, if no matching enum and/or enum value within that enum is found.
|
||||||
*/
|
*/
|
||||||
#ifdef __clang__
|
|
||||||
#define bpf_core_enum_value_exists(enum_type, enum_value) \
|
#define bpf_core_enum_value_exists(enum_type, enum_value) \
|
||||||
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
|
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
|
||||||
#else
|
|
||||||
#define bpf_core_enum_value_exists(enum_type, enum_value) \
|
|
||||||
__builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_EXISTS)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convenience macro to get the integer value of an enumerator value in
|
* Convenience macro to get the integer value of an enumerator value in
|
||||||
@@ -284,13 +223,8 @@ enum bpf_enum_value_kind {
|
|||||||
* present in target kernel's BTF;
|
* present in target kernel's BTF;
|
||||||
* 0, if no matching enum and/or enum value within that enum is found.
|
* 0, if no matching enum and/or enum value within that enum is found.
|
||||||
*/
|
*/
|
||||||
#ifdef __clang__
|
|
||||||
#define bpf_core_enum_value(enum_type, enum_value) \
|
#define bpf_core_enum_value(enum_type, enum_value) \
|
||||||
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
|
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
|
||||||
#else
|
|
||||||
#define bpf_core_enum_value(enum_type, enum_value) \
|
|
||||||
__builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_VALUE)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
|
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
|
||||||
@@ -302,7 +236,7 @@ enum bpf_enum_value_kind {
|
|||||||
* a relocation, which records BTF type ID describing root struct/union and an
|
* a relocation, which records BTF type ID describing root struct/union and an
|
||||||
* accessor string which describes exact embedded field that was used to take
|
* accessor string which describes exact embedded field that was used to take
|
||||||
* an address. See detailed description of this relocation format and
|
* an address. See detailed description of this relocation format and
|
||||||
* semantics in comments to struct bpf_core_relo in include/uapi/linux/bpf.h.
|
* semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
|
||||||
*
|
*
|
||||||
* This relocation allows libbpf to adjust BPF instruction to use correct
|
* This relocation allows libbpf to adjust BPF instruction to use correct
|
||||||
* actual field offset, based on target kernel BTF type that matches original
|
* actual field offset, based on target kernel BTF type that matches original
|
||||||
@@ -326,17 +260,6 @@ enum bpf_enum_value_kind {
|
|||||||
#define bpf_core_read_user_str(dst, sz, src) \
|
#define bpf_core_read_user_str(dst, sz, src) \
|
||||||
bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
|
bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
|
||||||
|
|
||||||
extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Cast provided pointer *ptr* into a pointer to a specified *type* in such
|
|
||||||
* a way that BPF verifier will become aware of associated kernel-side BTF
|
|
||||||
* type. This allows to access members of kernel types directly without the
|
|
||||||
* need to use BPF_CORE_READ() macros.
|
|
||||||
*/
|
|
||||||
#define bpf_core_cast(ptr, type) \
|
|
||||||
((typeof(type) *)bpf_rdonly_cast((ptr), bpf_core_type_id_kernel(type)))
|
|
||||||
|
|
||||||
#define ___concat(a, b) a ## b
|
#define ___concat(a, b) a ## b
|
||||||
#define ___apply(fn, n) ___concat(fn, n)
|
#define ___apply(fn, n) ___concat(fn, n)
|
||||||
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
|
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
|
||||||
@@ -388,13 +311,7 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
|
|||||||
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
|
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
|
||||||
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
|
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
|
||||||
|
|
||||||
#if defined(__clang__) && (__clang_major__ >= 19)
|
|
||||||
#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
|
|
||||||
#elif defined(__GNUC__) && (__GNUC__ >= 14)
|
|
||||||
#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__))
|
|
||||||
#else
|
|
||||||
#define ___type(...) typeof(___arrow(__VA_ARGS__))
|
#define ___type(...) typeof(___arrow(__VA_ARGS__))
|
||||||
#endif
|
|
||||||
|
|
||||||
#define ___read(read_fn, dst, src_type, src, accessor) \
|
#define ___read(read_fn, dst, src_type, src, accessor) \
|
||||||
read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
|
read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
|
||||||
|
|||||||
@@ -34,7 +34,6 @@ struct bpf_gen {
|
|||||||
void *data_cur;
|
void *data_cur;
|
||||||
void *insn_start;
|
void *insn_start;
|
||||||
void *insn_cur;
|
void *insn_cur;
|
||||||
bool swapped_endian;
|
|
||||||
ssize_t cleanup_label;
|
ssize_t cleanup_label;
|
||||||
__u32 nr_progs;
|
__u32 nr_progs;
|
||||||
__u32 nr_maps;
|
__u32 nr_maps;
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -13,15 +13,6 @@
|
|||||||
#define __uint(name, val) int (*name)[val]
|
#define __uint(name, val) int (*name)[val]
|
||||||
#define __type(name, val) typeof(val) *name
|
#define __type(name, val) typeof(val) *name
|
||||||
#define __array(name, val) typeof(val) *name[]
|
#define __array(name, val) typeof(val) *name[]
|
||||||
#define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
|
|
||||||
|
|
||||||
#ifndef likely
|
|
||||||
#define likely(x) (__builtin_expect(!!(x), 1))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef unlikely
|
|
||||||
#define unlikely(x) (__builtin_expect(!!(x), 0))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Helper macro to place programs, maps, license in
|
* Helper macro to place programs, maps, license in
|
||||||
@@ -145,8 +136,7 @@
|
|||||||
/*
|
/*
|
||||||
* Helper function to perform a tail call with a constant/immediate map slot.
|
* Helper function to perform a tail call with a constant/immediate map slot.
|
||||||
*/
|
*/
|
||||||
#if (defined(__clang__) && __clang_major__ >= 8) || (!defined(__clang__) && __GNUC__ > 12)
|
#if __clang_major__ >= 8 && defined(__bpf__)
|
||||||
#if defined(__bpf__)
|
|
||||||
static __always_inline void
|
static __always_inline void
|
||||||
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
|
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
|
||||||
{
|
{
|
||||||
@@ -174,7 +164,6 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
|
|||||||
: "r0", "r1", "r2", "r3", "r4", "r5");
|
: "r0", "r1", "r2", "r3", "r4", "r5");
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
#endif
|
|
||||||
|
|
||||||
enum libbpf_pin_type {
|
enum libbpf_pin_type {
|
||||||
LIBBPF_PIN_NONE,
|
LIBBPF_PIN_NONE,
|
||||||
@@ -192,30 +181,11 @@ enum libbpf_tristate {
|
|||||||
#define __ksym __attribute__((section(".ksyms")))
|
#define __ksym __attribute__((section(".ksyms")))
|
||||||
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
|
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
|
||||||
#define __kptr __attribute__((btf_type_tag("kptr")))
|
#define __kptr __attribute__((btf_type_tag("kptr")))
|
||||||
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
|
|
||||||
#define __uptr __attribute__((btf_type_tag("uptr")))
|
|
||||||
|
|
||||||
#if defined (__clang__)
|
#define bpf_ksym_exists(sym) ({ \
|
||||||
#define bpf_ksym_exists(sym) ({ \
|
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
|
||||||
_Static_assert(!__builtin_constant_p(!!sym), \
|
!!sym; \
|
||||||
#sym " should be marked as __weak"); \
|
|
||||||
!!sym; \
|
|
||||||
})
|
})
|
||||||
#elif __GNUC__ > 8
|
|
||||||
#define bpf_ksym_exists(sym) ({ \
|
|
||||||
_Static_assert(__builtin_has_attribute (*sym, __weak__), \
|
|
||||||
#sym " should be marked as __weak"); \
|
|
||||||
!!sym; \
|
|
||||||
})
|
|
||||||
#else
|
|
||||||
#define bpf_ksym_exists(sym) !!sym
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
|
|
||||||
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
|
|
||||||
#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
|
|
||||||
#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
|
|
||||||
#define __arg_arena __attribute((btf_decl_tag("arg:arena")))
|
|
||||||
|
|
||||||
#ifndef ___bpf_concat
|
#ifndef ___bpf_concat
|
||||||
#define ___bpf_concat(a, b) a ## b
|
#define ___bpf_concat(a, b) a ## b
|
||||||
@@ -350,7 +320,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
|
|||||||
* I.e., it looks almost like high-level for each loop in other languages,
|
* I.e., it looks almost like high-level for each loop in other languages,
|
||||||
* supports continue/break, and is verifiable by BPF verifier.
|
* supports continue/break, and is verifiable by BPF verifier.
|
||||||
*
|
*
|
||||||
* For iterating integers, the difference between bpf_for_each(num, i, N, M)
|
* For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
|
||||||
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
|
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
|
||||||
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
|
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
|
||||||
* *`, not just `int`. So for integers bpf_for() is more convenient.
|
* *`, not just `int`. So for integers bpf_for() is more convenient.
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
#ifndef __BPF_TRACING_H__
|
#ifndef __BPF_TRACING_H__
|
||||||
#define __BPF_TRACING_H__
|
#define __BPF_TRACING_H__
|
||||||
|
|
||||||
#include "bpf_helpers.h"
|
#include <bpf/bpf_helpers.h>
|
||||||
|
|
||||||
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
|
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
|
||||||
#if defined(__TARGET_ARCH_x86)
|
#if defined(__TARGET_ARCH_x86)
|
||||||
@@ -163,7 +163,7 @@
|
|||||||
|
|
||||||
struct pt_regs___s390 {
|
struct pt_regs___s390 {
|
||||||
unsigned long orig_gpr2;
|
unsigned long orig_gpr2;
|
||||||
} __attribute__((preserve_access_index));
|
};
|
||||||
|
|
||||||
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
|
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
|
||||||
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
|
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
|
||||||
@@ -179,7 +179,7 @@ struct pt_regs___s390 {
|
|||||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||||
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
||||||
#define __PT_PARM6_SYSCALL_REG gprs[7]
|
#define __PT_PARM6_SYSCALL_REG gprs[7]
|
||||||
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG)
|
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
||||||
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
|
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
|
||||||
|
|
||||||
@@ -222,7 +222,7 @@ struct pt_regs___s390 {
|
|||||||
|
|
||||||
struct pt_regs___arm64 {
|
struct pt_regs___arm64 {
|
||||||
unsigned long orig_x0;
|
unsigned long orig_x0;
|
||||||
} __attribute__((preserve_access_index));
|
};
|
||||||
|
|
||||||
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||||
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
|
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
|
||||||
@@ -241,7 +241,7 @@ struct pt_regs___arm64 {
|
|||||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||||
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
||||||
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
|
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
|
||||||
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG)
|
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
||||||
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
|
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
|
||||||
|
|
||||||
@@ -351,10 +351,6 @@ struct pt_regs___arm64 {
|
|||||||
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
|
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
|
||||||
*/
|
*/
|
||||||
|
|
||||||
struct pt_regs___riscv {
|
|
||||||
unsigned long orig_a0;
|
|
||||||
} __attribute__((preserve_access_index));
|
|
||||||
|
|
||||||
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
|
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
|
||||||
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
|
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
|
||||||
#define __PT_PARM1_REG a0
|
#define __PT_PARM1_REG a0
|
||||||
@@ -366,15 +362,14 @@ struct pt_regs___riscv {
|
|||||||
#define __PT_PARM7_REG a6
|
#define __PT_PARM7_REG a6
|
||||||
#define __PT_PARM8_REG a7
|
#define __PT_PARM8_REG a7
|
||||||
|
|
||||||
#define __PT_PARM1_SYSCALL_REG orig_a0
|
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
|
||||||
|
#define PT_REGS_SYSCALL_REGS(ctx) ctx
|
||||||
|
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
|
||||||
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
|
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
|
||||||
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
|
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
|
||||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||||
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
||||||
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
|
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
|
||||||
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG)
|
|
||||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
|
||||||
BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG)
|
|
||||||
|
|
||||||
#define __PT_RET_REG ra
|
#define __PT_RET_REG ra
|
||||||
#define __PT_FP_REG s0
|
#define __PT_FP_REG s0
|
||||||
@@ -480,7 +475,7 @@ struct pt_regs;
|
|||||||
#endif
|
#endif
|
||||||
/*
|
/*
|
||||||
* Similarly, syscall-specific conventions might differ between function call
|
* Similarly, syscall-specific conventions might differ between function call
|
||||||
* conventions within each architecture. All supported architectures pass
|
* conventions within each architecutre. All supported architectures pass
|
||||||
* either 6 or 7 syscall arguments in registers.
|
* either 6 or 7 syscall arguments in registers.
|
||||||
*
|
*
|
||||||
* See syscall(2) manpage for succinct table with information on each arch.
|
* See syscall(2) manpage for succinct table with information on each arch.
|
||||||
@@ -522,7 +517,7 @@ struct pt_regs;
|
|||||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
|
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
|
||||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||||
|
|
||||||
#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)
|
#elif defined(bpf_target_sparc)
|
||||||
|
|
||||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
|
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
|
||||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||||
@@ -640,25 +635,25 @@ struct pt_regs;
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define ___bpf_ctx_cast0() ctx
|
#define ___bpf_ctx_cast0() ctx
|
||||||
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), ctx[0]
|
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
|
||||||
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), ctx[1]
|
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
|
||||||
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), ctx[2]
|
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
|
||||||
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), ctx[3]
|
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
|
||||||
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), ctx[4]
|
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
|
||||||
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), ctx[5]
|
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
|
||||||
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), ctx[6]
|
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
|
||||||
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), ctx[7]
|
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
|
||||||
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), ctx[8]
|
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
|
||||||
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), ctx[9]
|
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
|
||||||
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), ctx[10]
|
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
|
||||||
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), ctx[11]
|
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
|
||||||
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
|
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
|
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
|
||||||
* similar kinds of BPF programs, that accept input arguments as a single
|
* similar kinds of BPF programs, that accept input arguments as a single
|
||||||
* pointer to untyped u64 array, where each u64 can actually be a typed
|
* pointer to untyped u64 array, where each u64 can actually be a typed
|
||||||
* pointer or integer of different size. Instead of requiring user to write
|
* pointer or integer of different size. Instead of requring user to write
|
||||||
* manual casts and work with array elements by index, BPF_PROG macro
|
* manual casts and work with array elements by index, BPF_PROG macro
|
||||||
* allows user to declare a list of named and typed input arguments in the
|
* allows user to declare a list of named and typed input arguments in the
|
||||||
* same syntax as for normal C function. All the casting is hidden and
|
* same syntax as for normal C function. All the casting is hidden and
|
||||||
@@ -793,14 +788,14 @@ ____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
|
|||||||
struct pt_regs;
|
struct pt_regs;
|
||||||
|
|
||||||
#define ___bpf_kprobe_args0() ctx
|
#define ___bpf_kprobe_args0() ctx
|
||||||
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (unsigned long long)PT_REGS_PARM1(ctx)
|
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
|
||||||
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (unsigned long long)PT_REGS_PARM2(ctx)
|
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
|
||||||
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (unsigned long long)PT_REGS_PARM3(ctx)
|
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
|
||||||
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (unsigned long long)PT_REGS_PARM4(ctx)
|
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
|
||||||
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (unsigned long long)PT_REGS_PARM5(ctx)
|
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
|
||||||
#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (unsigned long long)PT_REGS_PARM6(ctx)
|
#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx)
|
||||||
#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (unsigned long long)PT_REGS_PARM7(ctx)
|
#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (void *)PT_REGS_PARM7(ctx)
|
||||||
#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (unsigned long long)PT_REGS_PARM8(ctx)
|
#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (void *)PT_REGS_PARM8(ctx)
|
||||||
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
|
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -808,7 +803,7 @@ struct pt_regs;
|
|||||||
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
|
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
|
||||||
* low-level way of getting kprobe input arguments from struct pt_regs, and
|
* low-level way of getting kprobe input arguments from struct pt_regs, and
|
||||||
* provides a familiar typed and named function arguments syntax and
|
* provides a familiar typed and named function arguments syntax and
|
||||||
* semantics of accessing kprobe input parameters.
|
* semantics of accessing kprobe input paremeters.
|
||||||
*
|
*
|
||||||
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
|
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
|
||||||
* be necessary when using BPF helpers like bpf_perf_event_output().
|
* be necessary when using BPF helpers like bpf_perf_event_output().
|
||||||
@@ -828,7 +823,7 @@ static __always_inline typeof(name(0)) \
|
|||||||
____##name(struct pt_regs *ctx, ##args)
|
____##name(struct pt_regs *ctx, ##args)
|
||||||
|
|
||||||
#define ___bpf_kretprobe_args0() ctx
|
#define ___bpf_kretprobe_args0() ctx
|
||||||
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (unsigned long long)PT_REGS_RC(ctx)
|
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
|
||||||
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
|
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -852,24 +847,24 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
|
|||||||
|
|
||||||
/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
|
/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
|
||||||
#define ___bpf_syscall_args0() ctx
|
#define ___bpf_syscall_args0() ctx
|
||||||
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (unsigned long long)PT_REGS_PARM1_SYSCALL(regs)
|
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (unsigned long long)PT_REGS_PARM2_SYSCALL(regs)
|
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (unsigned long long)PT_REGS_PARM3_SYSCALL(regs)
|
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (unsigned long long)PT_REGS_PARM4_SYSCALL(regs)
|
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (unsigned long long)PT_REGS_PARM5_SYSCALL(regs)
|
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (unsigned long long)PT_REGS_PARM6_SYSCALL(regs)
|
#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (void *)PT_REGS_PARM6_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (unsigned long long)PT_REGS_PARM7_SYSCALL(regs)
|
#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (void *)PT_REGS_PARM7_SYSCALL(regs)
|
||||||
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
|
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
|
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
|
||||||
#define ___bpf_syswrap_args0() ctx
|
#define ___bpf_syswrap_args0() ctx
|
||||||
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (unsigned long long)PT_REGS_PARM1_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (unsigned long long)PT_REGS_PARM2_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (unsigned long long)PT_REGS_PARM3_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (unsigned long long)PT_REGS_PARM4_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (unsigned long long)PT_REGS_PARM5_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (unsigned long long)PT_REGS_PARM6_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (void *)PT_REGS_PARM6_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (unsigned long long)PT_REGS_PARM7_CORE_SYSCALL(regs)
|
#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (void *)PT_REGS_PARM7_CORE_SYSCALL(regs)
|
||||||
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
|
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|||||||
44
src/btf.h
44
src/btf.h
@@ -18,7 +18,6 @@ extern "C" {
|
|||||||
|
|
||||||
#define BTF_ELF_SEC ".BTF"
|
#define BTF_ELF_SEC ".BTF"
|
||||||
#define BTF_EXT_ELF_SEC ".BTF.ext"
|
#define BTF_EXT_ELF_SEC ".BTF.ext"
|
||||||
#define BTF_BASE_ELF_SEC ".BTF.base"
|
|
||||||
#define MAPS_ELF_SEC ".maps"
|
#define MAPS_ELF_SEC ".maps"
|
||||||
|
|
||||||
struct btf;
|
struct btf;
|
||||||
@@ -108,27 +107,6 @@ LIBBPF_API struct btf *btf__new_empty(void);
|
|||||||
*/
|
*/
|
||||||
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
|
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **btf__distill_base()** creates new versions of the split BTF
|
|
||||||
* *src_btf* and its base BTF. The new base BTF will only contain the types
|
|
||||||
* needed to improve robustness of the split BTF to small changes in base BTF.
|
|
||||||
* When that split BTF is loaded against a (possibly changed) base, this
|
|
||||||
* distilled base BTF will help update references to that (possibly changed)
|
|
||||||
* base BTF.
|
|
||||||
*
|
|
||||||
* Both the new split and its associated new base BTF must be freed by
|
|
||||||
* the caller.
|
|
||||||
*
|
|
||||||
* If successful, 0 is returned and **new_base_btf** and **new_split_btf**
|
|
||||||
* will point at new base/split BTF. Both the new split and its associated
|
|
||||||
* new base BTF must be freed by the caller.
|
|
||||||
*
|
|
||||||
* A negative value is returned on error and the thread-local `errno` variable
|
|
||||||
* is set to the error code as well.
|
|
||||||
*/
|
|
||||||
LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
|
|
||||||
struct btf **new_split_btf);
|
|
||||||
|
|
||||||
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
|
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
|
||||||
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
|
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
|
||||||
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
|
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
|
||||||
@@ -167,9 +145,6 @@ LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
|
|||||||
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
|
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
|
||||||
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
|
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
|
||||||
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
|
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
|
||||||
LIBBPF_API enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext);
|
|
||||||
LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext,
|
|
||||||
enum btf_endianness endian);
|
|
||||||
|
|
||||||
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
|
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
|
||||||
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
|
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
|
||||||
@@ -227,7 +202,6 @@ LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
|
|||||||
LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
|
LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
|
||||||
LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
|
LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
|
||||||
LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
|
LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
|
||||||
LIBBPF_API int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id);
|
|
||||||
|
|
||||||
/* func and func_proto construction APIs */
|
/* func and func_proto construction APIs */
|
||||||
LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
|
LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
|
||||||
@@ -244,8 +218,6 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
|
|||||||
/* tag construction API */
|
/* tag construction API */
|
||||||
LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
|
LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
|
||||||
int component_idx);
|
int component_idx);
|
||||||
LIBBPF_API int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id,
|
|
||||||
int component_idx);
|
|
||||||
|
|
||||||
struct btf_dedup_opts {
|
struct btf_dedup_opts {
|
||||||
size_t sz;
|
size_t sz;
|
||||||
@@ -259,20 +231,6 @@ struct btf_dedup_opts {
|
|||||||
|
|
||||||
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
|
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **btf__relocate()** will check the split BTF *btf* for references
|
|
||||||
* to base BTF kinds, and verify those references are compatible with
|
|
||||||
* *base_btf*; if they are, *btf* is adjusted such that is re-parented to
|
|
||||||
* *base_btf* and type ids and strings are adjusted to accommodate this.
|
|
||||||
*
|
|
||||||
* If successful, 0 is returned and **btf** now has **base_btf** as its
|
|
||||||
* base.
|
|
||||||
*
|
|
||||||
* A negative value is returned on error and the thread-local `errno` variable
|
|
||||||
* is set to the error code as well.
|
|
||||||
*/
|
|
||||||
LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
|
|
||||||
|
|
||||||
struct btf_dump;
|
struct btf_dump;
|
||||||
|
|
||||||
struct btf_dump_opts {
|
struct btf_dump_opts {
|
||||||
@@ -292,7 +250,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
|
|||||||
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
|
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
|
||||||
|
|
||||||
struct btf_dump_emit_type_decl_opts {
|
struct btf_dump_emit_type_decl_opts {
|
||||||
/* size of this struct, for forward/backward compatibility */
|
/* size of this struct, for forward/backward compatiblity */
|
||||||
size_t sz;
|
size_t sz;
|
||||||
/* optional field name for type declaration, e.g.:
|
/* optional field name for type declaration, e.g.:
|
||||||
* - struct my_struct <FNAME>
|
* - struct my_struct <FNAME>
|
||||||
|
|||||||
@@ -21,7 +21,6 @@
|
|||||||
#include "hashmap.h"
|
#include "hashmap.h"
|
||||||
#include "libbpf.h"
|
#include "libbpf.h"
|
||||||
#include "libbpf_internal.h"
|
#include "libbpf_internal.h"
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
|
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
|
||||||
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
|
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
|
||||||
@@ -305,7 +304,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
|
|||||||
* definition, in which case they have to be declared inline as part of field
|
* definition, in which case they have to be declared inline as part of field
|
||||||
* type declaration; or as a top-level anonymous enum, typically used for
|
* type declaration; or as a top-level anonymous enum, typically used for
|
||||||
* declaring global constants. It's impossible to distinguish between two
|
* declaring global constants. It's impossible to distinguish between two
|
||||||
* without knowing whether given enum type was referenced from other type:
|
* without knowning whether given enum type was referenced from other type:
|
||||||
* top-level anonymous enum won't be referenced by anything, while embedded
|
* top-level anonymous enum won't be referenced by anything, while embedded
|
||||||
* one will.
|
* one will.
|
||||||
*/
|
*/
|
||||||
@@ -868,8 +867,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
|
|||||||
} pads[] = {
|
} pads[] = {
|
||||||
{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
|
{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
|
||||||
};
|
};
|
||||||
int new_off = 0, pad_bits = 0, bits, i;
|
int new_off, pad_bits, bits, i;
|
||||||
const char *pad_type = NULL;
|
const char *pad_type;
|
||||||
|
|
||||||
if (cur_off >= next_off)
|
if (cur_off >= next_off)
|
||||||
return; /* no gap */
|
return; /* no gap */
|
||||||
@@ -1305,7 +1304,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
|
|||||||
* chain, restore stack, emit warning, and try to
|
* chain, restore stack, emit warning, and try to
|
||||||
* proceed nevertheless
|
* proceed nevertheless
|
||||||
*/
|
*/
|
||||||
pr_warn("not enough memory for decl stack: %s\n", errstr(err));
|
pr_warn("not enough memory for decl stack:%d", err);
|
||||||
d->decl_stack_cnt = stack_start;
|
d->decl_stack_cnt = stack_start;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -1494,10 +1493,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
|
|||||||
case BTF_KIND_TYPE_TAG:
|
case BTF_KIND_TYPE_TAG:
|
||||||
btf_dump_emit_mods(d, decls);
|
btf_dump_emit_mods(d, decls);
|
||||||
name = btf_name_of(d, t->name_off);
|
name = btf_name_of(d, t->name_off);
|
||||||
if (btf_kflag(t))
|
btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
|
||||||
btf_dump_printf(d, " __attribute__((%s))", name);
|
|
||||||
else
|
|
||||||
btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
|
|
||||||
break;
|
break;
|
||||||
case BTF_KIND_ARRAY: {
|
case BTF_KIND_ARRAY: {
|
||||||
const struct btf_array *a = btf_array(t);
|
const struct btf_array *a = btf_array(t);
|
||||||
@@ -1563,12 +1559,10 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
|
|||||||
* Clang for BPF target generates func_proto with no
|
* Clang for BPF target generates func_proto with no
|
||||||
* args as a func_proto with a single void arg (e.g.,
|
* args as a func_proto with a single void arg (e.g.,
|
||||||
* `int (*f)(void)` vs just `int (*f)()`). We are
|
* `int (*f)(void)` vs just `int (*f)()`). We are
|
||||||
* going to emit valid empty args (void) syntax for
|
* going to pretend there are no args for such case.
|
||||||
* such case. Similarly and conveniently, valid
|
|
||||||
* no args case can be special-cased here as well.
|
|
||||||
*/
|
*/
|
||||||
if (vlen == 0 || (vlen == 1 && p->type == 0)) {
|
if (vlen == 1 && p->type == 0) {
|
||||||
btf_dump_printf(d, "void)");
|
btf_dump_printf(d, ")");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1935,7 +1929,6 @@ static int btf_dump_int_data(struct btf_dump *d,
|
|||||||
if (d->typed_dump->is_array_terminated)
|
if (d->typed_dump->is_array_terminated)
|
||||||
break;
|
break;
|
||||||
if (*(char *)data == '\0') {
|
if (*(char *)data == '\0') {
|
||||||
btf_dump_type_values(d, "'\\0'");
|
|
||||||
d->typed_dump->is_array_terminated = true;
|
d->typed_dump->is_array_terminated = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@@ -2038,7 +2031,6 @@ static int btf_dump_array_data(struct btf_dump *d,
|
|||||||
__u32 i, elem_type_id;
|
__u32 i, elem_type_id;
|
||||||
__s64 elem_size;
|
__s64 elem_size;
|
||||||
bool is_array_member;
|
bool is_array_member;
|
||||||
bool is_array_terminated;
|
|
||||||
|
|
||||||
elem_type_id = array->type;
|
elem_type_id = array->type;
|
||||||
elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
|
elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
|
||||||
@@ -2074,15 +2066,12 @@ static int btf_dump_array_data(struct btf_dump *d,
|
|||||||
*/
|
*/
|
||||||
is_array_member = d->typed_dump->is_array_member;
|
is_array_member = d->typed_dump->is_array_member;
|
||||||
d->typed_dump->is_array_member = true;
|
d->typed_dump->is_array_member = true;
|
||||||
is_array_terminated = d->typed_dump->is_array_terminated;
|
|
||||||
d->typed_dump->is_array_terminated = false;
|
|
||||||
for (i = 0; i < array->nelems; i++, data += elem_size) {
|
for (i = 0; i < array->nelems; i++, data += elem_size) {
|
||||||
if (d->typed_dump->is_array_terminated)
|
if (d->typed_dump->is_array_terminated)
|
||||||
break;
|
break;
|
||||||
btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0);
|
btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0);
|
||||||
}
|
}
|
||||||
d->typed_dump->is_array_member = is_array_member;
|
d->typed_dump->is_array_member = is_array_member;
|
||||||
d->typed_dump->is_array_terminated = is_array_terminated;
|
|
||||||
d->typed_dump->depth--;
|
d->typed_dump->depth--;
|
||||||
btf_dump_data_pfx(d);
|
btf_dump_data_pfx(d);
|
||||||
btf_dump_type_values(d, "]");
|
btf_dump_type_values(d, "]");
|
||||||
|
|||||||
177
src/btf_iter.c
177
src/btf_iter.c
@@ -1,177 +0,0 @@
|
|||||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
|
||||||
/* Copyright (c) 2021 Facebook */
|
|
||||||
/* Copyright (c) 2024, Oracle and/or its affiliates. */
|
|
||||||
|
|
||||||
#ifdef __KERNEL__
|
|
||||||
#include <linux/bpf.h>
|
|
||||||
#include <linux/btf.h>
|
|
||||||
|
|
||||||
#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t)
|
|
||||||
|
|
||||||
#else
|
|
||||||
#include "btf.h"
|
|
||||||
#include "libbpf_internal.h"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
|
|
||||||
enum btf_field_iter_kind iter_kind)
|
|
||||||
{
|
|
||||||
it->p = NULL;
|
|
||||||
it->m_idx = -1;
|
|
||||||
it->off_idx = 0;
|
|
||||||
it->vlen = 0;
|
|
||||||
|
|
||||||
switch (iter_kind) {
|
|
||||||
case BTF_FIELD_ITER_IDS:
|
|
||||||
switch (btf_kind(t)) {
|
|
||||||
case BTF_KIND_UNKN:
|
|
||||||
case BTF_KIND_INT:
|
|
||||||
case BTF_KIND_FLOAT:
|
|
||||||
case BTF_KIND_ENUM:
|
|
||||||
case BTF_KIND_ENUM64:
|
|
||||||
it->desc = (struct btf_field_desc) {};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_FWD:
|
|
||||||
case BTF_KIND_CONST:
|
|
||||||
case BTF_KIND_VOLATILE:
|
|
||||||
case BTF_KIND_RESTRICT:
|
|
||||||
case BTF_KIND_PTR:
|
|
||||||
case BTF_KIND_TYPEDEF:
|
|
||||||
case BTF_KIND_FUNC:
|
|
||||||
case BTF_KIND_VAR:
|
|
||||||
case BTF_KIND_DECL_TAG:
|
|
||||||
case BTF_KIND_TYPE_TAG:
|
|
||||||
it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
|
|
||||||
break;
|
|
||||||
case BTF_KIND_ARRAY:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
|
|
||||||
sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_STRUCT:
|
|
||||||
case BTF_KIND_UNION:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
0, {},
|
|
||||||
sizeof(struct btf_member),
|
|
||||||
1, {offsetof(struct btf_member, type)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_FUNC_PROTO:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
1, {offsetof(struct btf_type, type)},
|
|
||||||
sizeof(struct btf_param),
|
|
||||||
1, {offsetof(struct btf_param, type)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_DATASEC:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
0, {},
|
|
||||||
sizeof(struct btf_var_secinfo),
|
|
||||||
1, {offsetof(struct btf_var_secinfo, type)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
case BTF_FIELD_ITER_STRS:
|
|
||||||
switch (btf_kind(t)) {
|
|
||||||
case BTF_KIND_UNKN:
|
|
||||||
it->desc = (struct btf_field_desc) {};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_INT:
|
|
||||||
case BTF_KIND_FLOAT:
|
|
||||||
case BTF_KIND_FWD:
|
|
||||||
case BTF_KIND_ARRAY:
|
|
||||||
case BTF_KIND_CONST:
|
|
||||||
case BTF_KIND_VOLATILE:
|
|
||||||
case BTF_KIND_RESTRICT:
|
|
||||||
case BTF_KIND_PTR:
|
|
||||||
case BTF_KIND_TYPEDEF:
|
|
||||||
case BTF_KIND_FUNC:
|
|
||||||
case BTF_KIND_VAR:
|
|
||||||
case BTF_KIND_DECL_TAG:
|
|
||||||
case BTF_KIND_TYPE_TAG:
|
|
||||||
case BTF_KIND_DATASEC:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
1, {offsetof(struct btf_type, name_off)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_ENUM:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
1, {offsetof(struct btf_type, name_off)},
|
|
||||||
sizeof(struct btf_enum),
|
|
||||||
1, {offsetof(struct btf_enum, name_off)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_ENUM64:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
1, {offsetof(struct btf_type, name_off)},
|
|
||||||
sizeof(struct btf_enum64),
|
|
||||||
1, {offsetof(struct btf_enum64, name_off)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_STRUCT:
|
|
||||||
case BTF_KIND_UNION:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
1, {offsetof(struct btf_type, name_off)},
|
|
||||||
sizeof(struct btf_member),
|
|
||||||
1, {offsetof(struct btf_member, name_off)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
case BTF_KIND_FUNC_PROTO:
|
|
||||||
it->desc = (struct btf_field_desc) {
|
|
||||||
1, {offsetof(struct btf_type, name_off)},
|
|
||||||
sizeof(struct btf_param),
|
|
||||||
1, {offsetof(struct btf_param, name_off)}
|
|
||||||
};
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (it->desc.m_sz)
|
|
||||||
it->vlen = btf_vlen(t);
|
|
||||||
|
|
||||||
it->p = t;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
__u32 *btf_field_iter_next(struct btf_field_iter *it)
|
|
||||||
{
|
|
||||||
if (!it->p)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
if (it->m_idx < 0) {
|
|
||||||
if (it->off_idx < it->desc.t_off_cnt)
|
|
||||||
return it->p + it->desc.t_offs[it->off_idx++];
|
|
||||||
/* move to per-member iteration */
|
|
||||||
it->m_idx = 0;
|
|
||||||
it->p += sizeof(struct btf_type);
|
|
||||||
it->off_idx = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* if type doesn't have members, stop */
|
|
||||||
if (it->desc.m_sz == 0) {
|
|
||||||
it->p = NULL;
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (it->off_idx >= it->desc.m_off_cnt) {
|
|
||||||
/* exhausted this member's fields, go to the next member */
|
|
||||||
it->m_idx++;
|
|
||||||
it->p += it->desc.m_sz;
|
|
||||||
it->off_idx = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (it->m_idx < it->vlen)
|
|
||||||
return it->p + it->desc.m_offs[it->off_idx++];
|
|
||||||
|
|
||||||
it->p = NULL;
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
@@ -1,519 +0,0 @@
|
|||||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
|
||||||
/* Copyright (c) 2024, Oracle and/or its affiliates. */
|
|
||||||
|
|
||||||
#ifndef _GNU_SOURCE
|
|
||||||
#define _GNU_SOURCE
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __KERNEL__
|
|
||||||
#include <linux/bpf.h>
|
|
||||||
#include <linux/bsearch.h>
|
|
||||||
#include <linux/btf.h>
|
|
||||||
#include <linux/sort.h>
|
|
||||||
#include <linux/string.h>
|
|
||||||
#include <linux/bpf_verifier.h>
|
|
||||||
|
|
||||||
#define btf_type_by_id (struct btf_type *)btf_type_by_id
|
|
||||||
#define btf__type_cnt btf_nr_types
|
|
||||||
#define btf__base_btf btf_base_btf
|
|
||||||
#define btf__name_by_offset btf_name_by_offset
|
|
||||||
#define btf__str_by_offset btf_str_by_offset
|
|
||||||
#define btf_kflag btf_type_kflag
|
|
||||||
|
|
||||||
#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
|
|
||||||
#define free(ptr) kvfree(ptr)
|
|
||||||
#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL)
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
#include "btf.h"
|
|
||||||
#include "bpf.h"
|
|
||||||
#include "libbpf.h"
|
|
||||||
#include "libbpf_internal.h"
|
|
||||||
|
|
||||||
#endif /* __KERNEL__ */
|
|
||||||
|
|
||||||
struct btf;
|
|
||||||
|
|
||||||
struct btf_relocate {
|
|
||||||
struct btf *btf;
|
|
||||||
const struct btf *base_btf;
|
|
||||||
const struct btf *dist_base_btf;
|
|
||||||
unsigned int nr_base_types;
|
|
||||||
unsigned int nr_split_types;
|
|
||||||
unsigned int nr_dist_base_types;
|
|
||||||
int dist_str_len;
|
|
||||||
int base_str_len;
|
|
||||||
__u32 *id_map;
|
|
||||||
__u32 *str_map;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Set temporarily in relocation id_map if distilled base struct/union is
|
|
||||||
* embedded in a split BTF struct/union; in such a case, size information must
|
|
||||||
* match between distilled base BTF and base BTF representation of type.
|
|
||||||
*/
|
|
||||||
#define BTF_IS_EMBEDDED ((__u32)-1)
|
|
||||||
|
|
||||||
/* <name, size, id> triple used in sorting/searching distilled base BTF. */
|
|
||||||
struct btf_name_info {
|
|
||||||
const char *name;
|
|
||||||
/* set when search requires a size match */
|
|
||||||
bool needs_size: 1;
|
|
||||||
unsigned int size: 31;
|
|
||||||
__u32 id;
|
|
||||||
};
|
|
||||||
|
|
||||||
static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i)
|
|
||||||
{
|
|
||||||
struct btf_type *t = btf_type_by_id(r->btf, i);
|
|
||||||
struct btf_field_iter it;
|
|
||||||
__u32 *id;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
|
||||||
if (err)
|
|
||||||
return err;
|
|
||||||
|
|
||||||
while ((id = btf_field_iter_next(&it)))
|
|
||||||
*id = r->id_map[*id];
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Simple string comparison used for sorting within BTF, since all distilled
|
|
||||||
* types are named. If strings match, and size is non-zero for both elements
|
|
||||||
* fall back to using size for ordering.
|
|
||||||
*/
|
|
||||||
static int cmp_btf_name_size(const void *n1, const void *n2)
|
|
||||||
{
|
|
||||||
const struct btf_name_info *ni1 = n1;
|
|
||||||
const struct btf_name_info *ni2 = n2;
|
|
||||||
int name_diff = strcmp(ni1->name, ni2->name);
|
|
||||||
|
|
||||||
if (!name_diff && ni1->needs_size && ni2->needs_size)
|
|
||||||
return ni2->size - ni1->size;
|
|
||||||
return name_diff;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Binary search with a small twist; find leftmost element that matches
|
|
||||||
* so that we can then iterate through all exact matches. So for example
|
|
||||||
* searching { "a", "bb", "bb", "c" } we would always match on the
|
|
||||||
* leftmost "bb".
|
|
||||||
*/
|
|
||||||
static struct btf_name_info *search_btf_name_size(struct btf_name_info *key,
|
|
||||||
struct btf_name_info *vals,
|
|
||||||
int nelems)
|
|
||||||
{
|
|
||||||
struct btf_name_info *ret = NULL;
|
|
||||||
int high = nelems - 1;
|
|
||||||
int low = 0;
|
|
||||||
|
|
||||||
while (low <= high) {
|
|
||||||
int mid = (low + high)/2;
|
|
||||||
struct btf_name_info *val = &vals[mid];
|
|
||||||
int diff = cmp_btf_name_size(key, val);
|
|
||||||
|
|
||||||
if (diff == 0)
|
|
||||||
ret = val;
|
|
||||||
/* even if found, keep searching for leftmost match */
|
|
||||||
if (diff <= 0)
|
|
||||||
high = mid - 1;
|
|
||||||
else
|
|
||||||
low = mid + 1;
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If a member of a split BTF struct/union refers to a base BTF
 * struct/union, mark that struct/union id temporarily in the id_map
 * with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef
 * reference types, but if a pointer is encountered, the type is no longer
 * considered embedded.
 */
static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i)
{
	struct btf_type *t = btf_type_by_id(r->btf, i);
	struct btf_field_iter it;
	__u32 *id;
	int err;

	/* only struct/union members can embed another struct/union */
	if (!btf_is_composite(t))
		return 0;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;

	/* visit every type-id reference inside this struct/union */
	while ((id = btf_field_iter_next(&it))) {
		__u32 next_id = *id;

		/* chase through "transparent" modifiers/arrays; stop at the
		 * first composite (embedded -> mark) or anything else (e.g. a
		 * pointer), which terminates the chain unmarked.
		 */
		while (next_id) {
			t = btf_type_by_id(r->btf, next_id);
			switch (btf_kind(t)) {
			case BTF_KIND_CONST:
			case BTF_KIND_RESTRICT:
			case BTF_KIND_VOLATILE:
			case BTF_KIND_TYPEDEF:
			case BTF_KIND_TYPE_TAG:
				next_id = t->type;
				break;
			case BTF_KIND_ARRAY: {
				struct btf_array *a = btf_array(t);

				next_id = a->type;
				break;
			}
			case BTF_KIND_STRUCT:
			case BTF_KIND_UNION:
				/* only ids within the distilled base range matter */
				if (next_id < r->nr_dist_base_types)
					r->id_map[next_id] = BTF_IS_EMBEDDED;
				next_id = 0;
				break;
			default:
				/* pointer or other kind: not embedded */
				next_id = 0;
				break;
			}
		}
	}

	return 0;
}
|
|
||||||
/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate
 * through base BTF looking up distilled type (using binary search) equivalents.
 */
static int btf_relocate_map_distilled_base(struct btf_relocate *r)
{
	struct btf_name_info *info, *info_end;
	struct btf_type *base_t, *dist_t;
	__u8 *base_name_cnt = NULL;
	int err = 0;
	__u32 id;

	/* generate a sort index array of name/type ids sorted by name for
	 * distilled base BTF to speed name-based lookups.
	 */
	info = calloc(r->nr_dist_base_types, sizeof(*info));
	if (!info) {
		err = -ENOMEM;
		goto done;
	}
	info_end = info + r->nr_dist_base_types;
	for (id = 0; id < r->nr_dist_base_types; id++) {
		dist_t = btf_type_by_id(r->dist_base_btf, id);
		info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
		info[id].id = id;
		info[id].size = dist_t->size;
		info[id].needs_size = true;
	}
	qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size);

	/* Mark distilled base struct/union members of split BTF structs/unions
	 * in id_map with BTF_IS_EMBEDDED; this signals that these types
	 * need to match both name and size, otherwise embedding the base
	 * struct/union in the split type is invalid.
	 */
	for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
		err = btf_mark_embedded_composite_type_ids(r, id);
		if (err)
			goto done;
	}

	/* Collect name counts for composite types in base BTF. If multiple
	 * instances of a struct/union of the same name exist, we need to use
	 * size to determine which to map to since name alone is ambiguous.
	 */
	base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt));
	if (!base_name_cnt) {
		err = -ENOMEM;
		goto done;
	}
	for (id = 1; id < r->nr_base_types; id++) {
		base_t = btf_type_by_id(r->base_btf, id);
		if (!btf_is_composite(base_t) || !base_t->name_off)
			continue;
		/* saturating count; all we need to know is "more than one" */
		if (base_name_cnt[base_t->name_off] < 255)
			base_name_cnt[base_t->name_off]++;
	}

	/* Now search base BTF for matching distilled base BTF types. */
	for (id = 1; id < r->nr_base_types; id++) {
		struct btf_name_info *dist_info, base_info = {};
		int dist_kind, base_kind;

		base_t = btf_type_by_id(r->base_btf, id);
		/* distilled base consists of named types only. */
		if (!base_t->name_off)
			continue;
		base_kind = btf_kind(base_t);
		base_info.id = id;
		base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off);
		switch (base_kind) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* These types should match both name and size */
			base_info.needs_size = true;
			base_info.size = base_t->size;
			break;
		case BTF_KIND_FWD:
			/* No size considerations for fwds. */
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* Size only needs to be used for struct/union if there
			 * are multiple types in base BTF with the same name.
			 * If there are multiple _distilled_ types with the same
			 * name (a very unlikely scenario), that doesn't matter
			 * unless corresponding _base_ types to match them are
			 * missing.
			 */
			base_info.needs_size = base_name_cnt[base_t->name_off] > 1;
			base_info.size = base_t->size;
			break;
		default:
			/* other kinds never appear in distilled base BTF */
			continue;
		}
		/* iterate over all matching distilled base types */
		for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types);
		     dist_info != NULL && dist_info < info_end &&
		     cmp_btf_name_size(&base_info, dist_info) == 0;
		     dist_info++) {
			if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) {
				pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n",
					id, dist_info->id);
				err = -EINVAL;
				goto done;
			}
			dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id);
			dist_kind = btf_kind(dist_t);

			/* Validate that the found distilled type is compatible.
			 * Do not error out on mismatch as another match may
			 * occur for an identically-named type.
			 */
			switch (dist_kind) {
			case BTF_KIND_FWD:
				switch (base_kind) {
				case BTF_KIND_FWD:
					/* kflag distinguishes struct vs union fwds */
					if (btf_kflag(dist_t) != btf_kflag(base_t))
						continue;
					break;
				case BTF_KIND_STRUCT:
					if (btf_kflag(base_t))
						continue;
					break;
				case BTF_KIND_UNION:
					if (!btf_kflag(base_t))
						continue;
					break;
				default:
					continue;
				}
				break;
			case BTF_KIND_INT:
				if (dist_kind != base_kind ||
				    btf_int_encoding(base_t) != btf_int_encoding(dist_t))
					continue;
				break;
			case BTF_KIND_FLOAT:
				if (dist_kind != base_kind)
					continue;
				break;
			case BTF_KIND_ENUM:
				/* ENUM and ENUM64 are encoded as sized ENUM in
				 * distilled base BTF.
				 */
				if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64)
					continue;
				break;
			case BTF_KIND_STRUCT:
			case BTF_KIND_UNION:
				/* size verification is required for embedded
				 * struct/unions.
				 */
				if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED &&
				    base_t->size != dist_t->size)
					continue;
				break;
			default:
				continue;
			}
			if (r->id_map[dist_info->id] &&
			    r->id_map[dist_info->id] != BTF_IS_EMBEDDED) {
				/* we already have a match; this tells us that
				 * multiple base types of the same name
				 * have the same size, since for cases where
				 * multiple types have the same name we match
				 * on name and size. In this case, we have
				 * no way of determining which to relocate
				 * to in base BTF, so error out.
				 */
				pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n",
					base_info.name, dist_info->id,
					base_t->size, id, r->id_map[dist_info->id]);
				err = -EINVAL;
				goto done;
			}
			/* map id and name */
			r->id_map[dist_info->id] = id;
			r->str_map[dist_t->name_off] = base_t->name_off;
		}
	}
	/* ensure all distilled BTF ids now have a mapping... */
	for (id = 1; id < r->nr_dist_base_types; id++) {
		const char *name;

		if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED)
			continue;
		dist_t = btf_type_by_id(r->dist_base_btf, id);
		name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off);
		pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n",
			name, id);
		err = -EINVAL;
		break;
	}
done:
	free(base_name_cnt);
	free(info);
	return err;
}
|
|
||||||
/* distilled base should only have named int/float/enum/fwd/struct/union types. */
|
|
||||||
static int btf_relocate_validate_distilled_base(struct btf_relocate *r)
|
|
||||||
{
|
|
||||||
unsigned int i;
|
|
||||||
|
|
||||||
for (i = 1; i < r->nr_dist_base_types; i++) {
|
|
||||||
struct btf_type *t = btf_type_by_id(r->dist_base_btf, i);
|
|
||||||
int kind = btf_kind(t);
|
|
||||||
|
|
||||||
switch (kind) {
|
|
||||||
case BTF_KIND_INT:
|
|
||||||
case BTF_KIND_FLOAT:
|
|
||||||
case BTF_KIND_ENUM:
|
|
||||||
case BTF_KIND_STRUCT:
|
|
||||||
case BTF_KIND_UNION:
|
|
||||||
case BTF_KIND_FWD:
|
|
||||||
if (t->name_off)
|
|
||||||
break;
|
|
||||||
pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n",
|
|
||||||
i, kind);
|
|
||||||
return -EINVAL;
|
|
||||||
default:
|
|
||||||
pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n",
|
|
||||||
i, kind);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Rewrite all string offsets in split BTF type @i to refer to the new base
 * BTF string section: offsets that pointed past the distilled base strings
 * are shifted by the size difference, while offsets into the distilled base
 * strings are translated via str_map (built during distilled-base mapping).
 *
 * Returns 0 on success, negative error on an unmapped string or iterator
 * failure.
 */
static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
{
	struct btf_type *t = btf_type_by_id(r->btf, i);
	struct btf_field_iter it;
	__u32 *str_off;
	int off, err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
	if (err)
		return err;

	while ((str_off = btf_field_iter_next(&it))) {
		if (!*str_off)
			continue;
		if (*str_off >= r->dist_str_len) {
			/* split-BTF-local string: shift by the difference in
			 * base string section sizes.
			 */
			*str_off += r->base_str_len - r->dist_str_len;
		} else {
			off = r->str_map[*str_off];
			if (!off) {
				/* Bug fix: look up the *original* offset; 'off'
				 * is known to be 0 here, so the old code always
				 * printed an empty string name.
				 */
				pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
					btf__str_by_offset(r->btf, *str_off), *str_off);
				return -ENOENT;
			}
			*str_off = off;
		}
	}
	return 0;
}
|
|
||||||
/* If successful, output of relocation is updated BTF with base BTF pointing
 * at base_btf, and type ids, strings adjusted accordingly.
 *
 * On success, if @id_map is non-NULL, ownership of the distilled-id ->
 * base-id mapping array is transferred to the caller (caller must free()).
 */
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map)
{
	unsigned int nr_types = btf__type_cnt(btf);
	const struct btf_header *dist_base_hdr;
	const struct btf_header *base_hdr;
	struct btf_relocate r = {};
	int err = 0;
	__u32 id, i;

	r.dist_base_btf = btf__base_btf(btf);
	/* NOTE(review): a NULL distilled base (btf has no base BTF) is not
	 * rejected explicitly here; presumably btf__type_cnt()/btf_header()
	 * tolerate it — confirm against their implementations.
	 */
	if (!base_btf || r.dist_base_btf == base_btf)
		return -EINVAL;

	r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf);
	r.nr_base_types = btf__type_cnt(base_btf);
	r.nr_split_types = nr_types - r.nr_dist_base_types;
	r.btf = btf;
	r.base_btf = base_btf;

	/* id_map: one slot per type in btf; str_map: one slot per byte of the
	 * distilled base string section (string offsets index it directly).
	 */
	r.id_map = calloc(nr_types, sizeof(*r.id_map));
	r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map));
	dist_base_hdr = btf_header(r.dist_base_btf);
	base_hdr = btf_header(r.base_btf);
	r.dist_str_len = dist_base_hdr->str_len;
	r.base_str_len = base_hdr->str_len;
	if (!r.id_map || !r.str_map) {
		err = -ENOMEM;
		goto err_out;
	}

	err = btf_relocate_validate_distilled_base(&r);
	if (err)
		goto err_out;

	/* Split BTF ids need to be adjusted as base and distilled base
	 * have different numbers of types, changing the start id of split
	 * BTF.
	 */
	for (id = r.nr_dist_base_types; id < nr_types; id++)
		r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types;

	/* Build a map from distilled base ids to actual base BTF ids; it is used
	 * to update split BTF id references. Also build a str_map mapping from
	 * distilled base BTF names to base BTF names.
	 */
	err = btf_relocate_map_distilled_base(&r);
	if (err)
		goto err_out;

	/* Next, rewrite type ids in split BTF, replacing split ids with updated
	 * ids based on number of types in base BTF, and base ids with
	 * relocated ids from base_btf.
	 */
	for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) {
		err = btf_relocate_rewrite_type_id(&r, id);
		if (err)
			goto err_out;
	}
	/* String offsets now need to be updated using the str_map. */
	for (i = 0; i < r.nr_split_types; i++) {
		err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types);
		if (err)
			goto err_out;
	}
	/* Finally reset base BTF to be base_btf */
	btf_set_base_btf(btf, base_btf);

	/* hand id_map over to the caller; NULLing it keeps the common
	 * cleanup below from freeing it.
	 */
	if (id_map) {
		*id_map = r.id_map;
		r.id_map = NULL;
	}
err_out:
	free(r.id_map);
	free(r.str_map);
	return err;
}
559
src/elf.c
559
src/elf.c
@@ -1,559 +0,0 @@
|
|||||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
|
||||||
|
|
||||||
#ifndef _GNU_SOURCE
|
|
||||||
#define _GNU_SOURCE
|
|
||||||
#endif
|
|
||||||
#include <libelf.h>
|
|
||||||
#include <gelf.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <linux/kernel.h>
|
|
||||||
|
|
||||||
#include "libbpf_internal.h"
|
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
|
|
||||||
* the symbol is hidden and can only be seen when referenced using an
|
|
||||||
* explicit version number. This is a GNU extension.
|
|
||||||
*/
|
|
||||||
#define VERSYM_HIDDEN 0x8000
|
|
||||||
|
|
||||||
/* This is the mask for the rest of the data in a word read from a
|
|
||||||
* SHT_GNU_versym section.
|
|
||||||
*/
|
|
||||||
#define VERSYM_VERSION 0x7fff
|
|
||||||
|
|
||||||
int elf_open(const char *binary_path, struct elf_fd *elf_fd)
|
|
||||||
{
|
|
||||||
int fd, ret;
|
|
||||||
Elf *elf;
|
|
||||||
|
|
||||||
elf_fd->elf = NULL;
|
|
||||||
elf_fd->fd = -1;
|
|
||||||
|
|
||||||
if (elf_version(EV_CURRENT) == EV_NONE) {
|
|
||||||
pr_warn("elf: failed to init libelf for %s\n", binary_path);
|
|
||||||
return -LIBBPF_ERRNO__LIBELF;
|
|
||||||
}
|
|
||||||
fd = open(binary_path, O_RDONLY | O_CLOEXEC);
|
|
||||||
if (fd < 0) {
|
|
||||||
ret = -errno;
|
|
||||||
pr_warn("elf: failed to open %s: %s\n", binary_path, errstr(ret));
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
|
|
||||||
if (!elf) {
|
|
||||||
pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
|
|
||||||
close(fd);
|
|
||||||
return -LIBBPF_ERRNO__FORMAT;
|
|
||||||
}
|
|
||||||
elf_fd->fd = fd;
|
|
||||||
elf_fd->elf = elf;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void elf_close(struct elf_fd *elf_fd)
|
|
||||||
{
|
|
||||||
if (!elf_fd)
|
|
||||||
return;
|
|
||||||
elf_end(elf_fd->elf);
|
|
||||||
close(elf_fd->fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
|
|
||||||
static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
|
|
||||||
{
|
|
||||||
while ((scn = elf_nextscn(elf, scn)) != NULL) {
|
|
||||||
GElf_Shdr sh;
|
|
||||||
|
|
||||||
if (!gelf_getshdr(scn, &sh))
|
|
||||||
continue;
|
|
||||||
if (sh.sh_type == sh_type)
|
|
||||||
return scn;
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* One symbol produced by elf_sym_iter_next(): the raw symbol, the header of
 * its containing section (needed to translate st_value to a file offset),
 * and GNU version info when a SHT_GNU_versym section exists (dynsym only).
 */
struct elf_sym {
	const char *name;	/* name resolved from the associated strtab */
	GElf_Sym sym;		/* raw ELF symbol entry */
	GElf_Shdr sh;		/* header of the section the symbol lives in */
	int ver;		/* version index (VERSYM_VERSION bits); 0 if none */
	bool hidden;		/* VERSYM_HIDDEN bit set? */
};
|
|
||||||
/* State for iterating symbols of one symbol table section (SHT_SYMTAB or
 * SHT_DYNSYM), optionally joined with GNU version sections for dynsym.
 */
struct elf_sym_iter {
	Elf *elf;		/* ELF handle being iterated */
	Elf_Data *syms;		/* symbol table section data */
	Elf_Data *versyms;	/* SHT_GNU_versym data, or NULL */
	Elf_Data *verdefs;	/* SHT_GNU_verdef data, or NULL */
	size_t nr_syms;		/* number of entries in 'syms' */
	size_t strtabidx;	/* string table section index for symbol names */
	size_t verdef_strtabidx; /* string table index for version names */
	size_t next_sym_idx;	/* next symbol index to visit */
	struct elf_sym sym;	/* storage returned by elf_sym_iter_next() */
	int st_type;		/* STT_* filter applied during iteration */
};
|
|
||||||
/* Initialize @iter to walk symbols of type @st_type from the first section of
 * @sh_type (SHT_SYMTAB or SHT_DYNSYM) in @elf. For SHT_DYNSYM, also hooks up
 * the GNU symbol-versioning sections (versym/verdef) when present.
 * @binary_path is used for error reporting only.
 * Returns 0 on success, -ENOENT if no such section exists, -EINVAL on
 * malformed ELF data.
 */
static int elf_sym_iter_new(struct elf_sym_iter *iter,
			    Elf *elf, const char *binary_path,
			    int sh_type, int st_type)
{
	Elf_Scn *scn = NULL;
	GElf_Ehdr ehdr;
	GElf_Shdr sh;

	memset(iter, 0, sizeof(*iter));

	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
		return -EINVAL;
	}

	scn = elf_find_next_scn_by_type(elf, sh_type, NULL);
	if (!scn) {
		/* absence of a symbol table is expected for some binaries,
		 * so this is debug-level, not a warning.
		 */
		pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
			 binary_path);
		return -ENOENT;
	}

	if (!gelf_getshdr(scn, &sh))
		return -EINVAL;

	/* sh_link of a symtab section is the index of its string table */
	iter->strtabidx = sh.sh_link;
	iter->syms = elf_getdata(scn, 0);
	if (!iter->syms) {
		pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
			binary_path, elf_errmsg(-1));
		return -EINVAL;
	}
	iter->nr_syms = iter->syms->d_size / sh.sh_entsize;
	iter->elf = elf;
	iter->st_type = st_type;

	/* Version symbol table is meaningful to dynsym only */
	if (sh_type != SHT_DYNSYM)
		return 0;

	/* versym/verdef are optional; missing sections are not an error */
	scn = elf_find_next_scn_by_type(elf, SHT_GNU_versym, NULL);
	if (!scn)
		return 0;
	iter->versyms = elf_getdata(scn, 0);

	scn = elf_find_next_scn_by_type(elf, SHT_GNU_verdef, NULL);
	if (!scn)
		return 0;

	iter->verdefs = elf_getdata(scn, 0);
	if (!iter->verdefs || !gelf_getshdr(scn, &sh)) {
		pr_warn("elf: failed to get verdef ELF section in '%s'\n", binary_path);
		return -EINVAL;
	}
	/* sh_link of verdef is the string table holding version names */
	iter->verdef_strtabidx = sh.sh_link;

	return 0;
}
|
|
||||||
/* Return the next symbol matching the iterator's STT_* filter, or NULL when
 * exhausted. The returned pointer aliases iter->sym and is invalidated by the
 * next call. Symbols with unreadable entries/names/sections are skipped.
 */
static struct elf_sym *elf_sym_iter_next(struct elf_sym_iter *iter)
{
	struct elf_sym *ret = &iter->sym;
	GElf_Sym *sym = &ret->sym;
	const char *name = NULL;
	GElf_Versym versym;
	Elf_Scn *sym_scn;
	size_t idx;

	for (idx = iter->next_sym_idx; idx < iter->nr_syms; idx++) {
		if (!gelf_getsym(iter->syms, idx, sym))
			continue;
		if (GELF_ST_TYPE(sym->st_info) != iter->st_type)
			continue;
		name = elf_strptr(iter->elf, iter->strtabidx, sym->st_name);
		if (!name)
			continue;
		/* section header is needed later for file-offset translation */
		sym_scn = elf_getscn(iter->elf, sym->st_shndx);
		if (!sym_scn)
			continue;
		if (!gelf_getshdr(sym_scn, &ret->sh))
			continue;

		iter->next_sym_idx = idx + 1;
		ret->name = name;
		ret->ver = 0;
		ret->hidden = false;

		/* versym entries are parallel to the dynsym table */
		if (iter->versyms) {
			if (!gelf_getversym(iter->versyms, idx, &versym))
				continue;
			ret->ver = versym & VERSYM_VERSION;
			ret->hidden = versym & VERSYM_HIDDEN;
		}
		return ret;
	}

	return NULL;
}
|
|
||||||
static const char *elf_get_vername(struct elf_sym_iter *iter, int ver)
|
|
||||||
{
|
|
||||||
GElf_Verdaux verdaux;
|
|
||||||
GElf_Verdef verdef;
|
|
||||||
int offset;
|
|
||||||
|
|
||||||
if (!iter->verdefs)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
offset = 0;
|
|
||||||
while (gelf_getverdef(iter->verdefs, offset, &verdef)) {
|
|
||||||
if (verdef.vd_ndx != ver) {
|
|
||||||
if (!verdef.vd_next)
|
|
||||||
break;
|
|
||||||
|
|
||||||
offset += verdef.vd_next;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!gelf_getverdaux(iter->verdefs, offset + verdef.vd_aux, &verdaux))
|
|
||||||
break;
|
|
||||||
|
|
||||||
return elf_strptr(iter->elf, iter->verdef_strtabidx, verdaux.vda_name);
|
|
||||||
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Decide whether iterator symbol @sym matches the user-requested function
 * @name (first @name_len bytes) and optional version @lib_ver.
 * Handles the three symbol spellings: "func", "func@LIB_VER", "func@@LIB_VER".
 */
static bool symbol_match(struct elf_sym_iter *iter, int sh_type, struct elf_sym *sym,
			 const char *name, size_t name_len, const char *lib_ver)
{
	const char *ver_name;

	/* Symbols are in forms of func, func@LIB_VER or func@@LIB_VER
	 * make sure the func part matches the user specified name
	 */
	if (strncmp(sym->name, name, name_len) != 0)
		return false;

	/* ...but we don't want a search for "foo" to match 'foo2" also, so any
	 * additional characters in sname should be of the form "@@LIB".
	 */
	if (sym->name[name_len] != '\0' && sym->name[name_len] != '@')
		return false;

	/* If user does not specify symbol version, then we got a match */
	if (!lib_ver)
		return true;

	/* If user specifies symbol version, for dynamic symbols,
	 * get version name from ELF verdef section for comparison.
	 */
	if (sh_type == SHT_DYNSYM) {
		ver_name = elf_get_vername(iter, sym->ver);
		if (!ver_name)
			return false;
		return strcmp(ver_name, lib_ver) == 0;
	}

	/* For normal symbols, it is already in form of func@LIB_VER */
	return strcmp(sym->name, name) == 0;
}
|
|
||||||
/* Transform symbol's virtual address (absolute for binaries and relative
 * for shared libs) into file offset, which is what kernel is expecting
 * for uprobe/uretprobe attachment.
 * See Documentation/trace/uprobetracer.rst for more details. This is done
 * by looking up symbol's containing section's header and using iter's virtual
 * address (sh_addr) and corresponding file offset (sh_offset) to transform
 * sym.st_value (virtual address) into desired final file offset.
 */
static unsigned long elf_sym_offset(struct elf_sym *sym)
{
	/* offset-within-section is identical in memory and in the file */
	return sym->sym.st_value - sym->sh.sh_addr + sym->sh.sh_offset;
}
|
|
||||||
/* Find offset of function name in the provided ELF object. "binary_path" is
 * the path to the ELF binary represented by "elf", and only used for error
 * reporting matters. "name" matches symbol name or name@@LIB for library
 * functions.
 *
 * Returns the file offset (> 0) on success, or a negative error: -ENOENT if
 * the symbol is absent (or resolves to 0), -LIBBPF_ERRNO__FORMAT on a
 * malformed ELF or an ambiguous non-weak match.
 */
long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
{
	int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
	const char *at_symbol, *lib_ver;
	bool is_shared_lib;
	long ret = -ENOENT;
	size_t name_len;
	GElf_Ehdr ehdr;

	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
		ret = -LIBBPF_ERRNO__FORMAT;
		goto out;
	}
	/* for shared lib case, we do not need to calculate relative offset */
	is_shared_lib = ehdr.e_type == ET_DYN;

	/* Does name specify "@@LIB_VER" or "@LIB_VER" ? */
	at_symbol = strchr(name, '@');
	if (at_symbol) {
		name_len = at_symbol - name;
		/* skip second @ if it's @@LIB_VER case */
		if (at_symbol[1] == '@')
			at_symbol++;
		lib_ver = at_symbol + 1;
	} else {
		name_len = strlen(name);
		lib_ver = NULL;
	}

	/* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
	 * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
	 * linked binary may not have SHT_DYMSYM, so absence of a section should not be
	 * reported as a warning/error.
	 */
	for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
		struct elf_sym_iter iter;
		struct elf_sym *sym;
		int last_bind = -1;
		int cur_bind;

		ret = elf_sym_iter_new(&iter, elf, binary_path, sh_types[i], STT_FUNC);
		if (ret == -ENOENT)
			continue;
		if (ret)
			goto out;

		while ((sym = elf_sym_iter_next(&iter))) {
			if (!symbol_match(&iter, sh_types[i], sym, name, name_len, lib_ver))
				continue;

			cur_bind = GELF_ST_BIND(sym->sym.st_info);

			/* ret > 0 means we already found one candidate */
			if (ret > 0) {
				/* handle multiple matches */
				if (elf_sym_offset(sym) == ret) {
					/* same offset, no problem */
					continue;
				} else if (last_bind != STB_WEAK && cur_bind != STB_WEAK) {
					/* Only accept one non-weak bind. */
					pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
						sym->name, name, binary_path);
					ret = -LIBBPF_ERRNO__FORMAT;
					goto out;
				} else if (cur_bind == STB_WEAK) {
					/* already have a non-weak bind, and
					 * this is a weak bind, so ignore.
					 */
					continue;
				}
			}

			/* new candidate (or non-weak overriding a weak one) */
			ret = elf_sym_offset(sym);
			last_bind = cur_bind;
		}
		if (ret > 0)
			break;
	}

	if (ret > 0) {
		pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
			 ret);
	} else {
		if (ret == 0) {
			pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
				is_shared_lib ? "should not be 0 in a shared library" :
						"try using shared library path instead");
			ret = -ENOENT;
		} else {
			pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
		}
	}
out:
	return ret;
}
|
|
||||||
/* Find offset of function name in ELF object specified by path. "name" matches
|
|
||||||
* symbol name or name@@LIB for library functions.
|
|
||||||
*/
|
|
||||||
long elf_find_func_offset_from_file(const char *binary_path, const char *name)
|
|
||||||
{
|
|
||||||
struct elf_fd elf_fd;
|
|
||||||
long ret = -ENOENT;
|
|
||||||
|
|
||||||
ret = elf_open(binary_path, &elf_fd);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
ret = elf_find_func_offset(elf_fd.elf, binary_path, name);
|
|
||||||
elf_close(&elf_fd);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Lookup entry used by elf_resolve_syms_offsets(): the array is sorted by
 * name for bsearch(); 'idx' preserves the caller-supplied ordering so offsets
 * are written back to the right slot.
 */
struct symbol {
	const char *name;	/* symbol name to resolve */
	int bind;		/* STB_* binding of the best match so far */
	int idx;		/* position in the caller's syms array */
};
|
|
||||||
static int symbol_cmp(const void *a, const void *b)
|
|
||||||
{
|
|
||||||
const struct symbol *sym_a = a;
|
|
||||||
const struct symbol *sym_b = b;
|
|
||||||
|
|
||||||
return strcmp(sym_a->name, sym_b->name);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
 * Return offsets in @poffsets for symbols specified in @syms array argument.
 * On success returns 0 and offsets are returned in allocated array with @cnt
 * size, that needs to be released by the caller.
 *
 * Fails with -ENOENT unless *every* requested symbol resolves; -ESRCH on an
 * ambiguous non-weak duplicate.
 */
int elf_resolve_syms_offsets(const char *binary_path, int cnt,
			     const char **syms, unsigned long **poffsets,
			     int st_type)
{
	int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
	int err = 0, i, cnt_done = 0;
	unsigned long *offsets;
	struct symbol *symbols;
	struct elf_fd elf_fd;

	err = elf_open(binary_path, &elf_fd);
	if (err)
		return err;

	offsets = calloc(cnt, sizeof(*offsets));
	symbols = calloc(cnt, sizeof(*symbols));

	if (!offsets || !symbols) {
		err = -ENOMEM;
		goto out;
	}

	/* remember original positions so results land in caller order */
	for (i = 0; i < cnt; i++) {
		symbols[i].name = syms[i];
		symbols[i].idx = i;
	}

	/* sort by name to enable bsearch() per ELF symbol below */
	qsort(symbols, cnt, sizeof(*symbols), symbol_cmp);

	for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
		struct elf_sym_iter iter;
		struct elf_sym *sym;

		err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], st_type);
		if (err == -ENOENT)
			continue;
		if (err)
			goto out;

		while ((sym = elf_sym_iter_next(&iter))) {
			unsigned long sym_offset = elf_sym_offset(sym);
			int bind = GELF_ST_BIND(sym->sym.st_info);
			struct symbol *found, tmp = {
				.name = sym->name,
			};
			unsigned long *offset;

			found = bsearch(&tmp, symbols, cnt, sizeof(*symbols), symbol_cmp);
			if (!found)
				continue;

			offset = &offsets[found->idx];
			if (*offset > 0) {
				/* same offset, no problem */
				if (*offset == sym_offset)
					continue;
				/* handle multiple matches */
				if (found->bind != STB_WEAK && bind != STB_WEAK) {
					/* Only accept one non-weak bind. */
					pr_warn("elf: ambiguous match found '%s@%lu' in '%s' previous offset %lu\n",
						sym->name, sym_offset, binary_path, *offset);
					err = -ESRCH;
					goto out;
				} else if (bind == STB_WEAK) {
					/* already have a non-weak bind, and
					 * this is a weak bind, so ignore.
					 */
					continue;
				}
			} else {
				/* first resolution of this requested symbol */
				cnt_done++;
			}
			*offset = sym_offset;
			found->bind = bind;
		}
	}

	/* all-or-nothing: every requested symbol must have resolved */
	if (cnt != cnt_done) {
		err = -ENOENT;
		goto out;
	}

	*poffsets = offsets;

out:
	free(symbols);
	if (err)
		free(offsets);
	elf_close(&elf_fd);
	return err;
}
|
|
||||||
/*
 * Return offsets in @poffsets for symbols specified by @pattern argument.
 * On success returns 0 and offsets are returned in allocated @poffsets
 * array with the @pctn size, that needs to be released by the caller.
 *
 * Returns -ENOENT if no function symbol matches @pattern in either symbol
 * table.
 */
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
				unsigned long **poffsets, size_t *pcnt)
{
	int sh_types[2] = { SHT_SYMTAB, SHT_DYNSYM };
	unsigned long *offsets = NULL;
	size_t cap = 0, cnt = 0;
	struct elf_fd elf_fd;
	int err = 0, i;

	err = elf_open(binary_path, &elf_fd);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
		struct elf_sym_iter iter;
		struct elf_sym *sym;

		err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
		if (err == -ENOENT)
			continue;
		if (err)
			goto out;

		while ((sym = elf_sym_iter_next(&iter))) {
			if (!glob_match(sym->name, pattern))
				continue;

			/* grow the result array on demand */
			err = libbpf_ensure_mem((void **) &offsets, &cap, sizeof(*offsets),
						cnt + 1);
			if (err)
				goto out;

			offsets[cnt++] = elf_sym_offset(sym);
		}

		/* If we found anything in the first symbol section,
		 * do not search others to avoid duplicates.
		 */
		if (cnt)
			break;
	}

	if (cnt) {
		*poffsets = offsets;
		*pcnt = cnt;
	} else {
		err = -ENOENT;
	}

out:
	if (err)
		free(offsets);
	elf_close(&elf_fd);
	return err;
}
610
src/features.c
610
src/features.c
@@ -1,610 +0,0 @@
|
|||||||
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
|
||||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
|
||||||
#include <linux/kernel.h>
|
|
||||||
#include <linux/filter.h>
|
|
||||||
#include "bpf.h"
|
|
||||||
#include "libbpf.h"
|
|
||||||
#include "libbpf_common.h"
|
|
||||||
#include "libbpf_internal.h"
|
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
static inline __u64 ptr_to_u64(const void *ptr)
|
|
||||||
{
|
|
||||||
return (__u64)(unsigned long)ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
int probe_fd(int fd)
|
|
||||||
{
|
|
||||||
if (fd >= 0)
|
|
||||||
close(fd);
|
|
||||||
return fd >= 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_prog_name(int token_fd)
|
|
||||||
{
|
|
||||||
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
union bpf_attr attr;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
memset(&attr, 0, attr_sz);
|
|
||||||
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
|
|
||||||
attr.license = ptr_to_u64("GPL");
|
|
||||||
attr.insns = ptr_to_u64(insns);
|
|
||||||
attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
|
|
||||||
attr.prog_token_fd = token_fd;
|
|
||||||
if (token_fd)
|
|
||||||
attr.prog_flags |= BPF_F_TOKEN_FD;
|
|
||||||
libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
|
|
||||||
|
|
||||||
/* make sure loading with name works */
|
|
||||||
ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
|
|
||||||
return probe_fd(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_global_data(int token_fd)
|
|
||||||
{
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
|
|
||||||
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
LIBBPF_OPTS(bpf_map_create_opts, map_opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
int ret, map, insn_cnt = ARRAY_SIZE(insns);
|
|
||||||
|
|
||||||
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
|
|
||||||
if (map < 0) {
|
|
||||||
ret = -errno;
|
|
||||||
pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
|
|
||||||
__func__, errstr(ret));
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
insns[0].imm = map;
|
|
||||||
|
|
||||||
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
|
|
||||||
close(map);
|
|
||||||
return probe_fd(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0int";
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_func(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0int\0x\0a";
|
|
||||||
/* void x(int a) {} */
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
|
||||||
/* FUNC_PROTO */ /* [2] */
|
|
||||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
|
|
||||||
BTF_PARAM_ENC(7, 1),
|
|
||||||
/* FUNC x */ /* [3] */
|
|
||||||
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_func_global(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0int\0x\0a";
|
|
||||||
/* static void x(int a) {} */
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
|
||||||
/* FUNC_PROTO */ /* [2] */
|
|
||||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
|
|
||||||
BTF_PARAM_ENC(7, 1),
|
|
||||||
/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
|
|
||||||
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_datasec(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0x\0.data";
|
|
||||||
/* static int a; */
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
|
||||||
/* VAR x */ /* [2] */
|
|
||||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
|
||||||
BTF_VAR_STATIC,
|
|
||||||
/* DATASEC val */ /* [3] */
|
|
||||||
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
|
|
||||||
BTF_VAR_SECINFO_ENC(2, 0, 4),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_qmark_datasec(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0x\0?.data";
|
|
||||||
/* static int a; */
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
|
||||||
/* VAR x */ /* [2] */
|
|
||||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
|
||||||
BTF_VAR_STATIC,
|
|
||||||
/* DATASEC ?.data */ /* [3] */
|
|
||||||
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
|
|
||||||
BTF_VAR_SECINFO_ENC(2, 0, 4),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_float(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0float";
|
|
||||||
__u32 types[] = {
|
|
||||||
/* float */
|
|
||||||
BTF_TYPE_FLOAT_ENC(1, 4),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_decl_tag(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0tag";
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
|
||||||
/* VAR x */ /* [2] */
|
|
||||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
|
||||||
BTF_VAR_STATIC,
|
|
||||||
/* attr */
|
|
||||||
BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_type_tag(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0tag";
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
|
||||||
/* attr */
|
|
||||||
BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
|
|
||||||
/* ptr */
|
|
||||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_array_mmap(int token_fd)
|
|
||||||
{
|
|
||||||
LIBBPF_OPTS(bpf_map_create_opts, opts,
|
|
||||||
.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
|
|
||||||
.token_fd = token_fd,
|
|
||||||
);
|
|
||||||
int fd;
|
|
||||||
|
|
||||||
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
|
|
||||||
return probe_fd(fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_exp_attach_type(int token_fd)
|
|
||||||
{
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
|
||||||
.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
int fd, insn_cnt = ARRAY_SIZE(insns);
|
|
||||||
|
|
||||||
/* use any valid combination of program type and (optional)
|
|
||||||
* non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
|
|
||||||
* to see if kernel supports expected_attach_type field for
|
|
||||||
* BPF_PROG_LOAD command
|
|
||||||
*/
|
|
||||||
fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
|
|
||||||
return probe_fd(fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_probe_read_kernel(int token_fd)
|
|
||||||
{
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
|
|
||||||
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
|
|
||||||
BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
|
|
||||||
BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
|
|
||||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
int fd, insn_cnt = ARRAY_SIZE(insns);
|
|
||||||
|
|
||||||
fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
|
|
||||||
return probe_fd(fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_prog_bind_map(int token_fd)
|
|
||||||
{
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
LIBBPF_OPTS(bpf_map_create_opts, map_opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
|
|
||||||
|
|
||||||
map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
|
|
||||||
if (map < 0) {
|
|
||||||
ret = -errno;
|
|
||||||
pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
|
|
||||||
__func__, errstr(ret));
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
|
|
||||||
if (prog < 0) {
|
|
||||||
close(map);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = bpf_prog_bind_map(prog, map, NULL);
|
|
||||||
|
|
||||||
close(map);
|
|
||||||
close(prog);
|
|
||||||
|
|
||||||
return ret >= 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_module_btf(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0int";
|
|
||||||
__u32 types[] = {
|
|
||||||
/* int */
|
|
||||||
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
|
|
||||||
};
|
|
||||||
struct bpf_btf_info info;
|
|
||||||
__u32 len = sizeof(info);
|
|
||||||
char name[16];
|
|
||||||
int fd, err;
|
|
||||||
|
|
||||||
fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
|
|
||||||
if (fd < 0)
|
|
||||||
return 0; /* BTF not supported at all */
|
|
||||||
|
|
||||||
memset(&info, 0, sizeof(info));
|
|
||||||
info.name = ptr_to_u64(name);
|
|
||||||
info.name_len = sizeof(name);
|
|
||||||
|
|
||||||
/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
|
|
||||||
* kernel's module BTF support coincides with support for
|
|
||||||
* name/name_len fields in struct bpf_btf_info.
|
|
||||||
*/
|
|
||||||
err = bpf_btf_get_info_by_fd(fd, &info, &len);
|
|
||||||
close(fd);
|
|
||||||
return !err;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_perf_link(int token_fd)
|
|
||||||
{
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
int prog_fd, link_fd, err;
|
|
||||||
|
|
||||||
prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
|
|
||||||
insns, ARRAY_SIZE(insns), &opts);
|
|
||||||
if (prog_fd < 0)
|
|
||||||
return -errno;
|
|
||||||
|
|
||||||
/* use invalid perf_event FD to get EBADF, if link is supported;
|
|
||||||
* otherwise EINVAL should be returned
|
|
||||||
*/
|
|
||||||
link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
|
|
||||||
err = -errno; /* close() can clobber errno */
|
|
||||||
|
|
||||||
if (link_fd >= 0)
|
|
||||||
close(link_fd);
|
|
||||||
close(prog_fd);
|
|
||||||
|
|
||||||
return link_fd < 0 && err == -EBADF;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_uprobe_multi_link(int token_fd)
|
|
||||||
{
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
|
|
||||||
.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
int prog_fd, link_fd, err;
|
|
||||||
unsigned long offset = 0;
|
|
||||||
|
|
||||||
prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
|
|
||||||
insns, ARRAY_SIZE(insns), &load_opts);
|
|
||||||
if (prog_fd < 0)
|
|
||||||
return -errno;
|
|
||||||
|
|
||||||
/* Creating uprobe in '/' binary should fail with -EBADF. */
|
|
||||||
link_opts.uprobe_multi.path = "/";
|
|
||||||
link_opts.uprobe_multi.offsets = &offset;
|
|
||||||
link_opts.uprobe_multi.cnt = 1;
|
|
||||||
|
|
||||||
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
|
|
||||||
err = -errno; /* close() can clobber errno */
|
|
||||||
|
|
||||||
if (link_fd >= 0 || err != -EBADF) {
|
|
||||||
if (link_fd >= 0)
|
|
||||||
close(link_fd);
|
|
||||||
close(prog_fd);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Initial multi-uprobe support in kernel didn't handle PID filtering
|
|
||||||
* correctly (it was doing thread filtering, not process filtering).
|
|
||||||
* So now we'll detect if PID filtering logic was fixed, and, if not,
|
|
||||||
* we'll pretend multi-uprobes are not supported, if not.
|
|
||||||
* Multi-uprobes are used in USDT attachment logic, and we need to be
|
|
||||||
* conservative here, because multi-uprobe selection happens early at
|
|
||||||
* load time, while the use of PID filtering is known late at
|
|
||||||
* attachment time, at which point it's too late to undo multi-uprobe
|
|
||||||
* selection.
|
|
||||||
*
|
|
||||||
* Creating uprobe with pid == -1 for (invalid) '/' binary will fail
|
|
||||||
* early with -EINVAL on kernels with fixed PID filtering logic;
|
|
||||||
* otherwise -ESRCH would be returned if passed correct binary path
|
|
||||||
* (but we'll just get -BADF, of course).
|
|
||||||
*/
|
|
||||||
link_opts.uprobe_multi.pid = -1; /* invalid PID */
|
|
||||||
link_opts.uprobe_multi.path = "/"; /* invalid path */
|
|
||||||
link_opts.uprobe_multi.offsets = &offset;
|
|
||||||
link_opts.uprobe_multi.cnt = 1;
|
|
||||||
|
|
||||||
link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
|
|
||||||
err = -errno; /* close() can clobber errno */
|
|
||||||
|
|
||||||
if (link_fd >= 0)
|
|
||||||
close(link_fd);
|
|
||||||
close(prog_fd);
|
|
||||||
|
|
||||||
return link_fd < 0 && err == -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_bpf_cookie(int token_fd)
|
|
||||||
{
|
|
||||||
struct bpf_insn insns[] = {
|
|
||||||
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
int ret, insn_cnt = ARRAY_SIZE(insns);
|
|
||||||
|
|
||||||
ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
|
|
||||||
return probe_fd(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_btf_enum64(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0enum64";
|
|
||||||
__u32 types[] = {
|
|
||||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
|
|
||||||
};
|
|
||||||
|
|
||||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
|
||||||
strs, sizeof(strs), token_fd));
|
|
||||||
}
|
|
||||||
|
|
||||||
static int probe_kern_arg_ctx_tag(int token_fd)
|
|
||||||
{
|
|
||||||
static const char strs[] = "\0a\0b\0arg:ctx\0";
|
|
||||||
const __u32 types[] = {
|
|
||||||
/* [1] INT */
|
|
||||||
BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
|
|
||||||
/* [2] PTR -> VOID */
|
|
||||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
|
|
||||||
/* [3] FUNC_PROTO `int(void *a)` */
|
|
||||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
|
|
||||||
BTF_PARAM_ENC(1 /* "a" */, 2),
|
|
||||||
/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
|
|
||||||
BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
|
|
||||||
/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
|
|
||||||
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
|
|
||||||
BTF_PARAM_ENC(3 /* "b" */, 2),
|
|
||||||
/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
|
|
||||||
BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
|
|
||||||
/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
|
|
||||||
BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
|
|
||||||
};
|
|
||||||
const struct bpf_insn insns[] = {
|
|
||||||
/* main prog */
|
|
||||||
BPF_CALL_REL(+1),
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
/* global subprog */
|
|
||||||
BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
|
|
||||||
BPF_EXIT_INSN(),
|
|
||||||
};
|
|
||||||
const struct bpf_func_info_min func_infos[] = {
|
|
||||||
{ 0, 4 }, /* main prog -> FUNC 'a' */
|
|
||||||
{ 2, 6 }, /* subprog -> FUNC 'b' */
|
|
||||||
};
|
|
||||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
|
|
||||||
|
|
||||||
btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
|
|
||||||
if (btf_fd < 0)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
opts.prog_btf_fd = btf_fd;
|
|
||||||
opts.func_info = &func_infos;
|
|
||||||
opts.func_info_cnt = ARRAY_SIZE(func_infos);
|
|
||||||
opts.func_info_rec_size = sizeof(func_infos[0]);
|
|
||||||
|
|
||||||
prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
|
|
||||||
"GPL", insns, insn_cnt, &opts);
|
|
||||||
close(btf_fd);
|
|
||||||
|
|
||||||
return probe_fd(prog_fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
typedef int (*feature_probe_fn)(int /* token_fd */);
|
|
||||||
|
|
||||||
static struct kern_feature_cache feature_cache;
|
|
||||||
|
|
||||||
static struct kern_feature_desc {
|
|
||||||
const char *desc;
|
|
||||||
feature_probe_fn probe;
|
|
||||||
} feature_probes[__FEAT_CNT] = {
|
|
||||||
[FEAT_PROG_NAME] = {
|
|
||||||
"BPF program name", probe_kern_prog_name,
|
|
||||||
},
|
|
||||||
[FEAT_GLOBAL_DATA] = {
|
|
||||||
"global variables", probe_kern_global_data,
|
|
||||||
},
|
|
||||||
[FEAT_BTF] = {
|
|
||||||
"minimal BTF", probe_kern_btf,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_FUNC] = {
|
|
||||||
"BTF functions", probe_kern_btf_func,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_GLOBAL_FUNC] = {
|
|
||||||
"BTF global function", probe_kern_btf_func_global,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_DATASEC] = {
|
|
||||||
"BTF data section and variable", probe_kern_btf_datasec,
|
|
||||||
},
|
|
||||||
[FEAT_ARRAY_MMAP] = {
|
|
||||||
"ARRAY map mmap()", probe_kern_array_mmap,
|
|
||||||
},
|
|
||||||
[FEAT_EXP_ATTACH_TYPE] = {
|
|
||||||
"BPF_PROG_LOAD expected_attach_type attribute",
|
|
||||||
probe_kern_exp_attach_type,
|
|
||||||
},
|
|
||||||
[FEAT_PROBE_READ_KERN] = {
|
|
||||||
"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
|
|
||||||
},
|
|
||||||
[FEAT_PROG_BIND_MAP] = {
|
|
||||||
"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
|
|
||||||
},
|
|
||||||
[FEAT_MODULE_BTF] = {
|
|
||||||
"module BTF support", probe_module_btf,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_FLOAT] = {
|
|
||||||
"BTF_KIND_FLOAT support", probe_kern_btf_float,
|
|
||||||
},
|
|
||||||
[FEAT_PERF_LINK] = {
|
|
||||||
"BPF perf link support", probe_perf_link,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_DECL_TAG] = {
|
|
||||||
"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_TYPE_TAG] = {
|
|
||||||
"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
|
|
||||||
},
|
|
||||||
[FEAT_MEMCG_ACCOUNT] = {
|
|
||||||
"memcg-based memory accounting", probe_memcg_account,
|
|
||||||
},
|
|
||||||
[FEAT_BPF_COOKIE] = {
|
|
||||||
"BPF cookie support", probe_kern_bpf_cookie,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_ENUM64] = {
|
|
||||||
"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
|
|
||||||
},
|
|
||||||
[FEAT_SYSCALL_WRAPPER] = {
|
|
||||||
"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
|
|
||||||
},
|
|
||||||
[FEAT_UPROBE_MULTI_LINK] = {
|
|
||||||
"BPF multi-uprobe link support", probe_uprobe_multi_link,
|
|
||||||
},
|
|
||||||
[FEAT_ARG_CTX_TAG] = {
|
|
||||||
"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
|
|
||||||
},
|
|
||||||
[FEAT_BTF_QMARK_DATASEC] = {
|
|
||||||
"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
|
|
||||||
{
|
|
||||||
struct kern_feature_desc *feat = &feature_probes[feat_id];
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
/* assume global feature cache, unless custom one is provided */
|
|
||||||
if (!cache)
|
|
||||||
cache = &feature_cache;
|
|
||||||
|
|
||||||
if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
|
|
||||||
ret = feat->probe(cache->token_fd);
|
|
||||||
if (ret > 0) {
|
|
||||||
WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
|
|
||||||
} else if (ret == 0) {
|
|
||||||
WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
|
|
||||||
} else {
|
|
||||||
pr_warn("Detection of kernel %s support failed: %s\n",
|
|
||||||
feat->desc, errstr(ret));
|
|
||||||
WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
|
|
||||||
}
|
|
||||||
186
src/gen_loader.c
186
src/gen_loader.c
@@ -14,7 +14,6 @@
|
|||||||
#include "bpf_gen_internal.h"
|
#include "bpf_gen_internal.h"
|
||||||
#include "skel_internal.h"
|
#include "skel_internal.h"
|
||||||
#include <asm/byteorder.h>
|
#include <asm/byteorder.h>
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
#define MAX_USED_MAPS 64
|
#define MAX_USED_MAPS 64
|
||||||
#define MAX_USED_PROGS 32
|
#define MAX_USED_PROGS 32
|
||||||
@@ -394,7 +393,7 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
|
|||||||
blob_fd_array_off(gen, i));
|
blob_fd_array_off(gen, i));
|
||||||
emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
|
emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
|
||||||
emit(gen, BPF_EXIT_INSN());
|
emit(gen, BPF_EXIT_INSN());
|
||||||
pr_debug("gen: finish %s\n", errstr(gen->error));
|
pr_debug("gen: finish %d\n", gen->error);
|
||||||
if (!gen->error) {
|
if (!gen->error) {
|
||||||
struct gen_loader_opts *opts = gen->opts;
|
struct gen_loader_opts *opts = gen->opts;
|
||||||
|
|
||||||
@@ -402,15 +401,6 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
|
|||||||
opts->insns_sz = gen->insn_cur - gen->insn_start;
|
opts->insns_sz = gen->insn_cur - gen->insn_start;
|
||||||
opts->data = gen->data_start;
|
opts->data = gen->data_start;
|
||||||
opts->data_sz = gen->data_cur - gen->data_start;
|
opts->data_sz = gen->data_cur - gen->data_start;
|
||||||
|
|
||||||
/* use target endianness for embedded loader */
|
|
||||||
if (gen->swapped_endian) {
|
|
||||||
struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
|
|
||||||
int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
|
|
||||||
|
|
||||||
for (i = 0; i < insn_cnt; i++)
|
|
||||||
bpf_insn_bswap(insn++);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return gen->error;
|
return gen->error;
|
||||||
}
|
}
|
||||||
@@ -424,28 +414,6 @@ void bpf_gen__free(struct bpf_gen *gen)
|
|||||||
free(gen);
|
free(gen);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Fields of bpf_attr are set to values in native byte-order before being
|
|
||||||
* written to the target-bound data blob, and may need endian conversion.
|
|
||||||
* This macro allows providing the correct value in situ more simply than
|
|
||||||
* writing a separate converter for *all fields* of *all records* included
|
|
||||||
* in union bpf_attr. Note that sizeof(rval) should match the assignment
|
|
||||||
* target to avoid runtime problems.
|
|
||||||
*/
|
|
||||||
#define tgt_endian(rval) ({ \
|
|
||||||
typeof(rval) _val = (rval); \
|
|
||||||
if (gen->swapped_endian) { \
|
|
||||||
switch (sizeof(_val)) { \
|
|
||||||
case 1: break; \
|
|
||||||
case 2: _val = bswap_16(_val); break; \
|
|
||||||
case 4: _val = bswap_32(_val); break; \
|
|
||||||
case 8: _val = bswap_64(_val); break; \
|
|
||||||
default: pr_warn("unsupported bswap size!\n"); \
|
|
||||||
} \
|
|
||||||
} \
|
|
||||||
_val; \
|
|
||||||
})
|
|
||||||
|
|
||||||
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
|
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
|
||||||
__u32 btf_raw_size)
|
__u32 btf_raw_size)
|
||||||
{
|
{
|
||||||
@@ -454,12 +422,11 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
|
|||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
|
||||||
memset(&attr, 0, attr_size);
|
memset(&attr, 0, attr_size);
|
||||||
|
pr_debug("gen: load_btf: size %d\n", btf_raw_size);
|
||||||
btf_data = add_data(gen, btf_raw_data, btf_raw_size);
|
btf_data = add_data(gen, btf_raw_data, btf_raw_size);
|
||||||
|
|
||||||
attr.btf_size = tgt_endian(btf_raw_size);
|
attr.btf_size = btf_raw_size;
|
||||||
btf_load_attr = add_data(gen, &attr, attr_size);
|
btf_load_attr = add_data(gen, &attr, attr_size);
|
||||||
pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
|
|
||||||
btf_data, btf_raw_size, btf_load_attr, attr_size);
|
|
||||||
|
|
||||||
/* populate union bpf_attr with user provided log details */
|
/* populate union bpf_attr with user provided log details */
|
||||||
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
|
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
|
||||||
@@ -490,29 +457,28 @@ void bpf_gen__map_create(struct bpf_gen *gen,
|
|||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
|
||||||
memset(&attr, 0, attr_size);
|
memset(&attr, 0, attr_size);
|
||||||
attr.map_type = tgt_endian(map_type);
|
attr.map_type = map_type;
|
||||||
attr.key_size = tgt_endian(key_size);
|
attr.key_size = key_size;
|
||||||
attr.value_size = tgt_endian(value_size);
|
attr.value_size = value_size;
|
||||||
attr.map_flags = tgt_endian(map_attr->map_flags);
|
attr.map_flags = map_attr->map_flags;
|
||||||
attr.map_extra = tgt_endian(map_attr->map_extra);
|
attr.map_extra = map_attr->map_extra;
|
||||||
if (map_name)
|
if (map_name)
|
||||||
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
||||||
attr.numa_node = tgt_endian(map_attr->numa_node);
|
attr.numa_node = map_attr->numa_node;
|
||||||
attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
|
attr.map_ifindex = map_attr->map_ifindex;
|
||||||
attr.max_entries = tgt_endian(max_entries);
|
attr.max_entries = max_entries;
|
||||||
attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
|
attr.btf_key_type_id = map_attr->btf_key_type_id;
|
||||||
attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);
|
attr.btf_value_type_id = map_attr->btf_value_type_id;
|
||||||
|
|
||||||
|
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
|
||||||
|
attr.map_name, map_idx, map_type, attr.btf_value_type_id);
|
||||||
|
|
||||||
map_create_attr = add_data(gen, &attr, attr_size);
|
map_create_attr = add_data(gen, &attr, attr_size);
|
||||||
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
|
if (attr.btf_value_type_id)
|
||||||
map_name, map_idx, map_type, map_attr->btf_value_type_id,
|
|
||||||
map_create_attr, attr_size);
|
|
||||||
|
|
||||||
if (map_attr->btf_value_type_id)
|
|
||||||
/* populate union bpf_attr with btf_fd saved in the stack earlier */
|
/* populate union bpf_attr with btf_fd saved in the stack earlier */
|
||||||
move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
|
move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
|
||||||
stack_off(btf_fd));
|
stack_off(btf_fd));
|
||||||
switch (map_type) {
|
switch (attr.map_type) {
|
||||||
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
|
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
|
||||||
case BPF_MAP_TYPE_HASH_OF_MAPS:
|
case BPF_MAP_TYPE_HASH_OF_MAPS:
|
||||||
move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
|
move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
|
||||||
@@ -532,8 +498,8 @@ void bpf_gen__map_create(struct bpf_gen *gen,
|
|||||||
/* emit MAP_CREATE command */
|
/* emit MAP_CREATE command */
|
||||||
emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
|
emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
|
||||||
debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
|
debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
|
||||||
map_name, map_idx, map_type, value_size,
|
attr.map_name, map_idx, map_type, value_size,
|
||||||
map_attr->btf_value_type_id);
|
attr.btf_value_type_id);
|
||||||
emit_check_err(gen);
|
emit_check_err(gen);
|
||||||
/* remember map_fd in the stack, if successful */
|
/* remember map_fd in the stack, if successful */
|
||||||
if (map_idx < 0) {
|
if (map_idx < 0) {
|
||||||
@@ -818,12 +784,12 @@ log:
|
|||||||
emit_ksym_relo_log(gen, relo, kdesc->ref);
|
emit_ksym_relo_log(gen, relo, kdesc->ref);
|
||||||
}
|
}
|
||||||
|
|
||||||
static __u32 src_reg_mask(struct bpf_gen *gen)
|
static __u32 src_reg_mask(void)
|
||||||
{
|
{
|
||||||
#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
|
#if defined(__LITTLE_ENDIAN_BITFIELD)
|
||||||
return gen->swapped_endian ? 0xf0 : 0x0f;
|
return 0x0f; /* src_reg,dst_reg,... */
|
||||||
#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
|
#elif defined(__BIG_ENDIAN_BITFIELD)
|
||||||
return gen->swapped_endian ? 0x0f : 0xf0;
|
return 0xf0; /* dst_reg,src_reg,... */
|
||||||
#else
|
#else
|
||||||
#error "Unsupported bit endianness, cannot proceed"
|
#error "Unsupported bit endianness, cannot proceed"
|
||||||
#endif
|
#endif
|
||||||
@@ -874,7 +840,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
|
|||||||
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
|
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
|
||||||
clear_src_reg:
|
clear_src_reg:
|
||||||
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
|
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
|
||||||
reg_mask = src_reg_mask(gen);
|
reg_mask = src_reg_mask();
|
||||||
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
|
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
|
||||||
emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
|
emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
|
||||||
emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
|
emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
|
||||||
@@ -965,94 +931,48 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
|
|||||||
cleanup_core_relo(gen);
|
cleanup_core_relo(gen);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Convert func, line, and core relo info blobs to target endianness */
|
|
||||||
static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
|
|
||||||
int core_relos, struct bpf_prog_load_opts *load_attr)
|
|
||||||
{
|
|
||||||
struct bpf_func_info *fi = gen->data_start + func_info;
|
|
||||||
struct bpf_line_info *li = gen->data_start + line_info;
|
|
||||||
struct bpf_core_relo *cr = gen->data_start + core_relos;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; i < load_attr->func_info_cnt; i++)
|
|
||||||
bpf_func_info_bswap(fi++);
|
|
||||||
|
|
||||||
for (i = 0; i < load_attr->line_info_cnt; i++)
|
|
||||||
bpf_line_info_bswap(li++);
|
|
||||||
|
|
||||||
for (i = 0; i < gen->core_relo_cnt; i++)
|
|
||||||
bpf_core_relo_bswap(cr++);
|
|
||||||
}
|
|
||||||
|
|
||||||
void bpf_gen__prog_load(struct bpf_gen *gen,
|
void bpf_gen__prog_load(struct bpf_gen *gen,
|
||||||
enum bpf_prog_type prog_type, const char *prog_name,
|
enum bpf_prog_type prog_type, const char *prog_name,
|
||||||
const char *license, struct bpf_insn *insns, size_t insn_cnt,
|
const char *license, struct bpf_insn *insns, size_t insn_cnt,
|
||||||
struct bpf_prog_load_opts *load_attr, int prog_idx)
|
struct bpf_prog_load_opts *load_attr, int prog_idx)
|
||||||
{
|
{
|
||||||
int func_info_tot_sz = load_attr->func_info_cnt *
|
|
||||||
load_attr->func_info_rec_size;
|
|
||||||
int line_info_tot_sz = load_attr->line_info_cnt *
|
|
||||||
load_attr->line_info_rec_size;
|
|
||||||
int core_relo_tot_sz = gen->core_relo_cnt *
|
|
||||||
sizeof(struct bpf_core_relo);
|
|
||||||
int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
|
int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
|
||||||
int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
|
int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
|
||||||
memset(&attr, 0, attr_size);
|
memset(&attr, 0, attr_size);
|
||||||
|
pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
|
||||||
|
prog_type, insn_cnt, prog_idx);
|
||||||
/* add license string to blob of bytes */
|
/* add license string to blob of bytes */
|
||||||
license_off = add_data(gen, license, strlen(license) + 1);
|
license_off = add_data(gen, license, strlen(license) + 1);
|
||||||
/* add insns to blob of bytes */
|
/* add insns to blob of bytes */
|
||||||
insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
|
insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
|
||||||
pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
|
|
||||||
prog_idx, prog_type, insns_off, insn_cnt, license_off);
|
|
||||||
|
|
||||||
/* convert blob insns to target endianness */
|
attr.prog_type = prog_type;
|
||||||
if (gen->swapped_endian) {
|
attr.expected_attach_type = load_attr->expected_attach_type;
|
||||||
struct bpf_insn *insn = gen->data_start + insns_off;
|
attr.attach_btf_id = load_attr->attach_btf_id;
|
||||||
int i;
|
attr.prog_ifindex = load_attr->prog_ifindex;
|
||||||
|
|
||||||
for (i = 0; i < insn_cnt; i++, insn++)
|
|
||||||
bpf_insn_bswap(insn);
|
|
||||||
}
|
|
||||||
|
|
||||||
attr.prog_type = tgt_endian(prog_type);
|
|
||||||
attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
|
|
||||||
attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
|
|
||||||
attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
|
|
||||||
attr.kern_version = 0;
|
attr.kern_version = 0;
|
||||||
attr.insn_cnt = tgt_endian((__u32)insn_cnt);
|
attr.insn_cnt = (__u32)insn_cnt;
|
||||||
attr.prog_flags = tgt_endian(load_attr->prog_flags);
|
attr.prog_flags = load_attr->prog_flags;
|
||||||
|
|
||||||
attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
|
attr.func_info_rec_size = load_attr->func_info_rec_size;
|
||||||
attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
|
attr.func_info_cnt = load_attr->func_info_cnt;
|
||||||
func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
|
func_info = add_data(gen, load_attr->func_info,
|
||||||
pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
|
attr.func_info_cnt * attr.func_info_rec_size);
|
||||||
func_info, load_attr->func_info_cnt,
|
|
||||||
load_attr->func_info_rec_size);
|
|
||||||
|
|
||||||
attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
|
attr.line_info_rec_size = load_attr->line_info_rec_size;
|
||||||
attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
|
attr.line_info_cnt = load_attr->line_info_cnt;
|
||||||
line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
|
line_info = add_data(gen, load_attr->line_info,
|
||||||
pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
|
attr.line_info_cnt * attr.line_info_rec_size);
|
||||||
line_info, load_attr->line_info_cnt,
|
|
||||||
load_attr->line_info_rec_size);
|
|
||||||
|
|
||||||
attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
|
attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
|
||||||
attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
|
attr.core_relo_cnt = gen->core_relo_cnt;
|
||||||
core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
|
core_relos = add_data(gen, gen->core_relos,
|
||||||
pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
|
attr.core_relo_cnt * attr.core_relo_rec_size);
|
||||||
core_relos, gen->core_relo_cnt,
|
|
||||||
sizeof(struct bpf_core_relo));
|
|
||||||
|
|
||||||
/* convert all info blobs to target endianness */
|
|
||||||
if (gen->swapped_endian)
|
|
||||||
info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);
|
|
||||||
|
|
||||||
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
||||||
prog_load_attr = add_data(gen, &attr, attr_size);
|
prog_load_attr = add_data(gen, &attr, attr_size);
|
||||||
pr_debug("gen: prog_load: attr: off %d size %d\n",
|
|
||||||
prog_load_attr, attr_size);
|
|
||||||
|
|
||||||
/* populate union bpf_attr with a pointer to license */
|
/* populate union bpf_attr with a pointer to license */
|
||||||
emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
|
emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
|
||||||
@@ -1120,6 +1040,7 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
|
|||||||
int zero = 0;
|
int zero = 0;
|
||||||
|
|
||||||
memset(&attr, 0, attr_size);
|
memset(&attr, 0, attr_size);
|
||||||
|
pr_debug("gen: map_update_elem: idx %d\n", map_idx);
|
||||||
|
|
||||||
value = add_data(gen, pvalue, value_size);
|
value = add_data(gen, pvalue, value_size);
|
||||||
key = add_data(gen, &zero, sizeof(zero));
|
key = add_data(gen, &zero, sizeof(zero));
|
||||||
@@ -1147,8 +1068,6 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
|
|||||||
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
|
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
|
||||||
|
|
||||||
map_update_attr = add_data(gen, &attr, attr_size);
|
map_update_attr = add_data(gen, &attr, attr_size);
|
||||||
pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
|
|
||||||
map_idx, value, value_size, map_update_attr, attr_size);
|
|
||||||
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
|
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
|
||||||
blob_fd_array_off(gen, map_idx));
|
blob_fd_array_off(gen, map_idx));
|
||||||
emit_rel_store(gen, attr_field(map_update_attr, key), key);
|
emit_rel_store(gen, attr_field(map_update_attr, key), key);
|
||||||
@@ -1165,16 +1084,14 @@ void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slo
|
|||||||
int attr_size = offsetofend(union bpf_attr, flags);
|
int attr_size = offsetofend(union bpf_attr, flags);
|
||||||
int map_update_attr, key;
|
int map_update_attr, key;
|
||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
int tgt_slot;
|
|
||||||
|
|
||||||
memset(&attr, 0, attr_size);
|
memset(&attr, 0, attr_size);
|
||||||
|
pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
|
||||||
|
outer_map_idx, slot, inner_map_idx);
|
||||||
|
|
||||||
tgt_slot = tgt_endian(slot);
|
key = add_data(gen, &slot, sizeof(slot));
|
||||||
key = add_data(gen, &tgt_slot, sizeof(tgt_slot));
|
|
||||||
|
|
||||||
map_update_attr = add_data(gen, &attr, attr_size);
|
map_update_attr = add_data(gen, &attr, attr_size);
|
||||||
pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
|
|
||||||
outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
|
|
||||||
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
|
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
|
||||||
blob_fd_array_off(gen, outer_map_idx));
|
blob_fd_array_off(gen, outer_map_idx));
|
||||||
emit_rel_store(gen, attr_field(map_update_attr, key), key);
|
emit_rel_store(gen, attr_field(map_update_attr, key), key);
|
||||||
@@ -1195,9 +1112,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
|
|||||||
union bpf_attr attr;
|
union bpf_attr attr;
|
||||||
|
|
||||||
memset(&attr, 0, attr_size);
|
memset(&attr, 0, attr_size);
|
||||||
|
pr_debug("gen: map_freeze: idx %d\n", map_idx);
|
||||||
map_freeze_attr = add_data(gen, &attr, attr_size);
|
map_freeze_attr = add_data(gen, &attr, attr_size);
|
||||||
pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
|
|
||||||
map_idx, map_freeze_attr, attr_size);
|
|
||||||
move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
|
move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
|
||||||
blob_fd_array_off(gen, map_idx));
|
blob_fd_array_off(gen, map_idx));
|
||||||
/* emit MAP_FREEZE command */
|
/* emit MAP_FREEZE command */
|
||||||
|
|||||||
@@ -166,8 +166,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
|
|||||||
* @bkt: integer used as a bucket loop cursor
|
* @bkt: integer used as a bucket loop cursor
|
||||||
*/
|
*/
|
||||||
#define hashmap__for_each_entry(map, cur, bkt) \
|
#define hashmap__for_each_entry(map, cur, bkt) \
|
||||||
for (bkt = 0; bkt < (map)->cap; bkt++) \
|
for (bkt = 0; bkt < map->cap; bkt++) \
|
||||||
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
|
for (cur = map->buckets[bkt]; cur; cur = cur->next)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
|
* hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
|
||||||
@@ -178,8 +178,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
|
|||||||
* @bkt: integer used as a bucket loop cursor
|
* @bkt: integer used as a bucket loop cursor
|
||||||
*/
|
*/
|
||||||
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
|
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
|
||||||
for (bkt = 0; bkt < (map)->cap; bkt++) \
|
for (bkt = 0; bkt < map->cap; bkt++) \
|
||||||
for (cur = (map)->buckets[bkt]; \
|
for (cur = map->buckets[bkt]; \
|
||||||
cur && ({tmp = cur->next; true; }); \
|
cur && ({tmp = cur->next; true; }); \
|
||||||
cur = tmp)
|
cur = tmp)
|
||||||
|
|
||||||
@@ -190,19 +190,19 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
|
|||||||
* @key: key to iterate entries for
|
* @key: key to iterate entries for
|
||||||
*/
|
*/
|
||||||
#define hashmap__for_each_key_entry(map, cur, _key) \
|
#define hashmap__for_each_key_entry(map, cur, _key) \
|
||||||
for (cur = (map)->buckets \
|
for (cur = map->buckets \
|
||||||
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
|
? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
|
||||||
: NULL; \
|
: NULL; \
|
||||||
cur; \
|
cur; \
|
||||||
cur = cur->next) \
|
cur = cur->next) \
|
||||||
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
|
if (map->equal_fn(cur->key, (_key), map->ctx))
|
||||||
|
|
||||||
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
|
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
|
||||||
for (cur = (map)->buckets \
|
for (cur = map->buckets \
|
||||||
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
|
? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
|
||||||
: NULL; \
|
: NULL; \
|
||||||
cur && ({ tmp = cur->next; true; }); \
|
cur && ({ tmp = cur->next; true; }); \
|
||||||
cur = tmp) \
|
cur = tmp) \
|
||||||
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
|
if (map->equal_fn(cur->key, (_key), map->ctx))
|
||||||
|
|
||||||
#endif /* __LIBBPF_HASHMAP_H */
|
#endif /* __LIBBPF_HASHMAP_H */
|
||||||
|
|||||||
3498
src/libbpf.c
3498
src/libbpf.c
File diff suppressed because it is too large
Load Diff
279
src/libbpf.h
279
src/libbpf.h
@@ -98,10 +98,7 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **libbpf_set_print()** sets user-provided log callback function to
|
* @brief **libbpf_set_print()** sets user-provided log callback function to
|
||||||
* be used for libbpf warnings and informational messages. If the user callback
|
* be used for libbpf warnings and informational messages.
|
||||||
* is not set, messages are logged to stderr by default. The verbosity of these
|
|
||||||
* messages can be controlled by setting the environment variable
|
|
||||||
* LIBBPF_LOG_LEVEL to either warn, info, or debug.
|
|
||||||
* @param fn The log print function. If NULL, libbpf won't print anything.
|
* @param fn The log print function. If NULL, libbpf won't print anything.
|
||||||
* @return Pointer to old print function.
|
* @return Pointer to old print function.
|
||||||
*
|
*
|
||||||
@@ -152,7 +149,7 @@ struct bpf_object_open_opts {
|
|||||||
* log_buf and log_level settings.
|
* log_buf and log_level settings.
|
||||||
*
|
*
|
||||||
* If specified, this log buffer will be passed for:
|
* If specified, this log buffer will be passed for:
|
||||||
* - each BPF progral load (BPF_PROG_LOAD) attempt, unless overridden
|
* - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden
|
||||||
* with bpf_program__set_log() on per-program level, to get
|
* with bpf_program__set_log() on per-program level, to get
|
||||||
* BPF verifier log output.
|
* BPF verifier log output.
|
||||||
* - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
|
* - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
|
||||||
@@ -180,29 +177,10 @@ struct bpf_object_open_opts {
|
|||||||
* logs through its print callback.
|
* logs through its print callback.
|
||||||
*/
|
*/
|
||||||
__u32 kernel_log_level;
|
__u32 kernel_log_level;
|
||||||
/* Path to BPF FS mount point to derive BPF token from.
|
|
||||||
*
|
|
||||||
* Created BPF token will be used for all bpf() syscall operations
|
|
||||||
* that accept BPF token (e.g., map creation, BTF and program loads,
|
|
||||||
* etc) automatically within instantiated BPF object.
|
|
||||||
*
|
|
||||||
* If bpf_token_path is not specified, libbpf will consult
|
|
||||||
* LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will be
|
|
||||||
* taken as a value of bpf_token_path option and will force libbpf to
|
|
||||||
* either create BPF token from provided custom BPF FS path, or will
|
|
||||||
* disable implicit BPF token creation, if envvar value is an empty
|
|
||||||
* string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH, if both are
|
|
||||||
* set at the same time.
|
|
||||||
*
|
|
||||||
* Setting bpf_token_path option to empty string disables libbpf's
|
|
||||||
* automatic attempt to create BPF token from default BPF FS mount
|
|
||||||
* point (/sys/fs/bpf), in case this default behavior is undesirable.
|
|
||||||
*/
|
|
||||||
const char *bpf_token_path;
|
|
||||||
|
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
#define bpf_object_open_opts__last_field bpf_token_path
|
#define bpf_object_open_opts__last_field kernel_log_level
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **bpf_object__open()** creates a bpf_object by opening
|
* @brief **bpf_object__open()** creates a bpf_object by opening
|
||||||
@@ -241,19 +219,6 @@ LIBBPF_API struct bpf_object *
|
|||||||
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
|
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
|
||||||
const struct bpf_object_open_opts *opts);
|
const struct bpf_object_open_opts *opts);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_object__prepare()** prepares BPF object for loading:
|
|
||||||
* performs ELF processing, relocations, prepares final state of BPF program
|
|
||||||
* instructions (accessible with bpf_program__insns()), creates and
|
|
||||||
* (potentially) pins maps. Leaves BPF object in the state ready for program
|
|
||||||
* loading.
|
|
||||||
* @param obj Pointer to a valid BPF object instance returned by
|
|
||||||
* **bpf_object__open*()** API
|
|
||||||
* @return 0, on success; negative error code, otherwise, error code is
|
|
||||||
* stored in errno
|
|
||||||
*/
|
|
||||||
int bpf_object__prepare(struct bpf_object *obj);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **bpf_object__load()** loads BPF object into kernel.
|
* @brief **bpf_object__load()** loads BPF object into kernel.
|
||||||
* @param obj Pointer to a valid BPF object instance returned by
|
* @param obj Pointer to a valid BPF object instance returned by
|
||||||
@@ -301,20 +266,11 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
|
|||||||
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
|
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
|
||||||
const char *path);
|
const char *path);
|
||||||
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
|
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
|
||||||
LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path);
|
|
||||||
|
|
||||||
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
|
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
|
||||||
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
|
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
|
||||||
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
|
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_object__token_fd** is an accessor for BPF token FD associated
|
|
||||||
* with BPF object.
|
|
||||||
* @param obj Pointer to a valid BPF object
|
|
||||||
* @return BPF token FD or -1, if it wasn't set
|
|
||||||
*/
|
|
||||||
LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj);
|
|
||||||
|
|
||||||
struct btf;
|
struct btf;
|
||||||
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
|
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
|
||||||
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
|
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
|
||||||
@@ -476,7 +432,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
|
|||||||
/**
|
/**
|
||||||
* @brief **bpf_program__attach()** is a generic function for attaching
|
* @brief **bpf_program__attach()** is a generic function for attaching
|
||||||
* a BPF program based on auto-detection of program type, attach type,
|
* a BPF program based on auto-detection of program type, attach type,
|
||||||
* and extra parameters, where applicable.
|
* and extra paremeters, where applicable.
|
||||||
*
|
*
|
||||||
* @param prog BPF program to attach
|
* @param prog BPF program to attach
|
||||||
* @return Reference to the newly created BPF link; or NULL is returned on error,
|
* @return Reference to the newly created BPF link; or NULL is returned on error,
|
||||||
@@ -563,73 +519,16 @@ struct bpf_kprobe_multi_opts {
|
|||||||
size_t cnt;
|
size_t cnt;
|
||||||
/* create return kprobes */
|
/* create return kprobes */
|
||||||
bool retprobe;
|
bool retprobe;
|
||||||
/* create session kprobes */
|
|
||||||
bool session;
|
|
||||||
/* enforce unique match */
|
|
||||||
bool unique_match;
|
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define bpf_kprobe_multi_opts__last_field unique_match
|
#define bpf_kprobe_multi_opts__last_field retprobe
|
||||||
|
|
||||||
LIBBPF_API struct bpf_link *
|
LIBBPF_API struct bpf_link *
|
||||||
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
|
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
|
||||||
const char *pattern,
|
const char *pattern,
|
||||||
const struct bpf_kprobe_multi_opts *opts);
|
const struct bpf_kprobe_multi_opts *opts);
|
||||||
|
|
||||||
struct bpf_uprobe_multi_opts {
|
|
||||||
/* size of this struct, for forward/backward compatibility */
|
|
||||||
size_t sz;
|
|
||||||
/* array of function symbols to attach to */
|
|
||||||
const char **syms;
|
|
||||||
/* array of function addresses to attach to */
|
|
||||||
const unsigned long *offsets;
|
|
||||||
/* optional, array of associated ref counter offsets */
|
|
||||||
const unsigned long *ref_ctr_offsets;
|
|
||||||
/* optional, array of associated BPF cookies */
|
|
||||||
const __u64 *cookies;
|
|
||||||
/* number of elements in syms/addrs/cookies arrays */
|
|
||||||
size_t cnt;
|
|
||||||
/* create return uprobes */
|
|
||||||
bool retprobe;
|
|
||||||
/* create session kprobes */
|
|
||||||
bool session;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define bpf_uprobe_multi_opts__last_field session
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
|
|
||||||
* to multiple uprobes with uprobe_multi link.
|
|
||||||
*
|
|
||||||
* User can specify 2 mutually exclusive set of inputs:
|
|
||||||
*
|
|
||||||
* 1) use only path/func_pattern/pid arguments
|
|
||||||
*
|
|
||||||
* 2) use path/pid with allowed combinations of
|
|
||||||
* syms/offsets/ref_ctr_offsets/cookies/cnt
|
|
||||||
*
|
|
||||||
* - syms and offsets are mutually exclusive
|
|
||||||
* - ref_ctr_offsets and cookies are optional
|
|
||||||
*
|
|
||||||
*
|
|
||||||
* @param prog BPF program to attach
|
|
||||||
* @param pid Process ID to attach the uprobe to, 0 for self (own process),
|
|
||||||
* -1 for all processes
|
|
||||||
* @param binary_path Path to binary
|
|
||||||
* @param func_pattern Regular expression to specify functions to attach
|
|
||||||
* BPF program to
|
|
||||||
* @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
|
|
||||||
* @return 0, on success; negative error code, otherwise
|
|
||||||
*/
|
|
||||||
LIBBPF_API struct bpf_link *
|
|
||||||
bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
|
|
||||||
pid_t pid,
|
|
||||||
const char *binary_path,
|
|
||||||
const char *func_pattern,
|
|
||||||
const struct bpf_uprobe_multi_opts *opts);
|
|
||||||
|
|
||||||
struct bpf_ksyscall_opts {
|
struct bpf_ksyscall_opts {
|
||||||
/* size of this struct, for forward/backward compatibility */
|
/* size of this struct, for forward/backward compatibility */
|
||||||
size_t sz;
|
size_t sz;
|
||||||
@@ -704,7 +603,7 @@ struct bpf_uprobe_opts {
|
|||||||
/**
|
/**
|
||||||
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
|
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
|
||||||
* to the userspace function which is found by binary path and
|
* to the userspace function which is found by binary path and
|
||||||
* offset. You can optionally specify a particular process to attach
|
* offset. You can optionally specify a particular proccess to attach
|
||||||
* to. You can also optionally attach the program to the function
|
* to. You can also optionally attach the program to the function
|
||||||
* exit instead of entry.
|
* exit instead of entry.
|
||||||
*
|
*
|
||||||
@@ -790,20 +689,9 @@ bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
|
|||||||
const char *tp_name,
|
const char *tp_name,
|
||||||
const struct bpf_tracepoint_opts *opts);
|
const struct bpf_tracepoint_opts *opts);
|
||||||
|
|
||||||
struct bpf_raw_tracepoint_opts {
|
|
||||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
|
||||||
__u64 cookie;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_raw_tracepoint_opts__last_field cookie
|
|
||||||
|
|
||||||
LIBBPF_API struct bpf_link *
|
LIBBPF_API struct bpf_link *
|
||||||
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
|
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
|
||||||
const char *tp_name);
|
const char *tp_name);
|
||||||
LIBBPF_API struct bpf_link *
|
|
||||||
bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
|
|
||||||
const char *tp_name,
|
|
||||||
struct bpf_raw_tracepoint_opts *opts);
|
|
||||||
|
|
||||||
struct bpf_trace_opts {
|
struct bpf_trace_opts {
|
||||||
/* size of this struct, for forward/backward compatibility */
|
/* size of this struct, for forward/backward compatibility */
|
||||||
@@ -825,8 +713,6 @@ bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
|
|||||||
LIBBPF_API struct bpf_link *
|
LIBBPF_API struct bpf_link *
|
||||||
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
|
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
|
||||||
LIBBPF_API struct bpf_link *
|
LIBBPF_API struct bpf_link *
|
||||||
bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd);
|
|
||||||
LIBBPF_API struct bpf_link *
|
|
||||||
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
|
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
|
||||||
LIBBPF_API struct bpf_link *
|
LIBBPF_API struct bpf_link *
|
||||||
bpf_program__attach_freplace(const struct bpf_program *prog,
|
bpf_program__attach_freplace(const struct bpf_program *prog,
|
||||||
@@ -847,36 +733,6 @@ LIBBPF_API struct bpf_link *
|
|||||||
bpf_program__attach_netfilter(const struct bpf_program *prog,
|
bpf_program__attach_netfilter(const struct bpf_program *prog,
|
||||||
const struct bpf_netfilter_opts *opts);
|
const struct bpf_netfilter_opts *opts);
|
||||||
|
|
||||||
struct bpf_tcx_opts {
|
|
||||||
/* size of this struct, for forward/backward compatibility */
|
|
||||||
size_t sz;
|
|
||||||
__u32 flags;
|
|
||||||
__u32 relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
__u64 expected_revision;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_tcx_opts__last_field expected_revision
|
|
||||||
|
|
||||||
LIBBPF_API struct bpf_link *
|
|
||||||
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
|
|
||||||
const struct bpf_tcx_opts *opts);
|
|
||||||
|
|
||||||
struct bpf_netkit_opts {
|
|
||||||
/* size of this struct, for forward/backward compatibility */
|
|
||||||
size_t sz;
|
|
||||||
__u32 flags;
|
|
||||||
__u32 relative_fd;
|
|
||||||
__u32 relative_id;
|
|
||||||
__u64 expected_revision;
|
|
||||||
size_t :0;
|
|
||||||
};
|
|
||||||
#define bpf_netkit_opts__last_field expected_revision
|
|
||||||
|
|
||||||
LIBBPF_API struct bpf_link *
|
|
||||||
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
|
|
||||||
const struct bpf_netkit_opts *opts);
|
|
||||||
|
|
||||||
struct bpf_map;
|
struct bpf_map;
|
||||||
|
|
||||||
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
|
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
|
||||||
@@ -940,12 +796,6 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le
|
|||||||
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
|
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
|
||||||
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
|
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
|
||||||
|
|
||||||
LIBBPF_API struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog);
|
|
||||||
LIBBPF_API __u32 bpf_program__func_info_cnt(const struct bpf_program *prog);
|
|
||||||
|
|
||||||
LIBBPF_API struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog);
|
|
||||||
LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **bpf_program__set_attach_target()** sets BTF-based attach target
|
* @brief **bpf_program__set_attach_target()** sets BTF-based attach target
|
||||||
* for supported BPF program types:
|
* for supported BPF program types:
|
||||||
@@ -1009,23 +859,6 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
|
|||||||
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
|
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
|
||||||
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
|
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
|
|
||||||
* map during BPF skeleton attach phase.
|
|
||||||
* @param map the BPF map instance
|
|
||||||
* @param autoattach whether to attach map during BPF skeleton attach phase
|
|
||||||
* @return 0 on success; negative error code, otherwise
|
|
||||||
*/
|
|
||||||
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **bpf_map__autoattach()** returns whether BPF map is configured to
|
|
||||||
* auto-attach during BPF skeleton attach phase.
|
|
||||||
* @param map the BPF map instance
|
|
||||||
* @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
|
|
||||||
*/
|
|
||||||
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **bpf_map__fd()** gets the file descriptor of the passed
|
* @brief **bpf_map__fd()** gets the file descriptor of the passed
|
||||||
* BPF map
|
* BPF map
|
||||||
@@ -1080,7 +913,7 @@ LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
|
|||||||
|
|
||||||
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
|
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
|
||||||
const void *data, size_t size);
|
const void *data, size_t size);
|
||||||
LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);
|
LIBBPF_API void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief **bpf_map__is_internal()** tells the caller whether or not the
|
* @brief **bpf_map__is_internal()** tells the caller whether or not the
|
||||||
@@ -1272,10 +1105,9 @@ struct bpf_xdp_query_opts {
|
|||||||
__u32 skb_prog_id; /* output */
|
__u32 skb_prog_id; /* output */
|
||||||
__u8 attach_mode; /* output */
|
__u8 attach_mode; /* output */
|
||||||
__u64 feature_flags; /* output */
|
__u64 feature_flags; /* output */
|
||||||
__u32 xdp_zc_max_segs; /* output */
|
|
||||||
size_t :0;
|
size_t :0;
|
||||||
};
|
};
|
||||||
#define bpf_xdp_query_opts__last_field xdp_zc_max_segs
|
#define bpf_xdp_query_opts__last_field feature_flags
|
||||||
|
|
||||||
LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
|
LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
|
||||||
const struct bpf_xdp_attach_opts *opts);
|
const struct bpf_xdp_attach_opts *opts);
|
||||||
@@ -1329,7 +1161,6 @@ LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
|
|||||||
|
|
||||||
/* Ring buffer APIs */
|
/* Ring buffer APIs */
|
||||||
struct ring_buffer;
|
struct ring_buffer;
|
||||||
struct ring;
|
|
||||||
struct user_ring_buffer;
|
struct user_ring_buffer;
|
||||||
|
|
||||||
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
|
typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
|
||||||
@@ -1348,92 +1179,8 @@ LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
|||||||
ring_buffer_sample_fn sample_cb, void *ctx);
|
ring_buffer_sample_fn sample_cb, void *ctx);
|
||||||
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
|
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
|
||||||
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
|
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
|
||||||
LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
|
|
||||||
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
|
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring_buffer__ring()** returns the ringbuffer object inside a given
|
|
||||||
* ringbuffer manager representing a single BPF_MAP_TYPE_RINGBUF map instance.
|
|
||||||
*
|
|
||||||
* @param rb A ringbuffer manager object.
|
|
||||||
* @param idx An index into the ringbuffers contained within the ringbuffer
|
|
||||||
* manager object. The index is 0-based and corresponds to the order in which
|
|
||||||
* ring_buffer__add was called.
|
|
||||||
* @return A ringbuffer object on success; NULL and errno set if the index is
|
|
||||||
* invalid.
|
|
||||||
*/
|
|
||||||
LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
|
|
||||||
unsigned int idx);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__consumer_pos()** returns the current consumer position in the
|
|
||||||
* given ringbuffer.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @return The current consumer position.
|
|
||||||
*/
|
|
||||||
LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__producer_pos()** returns the current producer position in the
|
|
||||||
* given ringbuffer.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @return The current producer position.
|
|
||||||
*/
|
|
||||||
LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__avail_data_size()** returns the number of bytes in the
|
|
||||||
* ringbuffer not yet consumed. This has no locking associated with it, so it
|
|
||||||
* can be inaccurate if operations are ongoing while this is called. However, it
|
|
||||||
* should still show the correct trend over the long-term.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @return The number of bytes not yet consumed.
|
|
||||||
*/
|
|
||||||
LIBBPF_API size_t ring__avail_data_size(const struct ring *r);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__size()** returns the total size of the ringbuffer's map data
|
|
||||||
* area (excluding special producer/consumer pages). Effectively this gives the
|
|
||||||
* amount of usable bytes of data inside the ringbuffer.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @return The total size of the ringbuffer map data area.
|
|
||||||
*/
|
|
||||||
LIBBPF_API size_t ring__size(const struct ring *r);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__map_fd()** returns the file descriptor underlying the given
|
|
||||||
* ringbuffer.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @return The underlying ringbuffer file descriptor
|
|
||||||
*/
|
|
||||||
LIBBPF_API int ring__map_fd(const struct ring *r);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__consume()** consumes available ringbuffer data without event
|
|
||||||
* polling.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @return The number of records consumed (or INT_MAX, whichever is less), or
|
|
||||||
* a negative number if any of the callbacks return an error.
|
|
||||||
*/
|
|
||||||
LIBBPF_API int ring__consume(struct ring *r);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **ring__consume_n()** consumes up to a requested amount of items from
|
|
||||||
* a ringbuffer without event polling.
|
|
||||||
*
|
|
||||||
* @param r A ringbuffer object.
|
|
||||||
* @param n Maximum amount of items to consume.
|
|
||||||
* @return The number of items consumed, or a negative number if any of the
|
|
||||||
* callbacks return an error.
|
|
||||||
*/
|
|
||||||
LIBBPF_API int ring__consume_n(struct ring *r, size_t n);
|
|
||||||
|
|
||||||
struct user_ring_buffer_opts {
|
struct user_ring_buffer_opts {
|
||||||
size_t sz; /* size of this struct, for forward/backward compatibility */
|
size_t sz; /* size of this struct, for forward/backward compatibility */
|
||||||
};
|
};
|
||||||
@@ -1624,11 +1371,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
|
|||||||
* memory region of the ring buffer.
|
* memory region of the ring buffer.
|
||||||
* This ring buffer can be used to implement a custom events consumer.
|
* This ring buffer can be used to implement a custom events consumer.
|
||||||
* The ring buffer starts with the *struct perf_event_mmap_page*, which
|
* The ring buffer starts with the *struct perf_event_mmap_page*, which
|
||||||
* holds the ring buffer management fields, when accessing the header
|
* holds the ring buffer managment fields, when accessing the header
|
||||||
* structure it's important to be SMP aware.
|
* structure it's important to be SMP aware.
|
||||||
* You can refer to *perf_event_read_simple* for a simple example.
|
* You can refer to *perf_event_read_simple* for a simple example.
|
||||||
* @param pb the perf buffer structure
|
* @param pb the perf buffer structure
|
||||||
* @param buf_idx the buffer index to retrieve
|
* @param buf_idx the buffer index to retreive
|
||||||
* @param buf (out) gets the base pointer of the mmap()'ed memory
|
* @param buf (out) gets the base pointer of the mmap()'ed memory
|
||||||
* @param buf_size (out) gets the size of the mmap()'ed region
|
* @param buf_size (out) gets the size of the mmap()'ed region
|
||||||
* @return 0 on success, negative error code for failure
|
* @return 0 on success, negative error code for failure
|
||||||
@@ -1720,7 +1467,6 @@ struct bpf_map_skeleton {
|
|||||||
const char *name;
|
const char *name;
|
||||||
struct bpf_map **map;
|
struct bpf_map **map;
|
||||||
void **mmaped;
|
void **mmaped;
|
||||||
struct bpf_link **link;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_prog_skeleton {
|
struct bpf_prog_skeleton {
|
||||||
@@ -1817,14 +1563,9 @@ struct bpf_linker_file_opts {
|
|||||||
struct bpf_linker;
|
struct bpf_linker;
|
||||||
|
|
||||||
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
|
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
|
||||||
LIBBPF_API struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts);
|
|
||||||
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
|
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
|
||||||
const char *filename,
|
const char *filename,
|
||||||
const struct bpf_linker_file_opts *opts);
|
const struct bpf_linker_file_opts *opts);
|
||||||
LIBBPF_API int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
|
|
||||||
const struct bpf_linker_file_opts *opts);
|
|
||||||
LIBBPF_API int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
|
|
||||||
const struct bpf_linker_file_opts *opts);
|
|
||||||
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
|
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
|
||||||
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
|
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
|
||||||
|
|
||||||
|
|||||||
@@ -245,6 +245,7 @@ LIBBPF_0.3.0 {
|
|||||||
btf__parse_raw_split;
|
btf__parse_raw_split;
|
||||||
btf__parse_split;
|
btf__parse_split;
|
||||||
btf__new_empty_split;
|
btf__new_empty_split;
|
||||||
|
btf__new_split;
|
||||||
ring_buffer__epoll_fd;
|
ring_buffer__epoll_fd;
|
||||||
} LIBBPF_0.2.0;
|
} LIBBPF_0.2.0;
|
||||||
|
|
||||||
@@ -325,6 +326,7 @@ LIBBPF_0.7.0 {
|
|||||||
bpf_xdp_detach;
|
bpf_xdp_detach;
|
||||||
bpf_xdp_query;
|
bpf_xdp_query;
|
||||||
bpf_xdp_query_id;
|
bpf_xdp_query_id;
|
||||||
|
btf_ext__raw_data;
|
||||||
libbpf_probe_bpf_helper;
|
libbpf_probe_bpf_helper;
|
||||||
libbpf_probe_bpf_map_type;
|
libbpf_probe_bpf_map_type;
|
||||||
libbpf_probe_bpf_prog_type;
|
libbpf_probe_bpf_prog_type;
|
||||||
@@ -393,54 +395,5 @@ LIBBPF_1.2.0 {
|
|||||||
LIBBPF_1.3.0 {
|
LIBBPF_1.3.0 {
|
||||||
global:
|
global:
|
||||||
bpf_obj_pin_opts;
|
bpf_obj_pin_opts;
|
||||||
bpf_object__unpin;
|
|
||||||
bpf_prog_detach_opts;
|
|
||||||
bpf_program__attach_netfilter;
|
bpf_program__attach_netfilter;
|
||||||
bpf_program__attach_netkit;
|
|
||||||
bpf_program__attach_tcx;
|
|
||||||
bpf_program__attach_uprobe_multi;
|
|
||||||
ring__avail_data_size;
|
|
||||||
ring__consume;
|
|
||||||
ring__consumer_pos;
|
|
||||||
ring__map_fd;
|
|
||||||
ring__producer_pos;
|
|
||||||
ring__size;
|
|
||||||
ring_buffer__ring;
|
|
||||||
} LIBBPF_1.2.0;
|
} LIBBPF_1.2.0;
|
||||||
|
|
||||||
LIBBPF_1.4.0 {
|
|
||||||
global:
|
|
||||||
bpf_program__attach_raw_tracepoint_opts;
|
|
||||||
bpf_raw_tracepoint_open_opts;
|
|
||||||
bpf_token_create;
|
|
||||||
btf__new_split;
|
|
||||||
btf_ext__raw_data;
|
|
||||||
} LIBBPF_1.3.0;
|
|
||||||
|
|
||||||
LIBBPF_1.5.0 {
|
|
||||||
global:
|
|
||||||
btf__distill_base;
|
|
||||||
btf__relocate;
|
|
||||||
btf_ext__endianness;
|
|
||||||
btf_ext__set_endianness;
|
|
||||||
bpf_map__autoattach;
|
|
||||||
bpf_map__set_autoattach;
|
|
||||||
bpf_object__token_fd;
|
|
||||||
bpf_program__attach_sockmap;
|
|
||||||
ring__consume_n;
|
|
||||||
ring_buffer__consume_n;
|
|
||||||
} LIBBPF_1.4.0;
|
|
||||||
|
|
||||||
LIBBPF_1.6.0 {
|
|
||||||
global:
|
|
||||||
bpf_linker__add_buf;
|
|
||||||
bpf_linker__add_fd;
|
|
||||||
bpf_linker__new_fd;
|
|
||||||
bpf_object__prepare;
|
|
||||||
bpf_program__func_info;
|
|
||||||
bpf_program__func_info_cnt;
|
|
||||||
bpf_program__line_info;
|
|
||||||
bpf_program__line_info_cnt;
|
|
||||||
btf__add_decl_attr;
|
|
||||||
btf__add_type_attr;
|
|
||||||
} LIBBPF_1.5.0;
|
|
||||||
|
|||||||
@@ -70,23 +70,4 @@
|
|||||||
}; \
|
}; \
|
||||||
})
|
})
|
||||||
|
|
||||||
/* Helper macro to clear and optionally reinitialize libbpf options struct
|
|
||||||
*
|
|
||||||
* Small helper macro to reset all fields and to reinitialize the common
|
|
||||||
* structure size member. Values provided by users in struct initializer-
|
|
||||||
* syntax as varargs can be provided as well to reinitialize options struct
|
|
||||||
* specific members.
|
|
||||||
*/
|
|
||||||
#define LIBBPF_OPTS_RESET(NAME, ...) \
|
|
||||||
do { \
|
|
||||||
typeof(NAME) ___##NAME = ({ \
|
|
||||||
memset(&___##NAME, 0, sizeof(NAME)); \
|
|
||||||
(typeof(NAME)) { \
|
|
||||||
.sz = sizeof(NAME), \
|
|
||||||
__VA_ARGS__ \
|
|
||||||
}; \
|
|
||||||
}); \
|
|
||||||
memcpy(&NAME, &___##NAME, sizeof(NAME)); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#endif /* __LIBBPF_LIBBPF_COMMON_H */
|
#endif /* __LIBBPF_LIBBPF_COMMON_H */
|
||||||
|
|||||||
@@ -10,30 +10,13 @@
|
|||||||
#define __LIBBPF_LIBBPF_INTERNAL_H
|
#define __LIBBPF_LIBBPF_INTERNAL_H
|
||||||
|
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <byteswap.h>
|
|
||||||
#include <limits.h>
|
#include <limits.h>
|
||||||
#include <errno.h>
|
#include <errno.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <sys/syscall.h>
|
|
||||||
#include <libelf.h>
|
|
||||||
#include "relo_core.h"
|
#include "relo_core.h"
|
||||||
|
|
||||||
/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
|
|
||||||
* ([0]), and just returns -EINVAL even if file exists and is accessible.
|
|
||||||
* See [1] for issues caused by this.
|
|
||||||
*
|
|
||||||
* So just redefine it to 0 on Android.
|
|
||||||
*
|
|
||||||
* [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
|
|
||||||
* [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
|
|
||||||
*/
|
|
||||||
#ifdef __ANDROID__
|
|
||||||
#undef AT_EACCESS
|
|
||||||
#define AT_EACCESS 0
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* make sure libbpf doesn't use kernel-only integer typedefs */
|
/* make sure libbpf doesn't use kernel-only integer typedefs */
|
||||||
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
|
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
|
||||||
|
|
||||||
@@ -235,9 +218,6 @@ struct btf_type;
|
|||||||
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
|
struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
|
||||||
const char *btf_kind_str(const struct btf_type *t);
|
const char *btf_kind_str(const struct btf_type *t);
|
||||||
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
|
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
|
||||||
const struct btf_header *btf_header(const struct btf *btf);
|
|
||||||
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
|
|
||||||
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
|
|
||||||
|
|
||||||
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
|
static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
|
||||||
{
|
{
|
||||||
@@ -374,42 +354,18 @@ enum kern_feature_id {
|
|||||||
FEAT_BTF_ENUM64,
|
FEAT_BTF_ENUM64,
|
||||||
/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
|
/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
|
||||||
FEAT_SYSCALL_WRAPPER,
|
FEAT_SYSCALL_WRAPPER,
|
||||||
/* BPF multi-uprobe link support */
|
|
||||||
FEAT_UPROBE_MULTI_LINK,
|
|
||||||
/* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
|
|
||||||
FEAT_ARG_CTX_TAG,
|
|
||||||
/* Kernel supports '?' at the front of datasec names */
|
|
||||||
FEAT_BTF_QMARK_DATASEC,
|
|
||||||
__FEAT_CNT,
|
__FEAT_CNT,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum kern_feature_result {
|
int probe_memcg_account(void);
|
||||||
FEAT_UNKNOWN = 0,
|
|
||||||
FEAT_SUPPORTED = 1,
|
|
||||||
FEAT_MISSING = 2,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct kern_feature_cache {
|
|
||||||
enum kern_feature_result res[__FEAT_CNT];
|
|
||||||
int token_fd;
|
|
||||||
};
|
|
||||||
|
|
||||||
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
|
|
||||||
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
|
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
|
||||||
|
|
||||||
int probe_kern_syscall_wrapper(int token_fd);
|
|
||||||
int probe_memcg_account(int token_fd);
|
|
||||||
int bump_rlimit_memlock(void);
|
int bump_rlimit_memlock(void);
|
||||||
|
|
||||||
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
|
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
|
||||||
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
|
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
|
||||||
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
||||||
const char *str_sec, size_t str_len,
|
const char *str_sec, size_t str_len);
|
||||||
int token_fd);
|
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
|
||||||
int btf_load_into_kernel(struct btf *btf,
|
|
||||||
char *log_buf, size_t log_sz, __u32 log_level,
|
|
||||||
int token_fd);
|
|
||||||
struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd);
|
|
||||||
|
|
||||||
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
|
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
|
||||||
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
|
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
|
||||||
@@ -450,11 +406,11 @@ struct btf_ext_info {
|
|||||||
*
|
*
|
||||||
* The func_info subsection layout:
|
* The func_info subsection layout:
|
||||||
* record size for struct bpf_func_info in the func_info subsection
|
* record size for struct bpf_func_info in the func_info subsection
|
||||||
* struct btf_ext_info_sec for section #1
|
* struct btf_sec_func_info for section #1
|
||||||
* a list of bpf_func_info records for section #1
|
* a list of bpf_func_info records for section #1
|
||||||
* where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
|
* where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
|
||||||
* but may not be identical
|
* but may not be identical
|
||||||
* struct btf_ext_info_sec for section #2
|
* struct btf_sec_func_info for section #2
|
||||||
* a list of bpf_func_info records for section #2
|
* a list of bpf_func_info records for section #2
|
||||||
* ......
|
* ......
|
||||||
*
|
*
|
||||||
@@ -486,8 +442,6 @@ struct btf_ext {
|
|||||||
struct btf_ext_header *hdr;
|
struct btf_ext_header *hdr;
|
||||||
void *data;
|
void *data;
|
||||||
};
|
};
|
||||||
void *data_swapped;
|
|
||||||
bool swapped_endian;
|
|
||||||
struct btf_ext_info func_info;
|
struct btf_ext_info func_info;
|
||||||
struct btf_ext_info line_info;
|
struct btf_ext_info line_info;
|
||||||
struct btf_ext_info core_relo_info;
|
struct btf_ext_info core_relo_info;
|
||||||
@@ -515,64 +469,21 @@ struct bpf_line_info_min {
|
|||||||
__u32 line_col;
|
__u32 line_col;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Functions to byte-swap info records */
|
|
||||||
|
|
||||||
typedef void (*info_rec_bswap_fn)(void *);
|
|
||||||
|
|
||||||
static inline void bpf_func_info_bswap(struct bpf_func_info *i)
|
|
||||||
{
|
|
||||||
i->insn_off = bswap_32(i->insn_off);
|
|
||||||
i->type_id = bswap_32(i->type_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void bpf_line_info_bswap(struct bpf_line_info *i)
|
|
||||||
{
|
|
||||||
i->insn_off = bswap_32(i->insn_off);
|
|
||||||
i->file_name_off = bswap_32(i->file_name_off);
|
|
||||||
i->line_off = bswap_32(i->line_off);
|
|
||||||
i->line_col = bswap_32(i->line_col);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void bpf_core_relo_bswap(struct bpf_core_relo *i)
|
|
||||||
{
|
|
||||||
i->insn_off = bswap_32(i->insn_off);
|
|
||||||
i->type_id = bswap_32(i->type_id);
|
|
||||||
i->access_str_off = bswap_32(i->access_str_off);
|
|
||||||
i->kind = bswap_32(i->kind);
|
|
||||||
}
|
|
||||||
|
|
||||||
enum btf_field_iter_kind {
|
|
||||||
BTF_FIELD_ITER_IDS,
|
|
||||||
BTF_FIELD_ITER_STRS,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct btf_field_desc {
|
|
||||||
/* once-per-type offsets */
|
|
||||||
int t_off_cnt, t_offs[2];
|
|
||||||
/* member struct size, or zero, if no members */
|
|
||||||
int m_sz;
|
|
||||||
/* repeated per-member offsets */
|
|
||||||
int m_off_cnt, m_offs[1];
|
|
||||||
};
|
|
||||||
|
|
||||||
struct btf_field_iter {
|
|
||||||
struct btf_field_desc desc;
|
|
||||||
void *p;
|
|
||||||
int m_idx;
|
|
||||||
int off_idx;
|
|
||||||
int vlen;
|
|
||||||
};
|
|
||||||
|
|
||||||
int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
|
|
||||||
__u32 *btf_field_iter_next(struct btf_field_iter *it);
|
|
||||||
|
|
||||||
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
|
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
|
||||||
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
|
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
|
||||||
|
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
|
||||||
|
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
|
||||||
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
|
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
|
||||||
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
|
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
|
||||||
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
|
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
|
||||||
__u32 kind);
|
__u32 kind);
|
||||||
|
|
||||||
|
typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
|
||||||
|
const char *sym_name, void *ctx);
|
||||||
|
|
||||||
|
int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg);
|
||||||
|
|
||||||
/* handle direct returned errors */
|
/* handle direct returned errors */
|
||||||
static inline int libbpf_err(int ret)
|
static inline int libbpf_err(int ret)
|
||||||
{
|
{
|
||||||
@@ -618,27 +529,6 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
|
|||||||
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
|
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void bpf_insn_bswap(struct bpf_insn *insn)
|
|
||||||
{
|
|
||||||
__u8 tmp_reg = insn->dst_reg;
|
|
||||||
|
|
||||||
insn->dst_reg = insn->src_reg;
|
|
||||||
insn->src_reg = tmp_reg;
|
|
||||||
insn->off = bswap_16(insn->off);
|
|
||||||
insn->imm = bswap_32(insn->imm);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
|
|
||||||
* Original FD is not closed or altered in any other way.
|
|
||||||
* Preserves original FD value, if it's invalid (negative).
|
|
||||||
*/
|
|
||||||
static inline int dup_good_fd(int fd)
|
|
||||||
{
|
|
||||||
if (fd < 0)
|
|
||||||
return fd;
|
|
||||||
return fcntl(fd, F_DUPFD_CLOEXEC, 3);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
|
/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
|
||||||
* Takes ownership of the fd passed in, and closes it if calling
|
* Takes ownership of the fd passed in, and closes it if calling
|
||||||
* fcntl(fd, F_DUPFD_CLOEXEC, 3).
|
* fcntl(fd, F_DUPFD_CLOEXEC, 3).
|
||||||
@@ -650,7 +540,7 @@ static inline int ensure_good_fd(int fd)
|
|||||||
if (fd < 0)
|
if (fd < 0)
|
||||||
return fd;
|
return fd;
|
||||||
if (fd < 3) {
|
if (fd < 3) {
|
||||||
fd = dup_good_fd(fd);
|
fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
|
||||||
saved_errno = errno;
|
saved_errno = errno;
|
||||||
close(old_fd);
|
close(old_fd);
|
||||||
errno = saved_errno;
|
errno = saved_errno;
|
||||||
@@ -662,34 +552,6 @@ static inline int ensure_good_fd(int fd)
|
|||||||
return fd;
|
return fd;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int sys_dup3(int oldfd, int newfd, int flags)
|
|
||||||
{
|
|
||||||
return syscall(__NR_dup3, oldfd, newfd, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Some versions of Android don't provide memfd_create() in their libc
|
|
||||||
* implementation, so avoid complications and just go straight to Linux
|
|
||||||
* syscall.
|
|
||||||
*/
|
|
||||||
static inline int sys_memfd_create(const char *name, unsigned flags)
|
|
||||||
{
|
|
||||||
return syscall(__NR_memfd_create, name, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
|
|
||||||
* Regardless of success, *tmp_fd* is closed.
|
|
||||||
* Whatever *fixed_fd* pointed to is closed silently.
|
|
||||||
*/
|
|
||||||
static inline int reuse_fd(int fixed_fd, int tmp_fd)
|
|
||||||
{
|
|
||||||
int err;
|
|
||||||
|
|
||||||
err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
|
|
||||||
err = err < 0 ? -errno : 0;
|
|
||||||
close(tmp_fd); /* clean up temporary FD */
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The following two functions are exposed to bpftool */
|
/* The following two functions are exposed to bpftool */
|
||||||
int bpf_core_add_cands(struct bpf_core_cand *local_cand,
|
int bpf_core_add_cands(struct bpf_core_cand *local_cand,
|
||||||
size_t local_essent_len,
|
size_t local_essent_len,
|
||||||
@@ -715,25 +577,4 @@ static inline bool is_pow_of_2(size_t x)
|
|||||||
#define PROG_LOAD_ATTEMPTS 5
|
#define PROG_LOAD_ATTEMPTS 5
|
||||||
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
|
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
|
||||||
|
|
||||||
bool glob_match(const char *str, const char *pat);
|
|
||||||
|
|
||||||
long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name);
|
|
||||||
long elf_find_func_offset_from_file(const char *binary_path, const char *name);
|
|
||||||
|
|
||||||
struct elf_fd {
|
|
||||||
Elf *elf;
|
|
||||||
int fd;
|
|
||||||
};
|
|
||||||
|
|
||||||
int elf_open(const char *binary_path, struct elf_fd *elf_fd);
|
|
||||||
void elf_close(struct elf_fd *elf_fd);
|
|
||||||
|
|
||||||
int elf_resolve_syms_offsets(const char *binary_path, int cnt,
|
|
||||||
const char **syms, unsigned long **poffsets,
|
|
||||||
int st_type);
|
|
||||||
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
|
|
||||||
unsigned long **poffsets, size_t *pcnt);
|
|
||||||
|
|
||||||
int probe_fd(int fd);
|
|
||||||
|
|
||||||
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
|
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
|
||||||
|
|||||||
@@ -76,7 +76,7 @@ enum libbpf_strict_mode {
|
|||||||
* first BPF program or map creation operation. This is done only if
|
* first BPF program or map creation operation. This is done only if
|
||||||
* kernel is too old to support memcg-based memory accounting for BPF
|
* kernel is too old to support memcg-based memory accounting for BPF
|
||||||
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
|
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
|
||||||
* but it can be overridden with libbpf_set_memlock_rlim() API.
|
* but it can be overriden with libbpf_set_memlock_rlim() API.
|
||||||
* Note that libbpf_set_memlock_rlim() needs to be called before
|
* Note that libbpf_set_memlock_rlim() needs to be called before
|
||||||
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
|
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
|
||||||
* operation.
|
* operation.
|
||||||
@@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
|
|||||||
* @brief **libbpf_get_error()** extracts the error code from the passed
|
* @brief **libbpf_get_error()** extracts the error code from the passed
|
||||||
* pointer
|
* pointer
|
||||||
* @param ptr pointer returned from libbpf API function
|
* @param ptr pointer returned from libbpf API function
|
||||||
* @return error code; or 0 if no error occurred
|
* @return error code; or 0 if no error occured
|
||||||
*
|
*
|
||||||
* Note, as of libbpf 1.0 this function is not necessary and not recommended
|
* Note, as of libbpf 1.0 this function is not necessary and not recommended
|
||||||
* to be used. Libbpf doesn't return error code embedded into the pointer
|
* to be used. Libbpf doesn't return error code embedded into the pointer
|
||||||
|
|||||||
@@ -74,6 +74,10 @@ static __u32 get_debian_kernel_version(struct utsname *info)
|
|||||||
if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
|
if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
// Patch to run on Debian 10
|
||||||
|
if (major == 4 && minor == 19 && patch > 255)
|
||||||
|
return KERNEL_VERSION(major, minor, 255);
|
||||||
|
|
||||||
return KERNEL_VERSION(major, minor, patch);
|
return KERNEL_VERSION(major, minor, patch);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -219,8 +223,7 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
||||||
const char *str_sec, size_t str_len,
|
const char *str_sec, size_t str_len)
|
||||||
int token_fd)
|
|
||||||
{
|
{
|
||||||
struct btf_header hdr = {
|
struct btf_header hdr = {
|
||||||
.magic = BTF_MAGIC,
|
.magic = BTF_MAGIC,
|
||||||
@@ -230,10 +233,6 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
|||||||
.str_off = types_len,
|
.str_off = types_len,
|
||||||
.str_len = str_len,
|
.str_len = str_len,
|
||||||
};
|
};
|
||||||
LIBBPF_OPTS(bpf_btf_load_opts, opts,
|
|
||||||
.token_fd = token_fd,
|
|
||||||
.btf_flags = token_fd ? BPF_F_TOKEN_FD : 0,
|
|
||||||
);
|
|
||||||
int btf_fd, btf_len;
|
int btf_fd, btf_len;
|
||||||
__u8 *raw_btf;
|
__u8 *raw_btf;
|
||||||
|
|
||||||
@@ -246,7 +245,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
|
|||||||
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
|
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
|
||||||
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
|
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
|
||||||
|
|
||||||
btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);
|
btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
|
||||||
|
|
||||||
free(raw_btf);
|
free(raw_btf);
|
||||||
return btf_fd;
|
return btf_fd;
|
||||||
@@ -276,7 +275,7 @@ static int load_local_storage_btf(void)
|
|||||||
};
|
};
|
||||||
|
|
||||||
return libbpf__load_raw_btf((char *)types, sizeof(types),
|
return libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||||
strs, sizeof(strs), 0);
|
strs, sizeof(strs));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int probe_map_create(enum bpf_map_type map_type)
|
static int probe_map_create(enum bpf_map_type map_type)
|
||||||
@@ -331,20 +330,12 @@ static int probe_map_create(enum bpf_map_type map_type)
|
|||||||
case BPF_MAP_TYPE_STRUCT_OPS:
|
case BPF_MAP_TYPE_STRUCT_OPS:
|
||||||
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
|
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
|
||||||
opts.btf_vmlinux_value_type_id = 1;
|
opts.btf_vmlinux_value_type_id = 1;
|
||||||
opts.value_type_btf_obj_fd = -1;
|
|
||||||
exp_err = -524; /* -ENOTSUPP */
|
exp_err = -524; /* -ENOTSUPP */
|
||||||
break;
|
break;
|
||||||
case BPF_MAP_TYPE_BLOOM_FILTER:
|
case BPF_MAP_TYPE_BLOOM_FILTER:
|
||||||
key_size = 0;
|
key_size = 0;
|
||||||
max_entries = 1;
|
max_entries = 1;
|
||||||
break;
|
break;
|
||||||
case BPF_MAP_TYPE_ARENA:
|
|
||||||
key_size = 0;
|
|
||||||
value_size = 0;
|
|
||||||
max_entries = 1; /* one page */
|
|
||||||
opts.map_extra = 0; /* can mmap() at any address */
|
|
||||||
opts.map_flags = BPF_F_MMAPABLE;
|
|
||||||
break;
|
|
||||||
case BPF_MAP_TYPE_HASH:
|
case BPF_MAP_TYPE_HASH:
|
||||||
case BPF_MAP_TYPE_ARRAY:
|
case BPF_MAP_TYPE_ARRAY:
|
||||||
case BPF_MAP_TYPE_PROG_ARRAY:
|
case BPF_MAP_TYPE_PROG_ARRAY:
|
||||||
@@ -448,8 +439,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
|
|||||||
/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
|
/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
|
||||||
* at all, it will emit something like "invalid func unknown#181".
|
* at all, it will emit something like "invalid func unknown#181".
|
||||||
* If BPF verifier recognizes BPF helper but it's not supported for
|
* If BPF verifier recognizes BPF helper but it's not supported for
|
||||||
* given BPF program type, it will emit "unknown func bpf_sys_bpf#166"
|
* given BPF program type, it will emit "unknown func bpf_sys_bpf#166".
|
||||||
* or "program of this type cannot use helper bpf_sys_bpf#166".
|
|
||||||
* In both cases, provided combination of BPF program type and BPF
|
* In both cases, provided combination of BPF program type and BPF
|
||||||
* helper is not supported by the kernel.
|
* helper is not supported by the kernel.
|
||||||
* In all other cases, probe_prog_load() above will either succeed (e.g.,
|
* In all other cases, probe_prog_load() above will either succeed (e.g.,
|
||||||
@@ -458,8 +448,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
|
|||||||
* that), or we'll get some more specific BPF verifier error about
|
* that), or we'll get some more specific BPF verifier error about
|
||||||
* some unsatisfied conditions.
|
* some unsatisfied conditions.
|
||||||
*/
|
*/
|
||||||
if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ") ||
|
if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
|
||||||
strstr(buf, "program of this type cannot use helper ")))
|
|
||||||
return 0;
|
return 0;
|
||||||
return 1; /* assume supported */
|
return 1; /* assume supported */
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,6 @@
|
|||||||
#define __LIBBPF_VERSION_H
|
#define __LIBBPF_VERSION_H
|
||||||
|
|
||||||
#define LIBBPF_MAJOR_VERSION 1
|
#define LIBBPF_MAJOR_VERSION 1
|
||||||
#define LIBBPF_MINOR_VERSION 6
|
#define LIBBPF_MINOR_VERSION 3
|
||||||
|
|
||||||
#endif /* __LIBBPF_VERSION_H */
|
#endif /* __LIBBPF_VERSION_H */
|
||||||
|
|||||||
451
src/linker.c
451
src/linker.c
@@ -4,10 +4,6 @@
|
|||||||
*
|
*
|
||||||
* Copyright (c) 2021 Facebook
|
* Copyright (c) 2021 Facebook
|
||||||
*/
|
*/
|
||||||
#ifndef _GNU_SOURCE
|
|
||||||
#define _GNU_SOURCE
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <stdbool.h>
|
#include <stdbool.h>
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
@@ -20,12 +16,10 @@
|
|||||||
#include <elf.h>
|
#include <elf.h>
|
||||||
#include <libelf.h>
|
#include <libelf.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
#include <sys/mman.h>
|
|
||||||
#include "libbpf.h"
|
#include "libbpf.h"
|
||||||
#include "btf.h"
|
#include "btf.h"
|
||||||
#include "libbpf_internal.h"
|
#include "libbpf_internal.h"
|
||||||
#include "strset.h"
|
#include "strset.h"
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
#define BTF_EXTERN_SEC ".extern"
|
#define BTF_EXTERN_SEC ".extern"
|
||||||
|
|
||||||
@@ -141,7 +135,6 @@ struct bpf_linker {
|
|||||||
int fd;
|
int fd;
|
||||||
Elf *elf;
|
Elf *elf;
|
||||||
Elf64_Ehdr *elf_hdr;
|
Elf64_Ehdr *elf_hdr;
|
||||||
bool swapped_endian;
|
|
||||||
|
|
||||||
/* Output sections metadata */
|
/* Output sections metadata */
|
||||||
struct dst_sec *secs;
|
struct dst_sec *secs;
|
||||||
@@ -157,19 +150,15 @@ struct bpf_linker {
|
|||||||
/* global (including extern) ELF symbols */
|
/* global (including extern) ELF symbols */
|
||||||
int glob_sym_cnt;
|
int glob_sym_cnt;
|
||||||
struct glob_sym *glob_syms;
|
struct glob_sym *glob_syms;
|
||||||
|
|
||||||
bool fd_is_owned;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#define pr_warn_elf(fmt, ...) \
|
#define pr_warn_elf(fmt, ...) \
|
||||||
libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
|
libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
|
||||||
|
|
||||||
static int init_output_elf(struct bpf_linker *linker);
|
static int init_output_elf(struct bpf_linker *linker, const char *file);
|
||||||
|
|
||||||
static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
|
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
|
||||||
const char *filename);
|
const struct bpf_linker_file_opts *opts,
|
||||||
|
|
||||||
static int linker_load_obj_file(struct bpf_linker *linker,
|
|
||||||
struct src_obj *obj);
|
struct src_obj *obj);
|
||||||
static int linker_sanity_check_elf(struct src_obj *obj);
|
static int linker_sanity_check_elf(struct src_obj *obj);
|
||||||
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
|
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
|
||||||
@@ -200,7 +189,7 @@ void bpf_linker__free(struct bpf_linker *linker)
|
|||||||
if (linker->elf)
|
if (linker->elf)
|
||||||
elf_end(linker->elf);
|
elf_end(linker->elf);
|
||||||
|
|
||||||
if (linker->fd >= 0 && linker->fd_is_owned)
|
if (linker->fd >= 0)
|
||||||
close(linker->fd);
|
close(linker->fd);
|
||||||
|
|
||||||
strset__free(linker->strtab_strs);
|
strset__free(linker->strtab_strs);
|
||||||
@@ -242,63 +231,9 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts
|
|||||||
if (!linker)
|
if (!linker)
|
||||||
return errno = ENOMEM, NULL;
|
return errno = ENOMEM, NULL;
|
||||||
|
|
||||||
linker->filename = strdup(filename);
|
linker->fd = -1;
|
||||||
if (!linker->filename) {
|
|
||||||
err = -ENOMEM;
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
|
|
||||||
linker->fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
|
err = init_output_elf(linker, filename);
|
||||||
if (linker->fd < 0) {
|
|
||||||
err = -errno;
|
|
||||||
pr_warn("failed to create '%s': %d\n", filename, err);
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
linker->fd_is_owned = true;
|
|
||||||
|
|
||||||
err = init_output_elf(linker);
|
|
||||||
if (err)
|
|
||||||
goto err_out;
|
|
||||||
|
|
||||||
return linker;
|
|
||||||
|
|
||||||
err_out:
|
|
||||||
bpf_linker__free(linker);
|
|
||||||
return errno = -err, NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts)
|
|
||||||
{
|
|
||||||
struct bpf_linker *linker;
|
|
||||||
char filename[32];
|
|
||||||
int err;
|
|
||||||
|
|
||||||
if (fd < 0)
|
|
||||||
return errno = EINVAL, NULL;
|
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_linker_opts))
|
|
||||||
return errno = EINVAL, NULL;
|
|
||||||
|
|
||||||
if (elf_version(EV_CURRENT) == EV_NONE) {
|
|
||||||
pr_warn_elf("libelf initialization failed");
|
|
||||||
return errno = EINVAL, NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
linker = calloc(1, sizeof(*linker));
|
|
||||||
if (!linker)
|
|
||||||
return errno = ENOMEM, NULL;
|
|
||||||
|
|
||||||
snprintf(filename, sizeof(filename), "fd:%d", fd);
|
|
||||||
linker->filename = strdup(filename);
|
|
||||||
if (!linker->filename) {
|
|
||||||
err = -ENOMEM;
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
|
|
||||||
linker->fd = fd;
|
|
||||||
linker->fd_is_owned = false;
|
|
||||||
|
|
||||||
err = init_output_elf(linker);
|
|
||||||
if (err)
|
if (err)
|
||||||
goto err_out;
|
goto err_out;
|
||||||
|
|
||||||
@@ -357,12 +292,23 @@ static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx)
|
|||||||
return sym;
|
return sym;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int init_output_elf(struct bpf_linker *linker)
|
static int init_output_elf(struct bpf_linker *linker, const char *file)
|
||||||
{
|
{
|
||||||
int err, str_off;
|
int err, str_off;
|
||||||
Elf64_Sym *init_sym;
|
Elf64_Sym *init_sym;
|
||||||
struct dst_sec *sec;
|
struct dst_sec *sec;
|
||||||
|
|
||||||
|
linker->filename = strdup(file);
|
||||||
|
if (!linker->filename)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
|
||||||
|
if (linker->fd < 0) {
|
||||||
|
err = -errno;
|
||||||
|
pr_warn("failed to create '%s': %d\n", file, err);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
|
linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
|
||||||
if (!linker->elf) {
|
if (!linker->elf) {
|
||||||
pr_warn_elf("failed to create ELF object");
|
pr_warn_elf("failed to create ELF object");
|
||||||
@@ -378,8 +324,13 @@ static int init_output_elf(struct bpf_linker *linker)
|
|||||||
|
|
||||||
linker->elf_hdr->e_machine = EM_BPF;
|
linker->elf_hdr->e_machine = EM_BPF;
|
||||||
linker->elf_hdr->e_type = ET_REL;
|
linker->elf_hdr->e_type = ET_REL;
|
||||||
/* Set unknown ELF endianness, assign later from input files */
|
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||||
linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE;
|
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
|
||||||
|
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||||
|
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
|
||||||
|
#else
|
||||||
|
#error "Unknown __BYTE_ORDER__"
|
||||||
|
#endif
|
||||||
|
|
||||||
/* STRTAB */
|
/* STRTAB */
|
||||||
/* initialize strset with an empty string to conform to ELF */
|
/* initialize strset with an empty string to conform to ELF */
|
||||||
@@ -445,8 +396,6 @@ static int init_output_elf(struct bpf_linker *linker)
|
|||||||
pr_warn_elf("failed to create SYMTAB data");
|
pr_warn_elf("failed to create SYMTAB data");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
/* Ensure libelf translates byte-order of symbol records */
|
|
||||||
sec->data->d_type = ELF_T_SYM;
|
|
||||||
|
|
||||||
str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
|
str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
|
||||||
if (str_off < 0)
|
if (str_off < 0)
|
||||||
@@ -488,16 +437,19 @@ static int init_output_elf(struct bpf_linker *linker)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
|
int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
|
||||||
const char *filename)
|
const struct bpf_linker_file_opts *opts)
|
||||||
{
|
{
|
||||||
struct src_obj obj = {};
|
struct src_obj obj = {};
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
obj.filename = filename;
|
if (!OPTS_VALID(opts, bpf_linker_file_opts))
|
||||||
obj.fd = fd;
|
return libbpf_err(-EINVAL);
|
||||||
|
|
||||||
err = err ?: linker_load_obj_file(linker, &obj);
|
if (!linker->elf)
|
||||||
|
return libbpf_err(-EINVAL);
|
||||||
|
|
||||||
|
err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
|
||||||
err = err ?: linker_append_sec_data(linker, &obj);
|
err = err ?: linker_append_sec_data(linker, &obj);
|
||||||
err = err ?: linker_append_elf_syms(linker, &obj);
|
err = err ?: linker_append_elf_syms(linker, &obj);
|
||||||
err = err ?: linker_append_elf_relos(linker, &obj);
|
err = err ?: linker_append_elf_relos(linker, &obj);
|
||||||
@@ -512,91 +464,12 @@ static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
|
|||||||
free(obj.sym_map);
|
free(obj.sym_map);
|
||||||
if (obj.elf)
|
if (obj.elf)
|
||||||
elf_end(obj.elf);
|
elf_end(obj.elf);
|
||||||
|
if (obj.fd >= 0)
|
||||||
|
close(obj.fd);
|
||||||
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
|
|
||||||
const struct bpf_linker_file_opts *opts)
|
|
||||||
{
|
|
||||||
int fd, err;
|
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_linker_file_opts))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
if (!linker->elf)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
fd = open(filename, O_RDONLY | O_CLOEXEC);
|
|
||||||
if (fd < 0) {
|
|
||||||
err = -errno;
|
|
||||||
pr_warn("failed to open file '%s': %s\n", filename, errstr(err));
|
|
||||||
return libbpf_err(err);
|
|
||||||
}
|
|
||||||
|
|
||||||
err = bpf_linker_add_file(linker, fd, filename);
|
|
||||||
close(fd);
|
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
|
|
||||||
const struct bpf_linker_file_opts *opts)
|
|
||||||
{
|
|
||||||
char filename[32];
|
|
||||||
int err;
|
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_linker_file_opts))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
if (!linker->elf)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
if (fd < 0)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
snprintf(filename, sizeof(filename), "fd:%d", fd);
|
|
||||||
err = bpf_linker_add_file(linker, fd, filename);
|
|
||||||
return libbpf_err(err);
|
|
||||||
}
|
|
||||||
|
|
||||||
int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
|
|
||||||
const struct bpf_linker_file_opts *opts)
|
|
||||||
{
|
|
||||||
char filename[32];
|
|
||||||
int fd, written, ret;
|
|
||||||
|
|
||||||
if (!OPTS_VALID(opts, bpf_linker_file_opts))
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
if (!linker->elf)
|
|
||||||
return libbpf_err(-EINVAL);
|
|
||||||
|
|
||||||
snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz);
|
|
||||||
|
|
||||||
fd = sys_memfd_create(filename, 0);
|
|
||||||
if (fd < 0) {
|
|
||||||
ret = -errno;
|
|
||||||
pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret));
|
|
||||||
return libbpf_err(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
written = 0;
|
|
||||||
while (written < buf_sz) {
|
|
||||||
ret = write(fd, buf, buf_sz);
|
|
||||||
if (ret < 0) {
|
|
||||||
ret = -errno;
|
|
||||||
pr_warn("failed to write '%s': %s\n", filename, errstr(ret));
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
written += ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = bpf_linker_add_file(linker, fd, filename);
|
|
||||||
err_out:
|
|
||||||
close(fd);
|
|
||||||
return libbpf_err(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool is_dwarf_sec_name(const char *name)
|
static bool is_dwarf_sec_name(const char *name)
|
||||||
{
|
{
|
||||||
/* approximation, but the actual list is too long */
|
/* approximation, but the actual list is too long */
|
||||||
@@ -662,69 +535,65 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
|
|||||||
return sec;
|
return sec;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int linker_load_obj_file(struct bpf_linker *linker,
|
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
|
||||||
|
const struct bpf_linker_file_opts *opts,
|
||||||
struct src_obj *obj)
|
struct src_obj *obj)
|
||||||
{
|
{
|
||||||
|
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||||
|
const int host_endianness = ELFDATA2LSB;
|
||||||
|
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||||
|
const int host_endianness = ELFDATA2MSB;
|
||||||
|
#else
|
||||||
|
#error "Unknown __BYTE_ORDER__"
|
||||||
|
#endif
|
||||||
int err = 0;
|
int err = 0;
|
||||||
Elf_Scn *scn;
|
Elf_Scn *scn;
|
||||||
Elf_Data *data;
|
Elf_Data *data;
|
||||||
Elf64_Ehdr *ehdr;
|
Elf64_Ehdr *ehdr;
|
||||||
Elf64_Shdr *shdr;
|
Elf64_Shdr *shdr;
|
||||||
struct src_sec *sec;
|
struct src_sec *sec;
|
||||||
unsigned char obj_byteorder;
|
|
||||||
unsigned char link_byteorder = linker->elf_hdr->e_ident[EI_DATA];
|
|
||||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
|
||||||
const unsigned char host_byteorder = ELFDATA2LSB;
|
|
||||||
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
|
||||||
const unsigned char host_byteorder = ELFDATA2MSB;
|
|
||||||
#else
|
|
||||||
#error "Unknown __BYTE_ORDER__"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
pr_debug("linker: adding object file '%s'...\n", obj->filename);
|
pr_debug("linker: adding object file '%s'...\n", filename);
|
||||||
|
|
||||||
|
obj->filename = filename;
|
||||||
|
|
||||||
|
obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
|
||||||
|
if (obj->fd < 0) {
|
||||||
|
err = -errno;
|
||||||
|
pr_warn("failed to open file '%s': %d\n", filename, err);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
|
obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
|
||||||
if (!obj->elf) {
|
if (!obj->elf) {
|
||||||
pr_warn_elf("failed to parse ELF file '%s'", obj->filename);
|
err = -errno;
|
||||||
return -EINVAL;
|
pr_warn_elf("failed to parse ELF file '%s'", filename);
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Sanity check ELF file high-level properties */
|
/* Sanity check ELF file high-level properties */
|
||||||
ehdr = elf64_getehdr(obj->elf);
|
ehdr = elf64_getehdr(obj->elf);
|
||||||
if (!ehdr) {
|
if (!ehdr) {
|
||||||
pr_warn_elf("failed to get ELF header for %s", obj->filename);
|
err = -errno;
|
||||||
return -EINVAL;
|
pr_warn_elf("failed to get ELF header for %s", filename);
|
||||||
}
|
|
||||||
|
|
||||||
/* Linker output endianness set by first input object */
|
|
||||||
obj_byteorder = ehdr->e_ident[EI_DATA];
|
|
||||||
if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
|
|
||||||
err = -EOPNOTSUPP;
|
|
||||||
pr_warn("unknown byte order of ELF file %s\n", obj->filename);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
if (link_byteorder == ELFDATANONE) {
|
if (ehdr->e_ident[EI_DATA] != host_endianness) {
|
||||||
linker->elf_hdr->e_ident[EI_DATA] = obj_byteorder;
|
|
||||||
linker->swapped_endian = obj_byteorder != host_byteorder;
|
|
||||||
pr_debug("linker: set %s-endian output byte order\n",
|
|
||||||
obj_byteorder == ELFDATA2MSB ? "big" : "little");
|
|
||||||
} else if (link_byteorder != obj_byteorder) {
|
|
||||||
err = -EOPNOTSUPP;
|
err = -EOPNOTSUPP;
|
||||||
pr_warn("byte order mismatch with ELF file %s\n", obj->filename);
|
pr_warn_elf("unsupported byte order of ELF file %s", filename);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ehdr->e_type != ET_REL
|
if (ehdr->e_type != ET_REL
|
||||||
|| ehdr->e_machine != EM_BPF
|
|| ehdr->e_machine != EM_BPF
|
||||||
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
|
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
|
||||||
err = -EOPNOTSUPP;
|
err = -EOPNOTSUPP;
|
||||||
pr_warn_elf("unsupported kind of ELF file %s", obj->filename);
|
pr_warn_elf("unsupported kind of ELF file %s", filename);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
|
if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
|
||||||
pr_warn_elf("failed to get SHSTRTAB section index for %s", obj->filename);
|
err = -errno;
|
||||||
return -EINVAL;
|
pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
scn = NULL;
|
scn = NULL;
|
||||||
@@ -734,23 +603,26 @@ static int linker_load_obj_file(struct bpf_linker *linker,
|
|||||||
|
|
||||||
shdr = elf64_getshdr(scn);
|
shdr = elf64_getshdr(scn);
|
||||||
if (!shdr) {
|
if (!shdr) {
|
||||||
|
err = -errno;
|
||||||
pr_warn_elf("failed to get section #%zu header for %s",
|
pr_warn_elf("failed to get section #%zu header for %s",
|
||||||
sec_idx, obj->filename);
|
sec_idx, filename);
|
||||||
return -EINVAL;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
|
sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
|
||||||
if (!sec_name) {
|
if (!sec_name) {
|
||||||
|
err = -errno;
|
||||||
pr_warn_elf("failed to get section #%zu name for %s",
|
pr_warn_elf("failed to get section #%zu name for %s",
|
||||||
sec_idx, obj->filename);
|
sec_idx, filename);
|
||||||
return -EINVAL;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
data = elf_getdata(scn, 0);
|
data = elf_getdata(scn, 0);
|
||||||
if (!data) {
|
if (!data) {
|
||||||
|
err = -errno;
|
||||||
pr_warn_elf("failed to get section #%zu (%s) data from %s",
|
pr_warn_elf("failed to get section #%zu (%s) data from %s",
|
||||||
sec_idx, sec_name, obj->filename);
|
sec_idx, sec_name, filename);
|
||||||
return -EINVAL;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
sec = add_src_sec(obj, sec_name);
|
sec = add_src_sec(obj, sec_name);
|
||||||
@@ -784,8 +656,7 @@ static int linker_load_obj_file(struct bpf_linker *linker,
|
|||||||
obj->btf = btf__new(data->d_buf, shdr->sh_size);
|
obj->btf = btf__new(data->d_buf, shdr->sh_size);
|
||||||
err = libbpf_get_error(obj->btf);
|
err = libbpf_get_error(obj->btf);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("failed to parse .BTF from %s: %s\n",
|
pr_warn("failed to parse .BTF from %s: %d\n", filename, err);
|
||||||
obj->filename, errstr(err));
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
sec->skipped = true;
|
sec->skipped = true;
|
||||||
@@ -795,8 +666,7 @@ static int linker_load_obj_file(struct bpf_linker *linker,
|
|||||||
obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size);
|
obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size);
|
||||||
err = libbpf_get_error(obj->btf_ext);
|
err = libbpf_get_error(obj->btf_ext);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("failed to parse .BTF.ext from '%s': %s\n",
|
pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err);
|
||||||
obj->filename, errstr(err));
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
sec->skipped = true;
|
sec->skipped = true;
|
||||||
@@ -813,7 +683,7 @@ static int linker_load_obj_file(struct bpf_linker *linker,
|
|||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
pr_warn("unrecognized section #%zu (%s) in %s\n",
|
pr_warn("unrecognized section #%zu (%s) in %s\n",
|
||||||
sec_idx, sec_name, obj->filename);
|
sec_idx, sec_name, filename);
|
||||||
err = -EINVAL;
|
err = -EINVAL;
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
@@ -849,28 +719,13 @@ static int linker_sanity_check_elf(struct src_obj *obj)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is_dwarf_sec_name(sec->sec_name))
|
if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign))
|
||||||
continue;
|
return -EINVAL;
|
||||||
|
if (sec->shdr->sh_addralign != sec->data->d_align)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign)) {
|
if (sec->shdr->sh_size != sec->data->d_size)
|
||||||
pr_warn("ELF section #%zu alignment %llu is non pow-of-2 alignment in %s\n",
|
|
||||||
sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
|
|
||||||
obj->filename);
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
|
||||||
if (sec->shdr->sh_addralign != sec->data->d_align) {
|
|
||||||
pr_warn("ELF section #%zu has inconsistent alignment addr=%llu != d=%llu in %s\n",
|
|
||||||
sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
|
|
||||||
(long long unsigned)sec->data->d_align, obj->filename);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (sec->shdr->sh_size != sec->data->d_size) {
|
|
||||||
pr_warn("ELF section #%zu has inconsistent section size sh=%llu != d=%llu in %s\n",
|
|
||||||
sec->sec_idx, (long long unsigned)sec->shdr->sh_size,
|
|
||||||
(long long unsigned)sec->data->d_size, obj->filename);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (sec->shdr->sh_type) {
|
switch (sec->shdr->sh_type) {
|
||||||
case SHT_SYMTAB:
|
case SHT_SYMTAB:
|
||||||
@@ -882,12 +737,8 @@ static int linker_sanity_check_elf(struct src_obj *obj)
|
|||||||
break;
|
break;
|
||||||
case SHT_PROGBITS:
|
case SHT_PROGBITS:
|
||||||
if (sec->shdr->sh_flags & SHF_EXECINSTR) {
|
if (sec->shdr->sh_flags & SHF_EXECINSTR) {
|
||||||
if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0) {
|
if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0)
|
||||||
pr_warn("ELF section #%zu has unexpected size alignment %llu in %s\n",
|
|
||||||
sec->sec_idx, (long long unsigned)sec->shdr->sh_size,
|
|
||||||
obj->filename);
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case SHT_NOBITS:
|
case SHT_NOBITS:
|
||||||
@@ -1087,33 +938,19 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
|
|||||||
static int linker_sanity_check_btf(struct src_obj *obj)
|
static int linker_sanity_check_btf(struct src_obj *obj)
|
||||||
{
|
{
|
||||||
struct btf_type *t;
|
struct btf_type *t;
|
||||||
int i, n, err;
|
int i, n, err = 0;
|
||||||
|
|
||||||
if (!obj->btf)
|
if (!obj->btf)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
n = btf__type_cnt(obj->btf);
|
n = btf__type_cnt(obj->btf);
|
||||||
for (i = 1; i < n; i++) {
|
for (i = 1; i < n; i++) {
|
||||||
struct btf_field_iter it;
|
|
||||||
__u32 *type_id, *str_off;
|
|
||||||
|
|
||||||
t = btf_type_by_id(obj->btf, i);
|
t = btf_type_by_id(obj->btf, i);
|
||||||
|
|
||||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
|
err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
|
||||||
|
err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
while ((type_id = btf_field_iter_next(&it))) {
|
|
||||||
if (*type_id >= n)
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
|
|
||||||
if (err)
|
|
||||||
return err;
|
|
||||||
while ((str_off = btf_field_iter_next(&it))) {
|
|
||||||
if (!btf__str_by_offset(obj->btf, *str_off))
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@@ -1239,24 +1076,6 @@ static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool is_exec_sec(struct dst_sec *sec)
|
|
||||||
{
|
|
||||||
if (!sec || sec->ephemeral)
|
|
||||||
return false;
|
|
||||||
return (sec->shdr->sh_type == SHT_PROGBITS) &&
|
|
||||||
(sec->shdr->sh_flags & SHF_EXECINSTR);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void exec_sec_bswap(void *raw_data, int size)
|
|
||||||
{
|
|
||||||
const int insn_cnt = size / sizeof(struct bpf_insn);
|
|
||||||
struct bpf_insn *insn = raw_data;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; i < insn_cnt; i++, insn++)
|
|
||||||
bpf_insn_bswap(insn);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
|
static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
|
||||||
{
|
{
|
||||||
void *tmp;
|
void *tmp;
|
||||||
@@ -1316,10 +1135,6 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src
|
|||||||
memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
|
memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
|
||||||
/* now copy src data at a properly aligned offset */
|
/* now copy src data at a properly aligned offset */
|
||||||
memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
|
memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
|
||||||
|
|
||||||
/* convert added bpf insns to native byte-order */
|
|
||||||
if (linker->swapped_endian && is_exec_sec(dst))
|
|
||||||
exec_sec_bswap(dst->raw_data + dst_align_sz, src->shdr->sh_size);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dst->sec_sz = dst_final_sz;
|
dst->sec_sz = dst_final_sz;
|
||||||
@@ -1376,7 +1191,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj
|
|||||||
} else {
|
} else {
|
||||||
if (!secs_match(dst_sec, src_sec)) {
|
if (!secs_match(dst_sec, src_sec)) {
|
||||||
pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
|
pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
|
||||||
return -EINVAL;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* "license" and "version" sections are deduped */
|
/* "license" and "version" sections are deduped */
|
||||||
@@ -1565,7 +1380,7 @@ recur:
|
|||||||
return true;
|
return true;
|
||||||
case BTF_KIND_PTR:
|
case BTF_KIND_PTR:
|
||||||
/* just validate overall shape of the referenced type, so no
|
/* just validate overall shape of the referenced type, so no
|
||||||
* contents comparison for struct/union, and allowed fwd vs
|
* contents comparison for struct/union, and allowd fwd vs
|
||||||
* struct/union
|
* struct/union
|
||||||
*/
|
*/
|
||||||
exact = false;
|
exact = false;
|
||||||
@@ -2114,7 +1929,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
|
|||||||
|
|
||||||
/* If existing symbol is a strong resolved symbol, bail out,
|
/* If existing symbol is a strong resolved symbol, bail out,
|
||||||
* because we lost resolution battle have nothing to
|
* because we lost resolution battle have nothing to
|
||||||
* contribute. We already checked above that there is no
|
* contribute. We already checked abover that there is no
|
||||||
* strong-strong conflict. We also already tightened binding
|
* strong-strong conflict. We also already tightened binding
|
||||||
* and visibility, so nothing else to contribute at that point.
|
* and visibility, so nothing else to contribute at that point.
|
||||||
*/
|
*/
|
||||||
@@ -2163,7 +1978,7 @@ add_sym:
|
|||||||
|
|
||||||
obj->sym_map[src_sym_idx] = dst_sym_idx;
|
obj->sym_map[src_sym_idx] = dst_sym_idx;
|
||||||
|
|
||||||
if (sym_type == STT_SECTION && dst_sec) {
|
if (sym_type == STT_SECTION && dst_sym) {
|
||||||
dst_sec->sec_sym_idx = dst_sym_idx;
|
dst_sec->sec_sym_idx = dst_sym_idx;
|
||||||
dst_sym->st_value = 0;
|
dst_sym->st_value = 0;
|
||||||
}
|
}
|
||||||
@@ -2223,7 +2038,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob
|
|||||||
}
|
}
|
||||||
} else if (!secs_match(dst_sec, src_sec)) {
|
} else if (!secs_match(dst_sec, src_sec)) {
|
||||||
pr_warn("sections %s are not compatible\n", src_sec->sec_name);
|
pr_warn("sections %s are not compatible\n", src_sec->sec_name);
|
||||||
return -EINVAL;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* shdr->sh_link points to SYMTAB */
|
/* shdr->sh_link points to SYMTAB */
|
||||||
@@ -2379,17 +2194,10 @@ static int linker_fixup_btf(struct src_obj *obj)
|
|||||||
vi = btf_var_secinfos(t);
|
vi = btf_var_secinfos(t);
|
||||||
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
|
for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
|
||||||
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
|
const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
|
||||||
const char *var_name;
|
const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
|
||||||
int var_linkage;
|
int var_linkage = btf_var(vt)->linkage;
|
||||||
Elf64_Sym *sym;
|
Elf64_Sym *sym;
|
||||||
|
|
||||||
/* could be a variable or function */
|
|
||||||
if (!btf_is_var(vt))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
var_name = btf__str_by_offset(obj->btf, vt->name_off);
|
|
||||||
var_linkage = btf_var(vt)->linkage;
|
|
||||||
|
|
||||||
/* no need to patch up static or extern vars */
|
/* no need to patch up static or extern vars */
|
||||||
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
|
if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
|
||||||
continue;
|
continue;
|
||||||
@@ -2407,10 +2215,26 @@ static int linker_fixup_btf(struct src_obj *obj)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int remap_type_id(__u32 *type_id, void *ctx)
|
||||||
|
{
|
||||||
|
int *id_map = ctx;
|
||||||
|
int new_id = id_map[*type_id];
|
||||||
|
|
||||||
|
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
|
||||||
|
if (new_id == 0 && *type_id != 0) {
|
||||||
|
pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
*type_id = id_map[*type_id];
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
|
static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
|
||||||
{
|
{
|
||||||
const struct btf_type *t;
|
const struct btf_type *t;
|
||||||
int i, j, n, start_id, id, err;
|
int i, j, n, start_id, id;
|
||||||
const char *name;
|
const char *name;
|
||||||
|
|
||||||
if (!obj->btf)
|
if (!obj->btf)
|
||||||
@@ -2481,25 +2305,9 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
|
|||||||
n = btf__type_cnt(linker->btf);
|
n = btf__type_cnt(linker->btf);
|
||||||
for (i = start_id; i < n; i++) {
|
for (i = start_id; i < n; i++) {
|
||||||
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
|
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
|
||||||
struct btf_field_iter it;
|
|
||||||
__u32 *type_id;
|
|
||||||
|
|
||||||
err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
|
if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
|
||||||
if (err)
|
return -EINVAL;
|
||||||
return err;
|
|
||||||
|
|
||||||
while ((type_id = btf_field_iter_next(&it))) {
|
|
||||||
int new_id = obj->btf_type_map[*type_id];
|
|
||||||
|
|
||||||
/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
|
|
||||||
if (new_id == 0 && *type_id != 0) {
|
|
||||||
pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
|
|
||||||
*type_id);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
*type_id = obj->btf_type_map[*type_id];
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
|
/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
|
||||||
@@ -2567,10 +2375,6 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
|
|||||||
if (glob_sym && glob_sym->var_idx >= 0) {
|
if (glob_sym && glob_sym->var_idx >= 0) {
|
||||||
__s64 sz;
|
__s64 sz;
|
||||||
|
|
||||||
/* FUNCs don't have size, nothing to update */
|
|
||||||
if (btf_is_func(t))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
|
dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
|
||||||
/* Because underlying BTF type might have
|
/* Because underlying BTF type might have
|
||||||
* changed, so might its size have changed, so
|
* changed, so might its size have changed, so
|
||||||
@@ -2784,32 +2588,27 @@ int bpf_linker__finalize(struct bpf_linker *linker)
|
|||||||
if (!sec->scn)
|
if (!sec->scn)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* restore sections with bpf insns to target byte-order */
|
|
||||||
if (linker->swapped_endian && is_exec_sec(sec))
|
|
||||||
exec_sec_bswap(sec->raw_data, sec->sec_sz);
|
|
||||||
|
|
||||||
sec->data->d_buf = sec->raw_data;
|
sec->data->d_buf = sec->raw_data;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Finalize ELF layout */
|
/* Finalize ELF layout */
|
||||||
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
|
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
|
||||||
err = -EINVAL;
|
err = -errno;
|
||||||
pr_warn_elf("failed to finalize ELF layout");
|
pr_warn_elf("failed to finalize ELF layout");
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Write out final ELF contents */
|
/* Write out final ELF contents */
|
||||||
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
|
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
|
||||||
err = -EINVAL;
|
err = -errno;
|
||||||
pr_warn_elf("failed to write ELF contents");
|
pr_warn_elf("failed to write ELF contents");
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
elf_end(linker->elf);
|
elf_end(linker->elf);
|
||||||
linker->elf = NULL;
|
close(linker->fd);
|
||||||
|
|
||||||
if (linker->fd_is_owned)
|
linker->elf = NULL;
|
||||||
close(linker->fd);
|
|
||||||
linker->fd = -1;
|
linker->fd = -1;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@@ -2857,7 +2656,6 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
|
|||||||
|
|
||||||
static int finalize_btf(struct bpf_linker *linker)
|
static int finalize_btf(struct bpf_linker *linker)
|
||||||
{
|
{
|
||||||
enum btf_endianness link_endianness;
|
|
||||||
LIBBPF_OPTS(btf_dedup_opts, opts);
|
LIBBPF_OPTS(btf_dedup_opts, opts);
|
||||||
struct btf *btf = linker->btf;
|
struct btf *btf = linker->btf;
|
||||||
const void *raw_data;
|
const void *raw_data;
|
||||||
@@ -2891,24 +2689,17 @@ static int finalize_btf(struct bpf_linker *linker)
|
|||||||
|
|
||||||
err = finalize_btf_ext(linker);
|
err = finalize_btf_ext(linker);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn(".BTF.ext generation failed: %s\n", errstr(err));
|
pr_warn(".BTF.ext generation failed: %d\n", err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
opts.btf_ext = linker->btf_ext;
|
opts.btf_ext = linker->btf_ext;
|
||||||
err = btf__dedup(linker->btf, &opts);
|
err = btf__dedup(linker->btf, &opts);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("BTF dedup failed: %s\n", errstr(err));
|
pr_warn("BTF dedup failed: %d\n", err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Set .BTF and .BTF.ext output byte order */
|
|
||||||
link_endianness = linker->elf_hdr->e_ident[EI_DATA] == ELFDATA2MSB ?
|
|
||||||
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
|
|
||||||
btf__set_endianness(linker->btf, link_endianness);
|
|
||||||
if (linker->btf_ext)
|
|
||||||
btf_ext__set_endianness(linker->btf_ext, link_endianness);
|
|
||||||
|
|
||||||
/* Emit .BTF section */
|
/* Emit .BTF section */
|
||||||
raw_data = btf__raw_data(linker->btf, &raw_sz);
|
raw_data = btf__raw_data(linker->btf, &raw_sz);
|
||||||
if (!raw_data)
|
if (!raw_data)
|
||||||
@@ -2916,19 +2707,19 @@ static int finalize_btf(struct bpf_linker *linker)
|
|||||||
|
|
||||||
err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz);
|
err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("failed to write out .BTF ELF section: %s\n", errstr(err));
|
pr_warn("failed to write out .BTF ELF section: %d\n", err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Emit .BTF.ext section */
|
/* Emit .BTF.ext section */
|
||||||
if (linker->btf_ext) {
|
if (linker->btf_ext) {
|
||||||
raw_data = btf_ext__raw_data(linker->btf_ext, &raw_sz);
|
raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz);
|
||||||
if (!raw_data)
|
if (!raw_data)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz);
|
err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("failed to write out .BTF.ext ELF section: %s\n", errstr(err));
|
pr_warn("failed to write out .BTF.ext ELF section: %d\n", err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -3104,7 +2895,7 @@ static int finalize_btf_ext(struct bpf_linker *linker)
|
|||||||
err = libbpf_get_error(linker->btf_ext);
|
err = libbpf_get_error(linker->btf_ext);
|
||||||
if (err) {
|
if (err) {
|
||||||
linker->btf_ext = NULL;
|
linker->btf_ext = NULL;
|
||||||
pr_warn("failed to parse final .BTF.ext data: %s\n", errstr(err));
|
pr_warn("failed to parse final .BTF.ext data: %d\n", err);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -45,7 +45,6 @@ struct xdp_id_md {
|
|||||||
|
|
||||||
struct xdp_features_md {
|
struct xdp_features_md {
|
||||||
int ifindex;
|
int ifindex;
|
||||||
__u32 xdp_zc_max_segs;
|
|
||||||
__u64 flags;
|
__u64 flags;
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -422,9 +421,6 @@ static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
|
|||||||
return NL_CONT;
|
return NL_CONT;
|
||||||
|
|
||||||
md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);
|
md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);
|
||||||
if (tb[NETDEV_A_DEV_XDP_ZC_MAX_SEGS])
|
|
||||||
md->xdp_zc_max_segs =
|
|
||||||
libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_XDP_ZC_MAX_SEGS]);
|
|
||||||
return NL_DONE;
|
return NL_DONE;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -496,8 +492,7 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
|
|||||||
if (err)
|
if (err)
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
|
|
||||||
OPTS_SET(opts, feature_flags, md.flags);
|
opts->feature_flags = md.flags;
|
||||||
OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs);
|
|
||||||
|
|
||||||
skip_feature_flags:
|
skip_feature_flags:
|
||||||
return 0;
|
return 0;
|
||||||
|
|||||||
15
src/nlattr.c
15
src/nlattr.c
@@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype,
|
|||||||
minlen = nla_attr_minlen[pt->type];
|
minlen = nla_attr_minlen[pt->type];
|
||||||
|
|
||||||
if (libbpf_nla_len(nla) < minlen)
|
if (libbpf_nla_len(nla) < minlen)
|
||||||
return -EINVAL;
|
return -1;
|
||||||
|
|
||||||
if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
|
if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
|
||||||
return -EINVAL;
|
return -1;
|
||||||
|
|
||||||
if (pt->type == LIBBPF_NLA_STRING) {
|
if (pt->type == LIBBPF_NLA_STRING) {
|
||||||
char *data = libbpf_nla_data(nla);
|
char *data = libbpf_nla_data(nla);
|
||||||
|
|
||||||
if (data[libbpf_nla_len(nla) - 1] != '\0')
|
if (data[libbpf_nla_len(nla) - 1] != '\0')
|
||||||
return -EINVAL;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@@ -118,18 +118,19 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
|
|||||||
if (policy) {
|
if (policy) {
|
||||||
err = validate_nla(nla, maxtype, policy);
|
err = validate_nla(nla, maxtype, policy);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
return err;
|
goto errout;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tb[type]) {
|
if (tb[type])
|
||||||
pr_warn("Attribute of type %#x found multiple times in message, "
|
pr_warn("Attribute of type %#x found multiple times in message, "
|
||||||
"previous attribute is being ignored.\n", type);
|
"previous attribute is being ignored.\n", type);
|
||||||
}
|
|
||||||
|
|
||||||
tb[type] = nla;
|
tb[type] = nla;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
err = 0;
|
||||||
|
errout:
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
|||||||
{
|
{
|
||||||
const struct bpf_core_accessor *acc;
|
const struct bpf_core_accessor *acc;
|
||||||
const struct btf_type *t;
|
const struct btf_type *t;
|
||||||
__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id;
|
__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
|
||||||
const struct btf_member *m;
|
const struct btf_member *m;
|
||||||
const struct btf_type *mt;
|
const struct btf_type *mt;
|
||||||
bool bitfield;
|
bool bitfield;
|
||||||
@@ -706,14 +706,8 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
|||||||
if (!acc->name) {
|
if (!acc->name) {
|
||||||
if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
|
if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
|
||||||
*val = spec->bit_offset / 8;
|
*val = spec->bit_offset / 8;
|
||||||
/* remember field size for load/store mem size;
|
/* remember field size for load/store mem size */
|
||||||
* note, for arrays we care about individual element
|
sz = btf__resolve_size(spec->btf, acc->type_id);
|
||||||
* sizes, not the overall array size
|
|
||||||
*/
|
|
||||||
t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id);
|
|
||||||
while (btf_is_array(t))
|
|
||||||
t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
|
|
||||||
sz = btf__resolve_size(spec->btf, elem_id);
|
|
||||||
if (sz < 0)
|
if (sz < 0)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
*field_sz = sz;
|
*field_sz = sz;
|
||||||
@@ -773,17 +767,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
|||||||
case BPF_CORE_FIELD_BYTE_OFFSET:
|
case BPF_CORE_FIELD_BYTE_OFFSET:
|
||||||
*val = byte_off;
|
*val = byte_off;
|
||||||
if (!bitfield) {
|
if (!bitfield) {
|
||||||
/* remember field size for load/store mem size;
|
*field_sz = byte_sz;
|
||||||
* note, for arrays we care about individual element
|
|
||||||
* sizes, not the overall array size
|
|
||||||
*/
|
|
||||||
t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id);
|
|
||||||
while (btf_is_array(t))
|
|
||||||
t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id);
|
|
||||||
sz = btf__resolve_size(spec->btf, elem_id);
|
|
||||||
if (sz < 0)
|
|
||||||
return -EINVAL;
|
|
||||||
*field_sz = sz;
|
|
||||||
*type_id = field_type_id;
|
*type_id = field_type_id;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@@ -792,7 +776,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
|||||||
break;
|
break;
|
||||||
case BPF_CORE_FIELD_SIGNED:
|
case BPF_CORE_FIELD_SIGNED:
|
||||||
*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
|
*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
|
||||||
(btf_is_int(mt) && (btf_int_encoding(mt) & BTF_INT_SIGNED));
|
(btf_int_encoding(mt) & BTF_INT_SIGNED);
|
||||||
if (validate)
|
if (validate)
|
||||||
*validate = true; /* signedness is never ambiguous */
|
*validate = true; /* signedness is never ambiguous */
|
||||||
break;
|
break;
|
||||||
@@ -1355,7 +1339,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
|
|||||||
cands->cands[i].id, cand_spec);
|
cands->cands[i].id, cand_spec);
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
|
bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
|
||||||
pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
|
pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
|
||||||
prog_name, relo_idx, i, spec_buf, err);
|
prog_name, relo_idx, i, spec_buf, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|||||||
168
src/ringbuf.c
168
src/ringbuf.c
@@ -21,7 +21,6 @@
|
|||||||
#include "libbpf.h"
|
#include "libbpf.h"
|
||||||
#include "libbpf_internal.h"
|
#include "libbpf_internal.h"
|
||||||
#include "bpf.h"
|
#include "bpf.h"
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
struct ring {
|
struct ring {
|
||||||
ring_buffer_sample_fn sample_cb;
|
ring_buffer_sample_fn sample_cb;
|
||||||
@@ -35,7 +34,7 @@ struct ring {
|
|||||||
|
|
||||||
struct ring_buffer {
|
struct ring_buffer {
|
||||||
struct epoll_event *events;
|
struct epoll_event *events;
|
||||||
struct ring **rings;
|
struct ring *rings;
|
||||||
size_t page_size;
|
size_t page_size;
|
||||||
int epoll_fd;
|
int epoll_fd;
|
||||||
int ring_cnt;
|
int ring_cnt;
|
||||||
@@ -58,7 +57,7 @@ struct ringbuf_hdr {
|
|||||||
__u32 pad;
|
__u32 pad;
|
||||||
};
|
};
|
||||||
|
|
||||||
static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
|
static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
|
||||||
{
|
{
|
||||||
if (r->consumer_pos) {
|
if (r->consumer_pos) {
|
||||||
munmap(r->consumer_pos, rb->page_size);
|
munmap(r->consumer_pos, rb->page_size);
|
||||||
@@ -68,8 +67,6 @@ static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
|
|||||||
munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
|
munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
|
||||||
r->producer_pos = NULL;
|
r->producer_pos = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
free(r);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add extra RINGBUF maps to this ring buffer manager */
|
/* Add extra RINGBUF maps to this ring buffer manager */
|
||||||
@@ -89,8 +86,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
|||||||
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
|
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
|
||||||
if (err) {
|
if (err) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("ringbuf: failed to get map info for fd=%d: %s\n",
|
pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
|
||||||
map_fd, errstr(err));
|
map_fd, err);
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -110,10 +107,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
|||||||
return libbpf_err(-ENOMEM);
|
return libbpf_err(-ENOMEM);
|
||||||
rb->events = tmp;
|
rb->events = tmp;
|
||||||
|
|
||||||
r = calloc(1, sizeof(*r));
|
r = &rb->rings[rb->ring_cnt];
|
||||||
if (!r)
|
memset(r, 0, sizeof(*r));
|
||||||
return libbpf_err(-ENOMEM);
|
|
||||||
rb->rings[rb->ring_cnt] = r;
|
|
||||||
|
|
||||||
r->map_fd = map_fd;
|
r->map_fd = map_fd;
|
||||||
r->sample_cb = sample_cb;
|
r->sample_cb = sample_cb;
|
||||||
@@ -124,9 +119,9 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
|||||||
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
|
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
|
||||||
if (tmp == MAP_FAILED) {
|
if (tmp == MAP_FAILED) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %s\n",
|
pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
|
||||||
map_fd, errstr(err));
|
map_fd, err);
|
||||||
goto err_out;
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
r->consumer_pos = tmp;
|
r->consumer_pos = tmp;
|
||||||
|
|
||||||
@@ -136,16 +131,16 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
|||||||
*/
|
*/
|
||||||
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
|
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
|
||||||
if (mmap_sz != (__u64)(size_t)mmap_sz) {
|
if (mmap_sz != (__u64)(size_t)mmap_sz) {
|
||||||
err = -E2BIG;
|
|
||||||
pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
|
pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
|
||||||
goto err_out;
|
return libbpf_err(-E2BIG);
|
||||||
}
|
}
|
||||||
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
|
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
|
||||||
if (tmp == MAP_FAILED) {
|
if (tmp == MAP_FAILED) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %s\n",
|
ringbuf_unmap_ring(rb, r);
|
||||||
map_fd, errstr(err));
|
pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
|
||||||
goto err_out;
|
map_fd, err);
|
||||||
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
r->producer_pos = tmp;
|
r->producer_pos = tmp;
|
||||||
r->data = tmp + rb->page_size;
|
r->data = tmp + rb->page_size;
|
||||||
@@ -157,17 +152,14 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
|||||||
e->data.fd = rb->ring_cnt;
|
e->data.fd = rb->ring_cnt;
|
||||||
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
|
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("ringbuf: failed to epoll add map fd=%d: %s\n",
|
ringbuf_unmap_ring(rb, r);
|
||||||
map_fd, errstr(err));
|
pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
|
||||||
goto err_out;
|
map_fd, err);
|
||||||
|
return libbpf_err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
rb->ring_cnt++;
|
rb->ring_cnt++;
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_out:
|
|
||||||
ringbuf_free_ring(rb, r);
|
|
||||||
return libbpf_err(err);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void ring_buffer__free(struct ring_buffer *rb)
|
void ring_buffer__free(struct ring_buffer *rb)
|
||||||
@@ -178,7 +170,7 @@ void ring_buffer__free(struct ring_buffer *rb)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
for (i = 0; i < rb->ring_cnt; ++i)
|
for (i = 0; i < rb->ring_cnt; ++i)
|
||||||
ringbuf_free_ring(rb, rb->rings[i]);
|
ringbuf_unmap_ring(rb, &rb->rings[i]);
|
||||||
if (rb->epoll_fd >= 0)
|
if (rb->epoll_fd >= 0)
|
||||||
close(rb->epoll_fd);
|
close(rb->epoll_fd);
|
||||||
|
|
||||||
@@ -206,7 +198,7 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
|
|||||||
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
|
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
|
||||||
if (rb->epoll_fd < 0) {
|
if (rb->epoll_fd < 0) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("ringbuf: failed to create epoll instance: %s\n", errstr(err));
|
pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -232,7 +224,7 @@ static inline int roundup_len(__u32 len)
|
|||||||
return (len + 7) / 8 * 8;
|
return (len + 7) / 8 * 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int64_t ringbuf_process_ring(struct ring *r, size_t n)
|
static int64_t ringbuf_process_ring(struct ring *r)
|
||||||
{
|
{
|
||||||
int *len_ptr, len, err;
|
int *len_ptr, len, err;
|
||||||
/* 64-bit to avoid overflow in case of extreme application behavior */
|
/* 64-bit to avoid overflow in case of extreme application behavior */
|
||||||
@@ -269,42 +261,12 @@ static int64_t ringbuf_process_ring(struct ring *r, size_t n)
|
|||||||
}
|
}
|
||||||
|
|
||||||
smp_store_release(r->consumer_pos, cons_pos);
|
smp_store_release(r->consumer_pos, cons_pos);
|
||||||
|
|
||||||
if (cnt >= n)
|
|
||||||
goto done;
|
|
||||||
}
|
}
|
||||||
} while (got_new_data);
|
} while (got_new_data);
|
||||||
done:
|
done:
|
||||||
return cnt;
|
return cnt;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Consume available ring buffer(s) data without event polling, up to n
|
|
||||||
* records.
|
|
||||||
*
|
|
||||||
* Returns number of records consumed across all registered ring buffers (or
|
|
||||||
* n, whichever is less), or negative number if any of the callbacks return
|
|
||||||
* error.
|
|
||||||
*/
|
|
||||||
int ring_buffer__consume_n(struct ring_buffer *rb, size_t n)
|
|
||||||
{
|
|
||||||
int64_t err, res = 0;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; i < rb->ring_cnt; i++) {
|
|
||||||
struct ring *ring = rb->rings[i];
|
|
||||||
|
|
||||||
err = ringbuf_process_ring(ring, n);
|
|
||||||
if (err < 0)
|
|
||||||
return libbpf_err(err);
|
|
||||||
res += err;
|
|
||||||
n -= err;
|
|
||||||
|
|
||||||
if (n == 0)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
return res > INT_MAX ? INT_MAX : res;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Consume available ring buffer(s) data without event polling.
|
/* Consume available ring buffer(s) data without event polling.
|
||||||
* Returns number of records consumed across all registered ring buffers (or
|
* Returns number of records consumed across all registered ring buffers (or
|
||||||
* INT_MAX, whichever is less), or negative number if any of the callbacks
|
* INT_MAX, whichever is less), or negative number if any of the callbacks
|
||||||
@@ -316,17 +278,15 @@ int ring_buffer__consume(struct ring_buffer *rb)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < rb->ring_cnt; i++) {
|
for (i = 0; i < rb->ring_cnt; i++) {
|
||||||
struct ring *ring = rb->rings[i];
|
struct ring *ring = &rb->rings[i];
|
||||||
|
|
||||||
err = ringbuf_process_ring(ring, INT_MAX);
|
err = ringbuf_process_ring(ring);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
res += err;
|
res += err;
|
||||||
if (res > INT_MAX) {
|
|
||||||
res = INT_MAX;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
if (res > INT_MAX)
|
||||||
|
return INT_MAX;
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,15 +305,15 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
|
|||||||
|
|
||||||
for (i = 0; i < cnt; i++) {
|
for (i = 0; i < cnt; i++) {
|
||||||
__u32 ring_id = rb->events[i].data.fd;
|
__u32 ring_id = rb->events[i].data.fd;
|
||||||
struct ring *ring = rb->rings[ring_id];
|
struct ring *ring = &rb->rings[ring_id];
|
||||||
|
|
||||||
err = ringbuf_process_ring(ring, INT_MAX);
|
err = ringbuf_process_ring(ring);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
return libbpf_err(err);
|
return libbpf_err(err);
|
||||||
res += err;
|
res += err;
|
||||||
}
|
}
|
||||||
if (res > INT_MAX)
|
if (res > INT_MAX)
|
||||||
res = INT_MAX;
|
return INT_MAX;
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -363,63 +323,6 @@ int ring_buffer__epoll_fd(const struct ring_buffer *rb)
|
|||||||
return rb->epoll_fd;
|
return rb->epoll_fd;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx)
|
|
||||||
{
|
|
||||||
if (idx >= rb->ring_cnt)
|
|
||||||
return errno = ERANGE, NULL;
|
|
||||||
|
|
||||||
return rb->rings[idx];
|
|
||||||
}
|
|
||||||
|
|
||||||
unsigned long ring__consumer_pos(const struct ring *r)
|
|
||||||
{
|
|
||||||
/* Synchronizes with smp_store_release() in ringbuf_process_ring(). */
|
|
||||||
return smp_load_acquire(r->consumer_pos);
|
|
||||||
}
|
|
||||||
|
|
||||||
unsigned long ring__producer_pos(const struct ring *r)
|
|
||||||
{
|
|
||||||
/* Synchronizes with smp_store_release() in __bpf_ringbuf_reserve() in
|
|
||||||
* the kernel.
|
|
||||||
*/
|
|
||||||
return smp_load_acquire(r->producer_pos);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ring__avail_data_size(const struct ring *r)
|
|
||||||
{
|
|
||||||
unsigned long cons_pos, prod_pos;
|
|
||||||
|
|
||||||
cons_pos = ring__consumer_pos(r);
|
|
||||||
prod_pos = ring__producer_pos(r);
|
|
||||||
return prod_pos - cons_pos;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ring__size(const struct ring *r)
|
|
||||||
{
|
|
||||||
return r->mask + 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int ring__map_fd(const struct ring *r)
|
|
||||||
{
|
|
||||||
return r->map_fd;
|
|
||||||
}
|
|
||||||
|
|
||||||
int ring__consume_n(struct ring *r, size_t n)
|
|
||||||
{
|
|
||||||
int64_t res;
|
|
||||||
|
|
||||||
res = ringbuf_process_ring(r, n);
|
|
||||||
if (res < 0)
|
|
||||||
return libbpf_err(res);
|
|
||||||
|
|
||||||
return res > INT_MAX ? INT_MAX : res;
|
|
||||||
}
|
|
||||||
|
|
||||||
int ring__consume(struct ring *r)
|
|
||||||
{
|
|
||||||
return ring__consume_n(r, INT_MAX);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
|
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
|
||||||
{
|
{
|
||||||
if (rb->consumer_pos) {
|
if (rb->consumer_pos) {
|
||||||
@@ -459,8 +362,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
|
|||||||
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
|
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
|
||||||
if (err) {
|
if (err) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("user ringbuf: failed to get map info for fd=%d: %s\n",
|
pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
|
||||||
map_fd, errstr(err));
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -476,8 +378,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
|
|||||||
tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
|
tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
|
||||||
if (tmp == MAP_FAILED) {
|
if (tmp == MAP_FAILED) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %s\n",
|
pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
|
||||||
map_fd, errstr(err));
|
map_fd, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
rb->consumer_pos = tmp;
|
rb->consumer_pos = tmp;
|
||||||
@@ -496,8 +398,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
|
|||||||
map_fd, rb->page_size);
|
map_fd, rb->page_size);
|
||||||
if (tmp == MAP_FAILED) {
|
if (tmp == MAP_FAILED) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %s\n",
|
pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
|
||||||
map_fd, errstr(err));
|
map_fd, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -508,7 +410,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
|
|||||||
rb_epoll->events = EPOLLOUT;
|
rb_epoll->events = EPOLLOUT;
|
||||||
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
|
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("user ringbuf: failed to epoll add map fd=%d: %s\n", map_fd, errstr(err));
|
pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -533,7 +435,7 @@ user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
|
|||||||
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
|
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
|
||||||
if (rb->epoll_fd < 0) {
|
if (rb->epoll_fd < 0) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("user ringbuf: failed to create epoll instance: %s\n", errstr(err));
|
pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -107,7 +107,7 @@ static inline void skel_free(const void *p)
|
|||||||
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
|
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
|
||||||
* skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
|
* skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
|
||||||
* does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
|
* does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
|
||||||
* is not necessary.
|
* is not nessary.
|
||||||
*
|
*
|
||||||
* For user space:
|
* For user space:
|
||||||
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
|
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
|
||||||
@@ -351,11 +351,10 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
|
|||||||
attr.test.ctx_size_in = opts->ctx->sz;
|
attr.test.ctx_size_in = opts->ctx->sz;
|
||||||
err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
|
err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
|
||||||
if (err < 0 || (int)attr.test.retval < 0) {
|
if (err < 0 || (int)attr.test.retval < 0) {
|
||||||
|
opts->errstr = "failed to execute loader prog";
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
opts->errstr = "failed to execute loader prog";
|
|
||||||
set_err;
|
set_err;
|
||||||
} else {
|
} else {
|
||||||
opts->errstr = "error returned by loader prog";
|
|
||||||
err = (int)attr.test.retval;
|
err = (int)attr.test.retval;
|
||||||
#ifndef __KERNEL__
|
#ifndef __KERNEL__
|
||||||
errno = -err;
|
errno = -err;
|
||||||
|
|||||||
@@ -2,13 +2,8 @@
|
|||||||
#undef _GNU_SOURCE
|
#undef _GNU_SOURCE
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <errno.h>
|
|
||||||
#include "str_error.h"
|
#include "str_error.h"
|
||||||
|
|
||||||
#ifndef ENOTSUPP
|
|
||||||
#define ENOTSUPP 524
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* make sure libbpf doesn't use kernel-only integer typedefs */
|
/* make sure libbpf doesn't use kernel-only integer typedefs */
|
||||||
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
|
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
|
||||||
|
|
||||||
@@ -20,85 +15,7 @@
|
|||||||
char *libbpf_strerror_r(int err, char *dst, int len)
|
char *libbpf_strerror_r(int err, char *dst, int len)
|
||||||
{
|
{
|
||||||
int ret = strerror_r(err < 0 ? -err : err, dst, len);
|
int ret = strerror_r(err < 0 ? -err : err, dst, len);
|
||||||
/* on glibc <2.13, ret == -1 and errno is set, if strerror_r() can't
|
if (ret)
|
||||||
* handle the error, on glibc >=2.13 *positive* (errno-like) error
|
snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
|
||||||
* code is returned directly
|
|
||||||
*/
|
|
||||||
if (ret == -1)
|
|
||||||
ret = errno;
|
|
||||||
if (ret) {
|
|
||||||
if (ret == EINVAL)
|
|
||||||
/* strerror_r() doesn't recognize this specific error */
|
|
||||||
snprintf(dst, len, "unknown error (%d)", err < 0 ? err : -err);
|
|
||||||
else
|
|
||||||
snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
|
|
||||||
}
|
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char *libbpf_errstr(int err)
|
|
||||||
{
|
|
||||||
static __thread char buf[12];
|
|
||||||
|
|
||||||
if (err > 0)
|
|
||||||
err = -err;
|
|
||||||
|
|
||||||
switch (err) {
|
|
||||||
case -E2BIG: return "-E2BIG";
|
|
||||||
case -EACCES: return "-EACCES";
|
|
||||||
case -EADDRINUSE: return "-EADDRINUSE";
|
|
||||||
case -EADDRNOTAVAIL: return "-EADDRNOTAVAIL";
|
|
||||||
case -EAGAIN: return "-EAGAIN";
|
|
||||||
case -EALREADY: return "-EALREADY";
|
|
||||||
case -EBADF: return "-EBADF";
|
|
||||||
case -EBADFD: return "-EBADFD";
|
|
||||||
case -EBUSY: return "-EBUSY";
|
|
||||||
case -ECANCELED: return "-ECANCELED";
|
|
||||||
case -ECHILD: return "-ECHILD";
|
|
||||||
case -EDEADLK: return "-EDEADLK";
|
|
||||||
case -EDOM: return "-EDOM";
|
|
||||||
case -EEXIST: return "-EEXIST";
|
|
||||||
case -EFAULT: return "-EFAULT";
|
|
||||||
case -EFBIG: return "-EFBIG";
|
|
||||||
case -EILSEQ: return "-EILSEQ";
|
|
||||||
case -EINPROGRESS: return "-EINPROGRESS";
|
|
||||||
case -EINTR: return "-EINTR";
|
|
||||||
case -EINVAL: return "-EINVAL";
|
|
||||||
case -EIO: return "-EIO";
|
|
||||||
case -EISDIR: return "-EISDIR";
|
|
||||||
case -ELOOP: return "-ELOOP";
|
|
||||||
case -EMFILE: return "-EMFILE";
|
|
||||||
case -EMLINK: return "-EMLINK";
|
|
||||||
case -EMSGSIZE: return "-EMSGSIZE";
|
|
||||||
case -ENAMETOOLONG: return "-ENAMETOOLONG";
|
|
||||||
case -ENFILE: return "-ENFILE";
|
|
||||||
case -ENODATA: return "-ENODATA";
|
|
||||||
case -ENODEV: return "-ENODEV";
|
|
||||||
case -ENOENT: return "-ENOENT";
|
|
||||||
case -ENOEXEC: return "-ENOEXEC";
|
|
||||||
case -ENOLINK: return "-ENOLINK";
|
|
||||||
case -ENOMEM: return "-ENOMEM";
|
|
||||||
case -ENOSPC: return "-ENOSPC";
|
|
||||||
case -ENOTBLK: return "-ENOTBLK";
|
|
||||||
case -ENOTDIR: return "-ENOTDIR";
|
|
||||||
case -ENOTSUPP: return "-ENOTSUPP";
|
|
||||||
case -ENOTTY: return "-ENOTTY";
|
|
||||||
case -ENXIO: return "-ENXIO";
|
|
||||||
case -EOPNOTSUPP: return "-EOPNOTSUPP";
|
|
||||||
case -EOVERFLOW: return "-EOVERFLOW";
|
|
||||||
case -EPERM: return "-EPERM";
|
|
||||||
case -EPIPE: return "-EPIPE";
|
|
||||||
case -EPROTO: return "-EPROTO";
|
|
||||||
case -EPROTONOSUPPORT: return "-EPROTONOSUPPORT";
|
|
||||||
case -ERANGE: return "-ERANGE";
|
|
||||||
case -EROFS: return "-EROFS";
|
|
||||||
case -ESPIPE: return "-ESPIPE";
|
|
||||||
case -ESRCH: return "-ESRCH";
|
|
||||||
case -ETXTBSY: return "-ETXTBSY";
|
|
||||||
case -EUCLEAN: return "-EUCLEAN";
|
|
||||||
case -EXDEV: return "-EXDEV";
|
|
||||||
default:
|
|
||||||
snprintf(buf, sizeof(buf), "%d", err);
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -2,18 +2,5 @@
|
|||||||
#ifndef __LIBBPF_STR_ERROR_H
|
#ifndef __LIBBPF_STR_ERROR_H
|
||||||
#define __LIBBPF_STR_ERROR_H
|
#define __LIBBPF_STR_ERROR_H
|
||||||
|
|
||||||
#define STRERR_BUFSIZE 128
|
|
||||||
|
|
||||||
char *libbpf_strerror_r(int err, char *dst, int len);
|
char *libbpf_strerror_r(int err, char *dst, int len);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief **libbpf_errstr()** returns string corresponding to numeric errno
|
|
||||||
* @param err negative numeric errno
|
|
||||||
* @return pointer to string representation of the errno, that is invalidated
|
|
||||||
* upon the next call.
|
|
||||||
*/
|
|
||||||
const char *libbpf_errstr(int err);
|
|
||||||
|
|
||||||
#define errstr(err) libbpf_errstr(err)
|
|
||||||
|
|
||||||
#endif /* __LIBBPF_STR_ERROR_H */
|
#endif /* __LIBBPF_STR_ERROR_H */
|
||||||
|
|||||||
@@ -4,8 +4,8 @@
|
|||||||
#define __USDT_BPF_H__
|
#define __USDT_BPF_H__
|
||||||
|
|
||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include "bpf_helpers.h"
|
#include <bpf/bpf_helpers.h>
|
||||||
#include "bpf_tracing.h"
|
#include <bpf/bpf_tracing.h>
|
||||||
|
|
||||||
/* Below types and maps are internal implementation details of libbpf's USDT
|
/* Below types and maps are internal implementation details of libbpf's USDT
|
||||||
* support and are subjects to change. Also, bpf_usdt_xxx() API helpers should
|
* support and are subjects to change. Also, bpf_usdt_xxx() API helpers should
|
||||||
@@ -39,7 +39,7 @@ enum __bpf_usdt_arg_type {
|
|||||||
struct __bpf_usdt_arg_spec {
|
struct __bpf_usdt_arg_spec {
|
||||||
/* u64 scalar interpreted depending on arg_type, see below */
|
/* u64 scalar interpreted depending on arg_type, see below */
|
||||||
__u64 val_off;
|
__u64 val_off;
|
||||||
/* arg location case, see bpf_usdt_arg() for details */
|
/* arg location case, see bpf_udst_arg() for details */
|
||||||
enum __bpf_usdt_arg_type arg_type;
|
enum __bpf_usdt_arg_type arg_type;
|
||||||
/* offset of referenced register within struct pt_regs */
|
/* offset of referenced register within struct pt_regs */
|
||||||
short reg_off;
|
short reg_off;
|
||||||
@@ -108,38 +108,6 @@ int bpf_usdt_arg_cnt(struct pt_regs *ctx)
|
|||||||
return spec->arg_cnt;
|
return spec->arg_cnt;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the size in bytes of the #*arg_num* (zero-indexed) USDT argument.
|
|
||||||
* Returns negative error if argument is not found or arg_num is invalid.
|
|
||||||
*/
|
|
||||||
static __always_inline
|
|
||||||
int bpf_usdt_arg_size(struct pt_regs *ctx, __u64 arg_num)
|
|
||||||
{
|
|
||||||
struct __bpf_usdt_arg_spec *arg_spec;
|
|
||||||
struct __bpf_usdt_spec *spec;
|
|
||||||
int spec_id;
|
|
||||||
|
|
||||||
spec_id = __bpf_usdt_spec_id(ctx);
|
|
||||||
if (spec_id < 0)
|
|
||||||
return -ESRCH;
|
|
||||||
|
|
||||||
spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
|
|
||||||
if (!spec)
|
|
||||||
return -ESRCH;
|
|
||||||
|
|
||||||
if (arg_num >= BPF_USDT_MAX_ARG_CNT)
|
|
||||||
return -ENOENT;
|
|
||||||
barrier_var(arg_num);
|
|
||||||
if (arg_num >= spec->arg_cnt)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
arg_spec = &spec->args[arg_num];
|
|
||||||
|
|
||||||
/* arg_spec->arg_bitshift = 64 - arg_sz * 8
|
|
||||||
* so: arg_sz = (64 - arg_spec->arg_bitshift) / 8
|
|
||||||
*/
|
|
||||||
return (unsigned int)(64 - arg_spec->arg_bitshift) / 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
|
/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
|
||||||
* Returns 0 on success; negative error, otherwise.
|
* Returns 0 on success; negative error, otherwise.
|
||||||
* On error *res is guaranteed to be set to zero.
|
* On error *res is guaranteed to be set to zero.
|
||||||
@@ -246,18 +214,18 @@ long bpf_usdt_cookie(struct pt_regs *ctx)
|
|||||||
|
|
||||||
/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
|
/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
|
||||||
#define ___bpf_usdt_args0() ctx
|
#define ___bpf_usdt_args0() ctx
|
||||||
#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); _x; })
|
#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); _x; })
|
#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); _x; })
|
#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); _x; })
|
#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); _x; })
|
#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); _x; })
|
#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); _x; })
|
#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); _x; })
|
#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); _x; })
|
#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); _x; })
|
#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); _x; })
|
#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); _x; })
|
#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); (void *)_x; })
|
||||||
#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
|
#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|||||||
142
src/usdt.c
142
src/usdt.c
@@ -20,7 +20,6 @@
|
|||||||
#include "libbpf_common.h"
|
#include "libbpf_common.h"
|
||||||
#include "libbpf_internal.h"
|
#include "libbpf_internal.h"
|
||||||
#include "hashmap.h"
|
#include "hashmap.h"
|
||||||
#include "str_error.h"
|
|
||||||
|
|
||||||
/* libbpf's USDT support consists of BPF-side state/code and user-space
|
/* libbpf's USDT support consists of BPF-side state/code and user-space
|
||||||
* state/code working together in concert. BPF-side parts are defined in
|
* state/code working together in concert. BPF-side parts are defined in
|
||||||
@@ -251,7 +250,6 @@ struct usdt_manager {
|
|||||||
|
|
||||||
bool has_bpf_cookie;
|
bool has_bpf_cookie;
|
||||||
bool has_sema_refcnt;
|
bool has_sema_refcnt;
|
||||||
bool has_uprobe_multi;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
|
struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
|
||||||
@@ -286,11 +284,6 @@ struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
|
|||||||
*/
|
*/
|
||||||
man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
|
man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
|
||||||
|
|
||||||
/*
|
|
||||||
* Detect kernel support for uprobe multi link to be used for attaching
|
|
||||||
* usdt probes.
|
|
||||||
*/
|
|
||||||
man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK);
|
|
||||||
return man;
|
return man;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -466,8 +459,8 @@ static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs,
|
|||||||
goto proceed;
|
goto proceed;
|
||||||
|
|
||||||
if (!realpath(lib_path, path)) {
|
if (!realpath(lib_path, path)) {
|
||||||
pr_warn("usdt: failed to get absolute path of '%s' (err %s), using path as is...\n",
|
pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
|
||||||
lib_path, errstr(-errno));
|
lib_path, -errno);
|
||||||
libbpf_strlcpy(path, lib_path, sizeof(path));
|
libbpf_strlcpy(path, lib_path, sizeof(path));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -476,8 +469,8 @@ proceed:
|
|||||||
f = fopen(line, "re");
|
f = fopen(line, "re");
|
||||||
if (!f) {
|
if (!f) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("usdt: failed to open '%s' to get base addr of '%s': %s\n",
|
pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
|
||||||
line, lib_path, errstr(err));
|
line, lib_path, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -607,8 +600,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
|||||||
|
|
||||||
err = parse_elf_segs(elf, path, &segs, &seg_cnt);
|
err = parse_elf_segs(elf, path, &segs, &seg_cnt);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("usdt: failed to process ELF program segments for '%s': %s\n",
|
pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
|
||||||
path, errstr(err));
|
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -661,7 +653,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
|||||||
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
|
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
|
||||||
*/
|
*/
|
||||||
usdt_abs_ip = note.loc_addr;
|
usdt_abs_ip = note.loc_addr;
|
||||||
if (base_addr && note.base_addr)
|
if (base_addr)
|
||||||
usdt_abs_ip += base_addr - note.base_addr;
|
usdt_abs_ip += base_addr - note.base_addr;
|
||||||
|
|
||||||
/* When attaching uprobes (which is what USDTs basically are)
|
/* When attaching uprobes (which is what USDTs basically are)
|
||||||
@@ -710,8 +702,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
|
|||||||
if (vma_seg_cnt == 0) {
|
if (vma_seg_cnt == 0) {
|
||||||
err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
|
err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %s\n",
|
pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
|
||||||
pid, path, errstr(err));
|
pid, path, err);
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -816,8 +808,6 @@ struct bpf_link_usdt {
|
|||||||
long abs_ip;
|
long abs_ip;
|
||||||
struct bpf_link *link;
|
struct bpf_link *link;
|
||||||
} *uprobes;
|
} *uprobes;
|
||||||
|
|
||||||
struct bpf_link *multi_link;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static int bpf_link_usdt_detach(struct bpf_link *link)
|
static int bpf_link_usdt_detach(struct bpf_link *link)
|
||||||
@@ -826,9 +816,6 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
|
|||||||
struct usdt_manager *man = usdt_link->usdt_man;
|
struct usdt_manager *man = usdt_link->usdt_man;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
bpf_link__destroy(usdt_link->multi_link);
|
|
||||||
|
|
||||||
/* When having multi_link, uprobe_cnt is 0 */
|
|
||||||
for (i = 0; i < usdt_link->uprobe_cnt; i++) {
|
for (i = 0; i < usdt_link->uprobe_cnt; i++) {
|
||||||
/* detach underlying uprobe link */
|
/* detach underlying uprobe link */
|
||||||
bpf_link__destroy(usdt_link->uprobes[i].link);
|
bpf_link__destroy(usdt_link->uprobes[i].link);
|
||||||
@@ -959,24 +946,32 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
|
|||||||
const char *usdt_provider, const char *usdt_name,
|
const char *usdt_provider, const char *usdt_name,
|
||||||
__u64 usdt_cookie)
|
__u64 usdt_cookie)
|
||||||
{
|
{
|
||||||
unsigned long *offsets = NULL, *ref_ctr_offsets = NULL;
|
int i, fd, err, spec_map_fd, ip_map_fd;
|
||||||
int i, err, spec_map_fd, ip_map_fd;
|
|
||||||
LIBBPF_OPTS(bpf_uprobe_opts, opts);
|
LIBBPF_OPTS(bpf_uprobe_opts, opts);
|
||||||
struct hashmap *specs_hash = NULL;
|
struct hashmap *specs_hash = NULL;
|
||||||
struct bpf_link_usdt *link = NULL;
|
struct bpf_link_usdt *link = NULL;
|
||||||
struct usdt_target *targets = NULL;
|
struct usdt_target *targets = NULL;
|
||||||
__u64 *cookies = NULL;
|
|
||||||
struct elf_fd elf_fd;
|
|
||||||
size_t target_cnt;
|
size_t target_cnt;
|
||||||
|
Elf *elf;
|
||||||
|
|
||||||
spec_map_fd = bpf_map__fd(man->specs_map);
|
spec_map_fd = bpf_map__fd(man->specs_map);
|
||||||
ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
|
ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
|
||||||
|
|
||||||
err = elf_open(path, &elf_fd);
|
fd = open(path, O_RDONLY | O_CLOEXEC);
|
||||||
if (err)
|
if (fd < 0) {
|
||||||
|
err = -errno;
|
||||||
|
pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
|
||||||
return libbpf_err_ptr(err);
|
return libbpf_err_ptr(err);
|
||||||
|
}
|
||||||
|
|
||||||
err = sanity_check_usdt_elf(elf_fd.elf, path);
|
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
|
||||||
|
if (!elf) {
|
||||||
|
err = -EBADF;
|
||||||
|
pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
|
||||||
|
goto err_out;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sanity_check_usdt_elf(elf, path);
|
||||||
if (err)
|
if (err)
|
||||||
goto err_out;
|
goto err_out;
|
||||||
|
|
||||||
@@ -989,7 +984,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
|
|||||||
/* discover USDT in given binary, optionally limiting
|
/* discover USDT in given binary, optionally limiting
|
||||||
* activations to a given PID, if pid > 0
|
* activations to a given PID, if pid > 0
|
||||||
*/
|
*/
|
||||||
err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name,
|
err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
|
||||||
usdt_cookie, &targets, &target_cnt);
|
usdt_cookie, &targets, &target_cnt);
|
||||||
if (err <= 0) {
|
if (err <= 0) {
|
||||||
err = (err == 0) ? -ENOENT : err;
|
err = (err == 0) ? -ENOENT : err;
|
||||||
@@ -1012,21 +1007,10 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
|
|||||||
link->link.detach = &bpf_link_usdt_detach;
|
link->link.detach = &bpf_link_usdt_detach;
|
||||||
link->link.dealloc = &bpf_link_usdt_dealloc;
|
link->link.dealloc = &bpf_link_usdt_dealloc;
|
||||||
|
|
||||||
if (man->has_uprobe_multi) {
|
link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
|
||||||
offsets = calloc(target_cnt, sizeof(*offsets));
|
if (!link->uprobes) {
|
||||||
cookies = calloc(target_cnt, sizeof(*cookies));
|
err = -ENOMEM;
|
||||||
ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets));
|
goto err_out;
|
||||||
|
|
||||||
if (!offsets || !ref_ctr_offsets || !cookies) {
|
|
||||||
err = -ENOMEM;
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
|
|
||||||
if (!link->uprobes) {
|
|
||||||
err = -ENOMEM;
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < target_cnt; i++) {
|
for (i = 0; i < target_cnt; i++) {
|
||||||
@@ -1049,8 +1033,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
|
|||||||
|
|
||||||
if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
|
if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
|
||||||
err = -errno;
|
err = -errno;
|
||||||
pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %s\n",
|
pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
|
||||||
spec_id, usdt_provider, usdt_name, path, errstr(err));
|
spec_id, usdt_provider, usdt_name, path, err);
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
if (!man->has_bpf_cookie &&
|
if (!man->has_bpf_cookie &&
|
||||||
@@ -1060,72 +1044,44 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
|
|||||||
pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
|
pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
|
||||||
spec_id, usdt_provider, usdt_name, path);
|
spec_id, usdt_provider, usdt_name, path);
|
||||||
} else {
|
} else {
|
||||||
pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %s\n",
|
pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
|
||||||
target->abs_ip, spec_id, usdt_provider, usdt_name,
|
target->abs_ip, spec_id, usdt_provider, usdt_name,
|
||||||
path, errstr(err));
|
path, err);
|
||||||
}
|
}
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (man->has_uprobe_multi) {
|
opts.ref_ctr_offset = target->sema_off;
|
||||||
offsets[i] = target->rel_ip;
|
opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
|
||||||
ref_ctr_offsets[i] = target->sema_off;
|
uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
|
||||||
cookies[i] = spec_id;
|
target->rel_ip, &opts);
|
||||||
} else {
|
err = libbpf_get_error(uprobe_link);
|
||||||
opts.ref_ctr_offset = target->sema_off;
|
if (err) {
|
||||||
opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
|
pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
|
||||||
uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
|
i, usdt_provider, usdt_name, path, err);
|
||||||
target->rel_ip, &opts);
|
|
||||||
err = libbpf_get_error(uprobe_link);
|
|
||||||
if (err) {
|
|
||||||
pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %s\n",
|
|
||||||
i, usdt_provider, usdt_name, path, errstr(err));
|
|
||||||
goto err_out;
|
|
||||||
}
|
|
||||||
|
|
||||||
link->uprobes[i].link = uprobe_link;
|
|
||||||
link->uprobes[i].abs_ip = target->abs_ip;
|
|
||||||
link->uprobe_cnt++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (man->has_uprobe_multi) {
|
|
||||||
LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi,
|
|
||||||
.ref_ctr_offsets = ref_ctr_offsets,
|
|
||||||
.offsets = offsets,
|
|
||||||
.cookies = cookies,
|
|
||||||
.cnt = target_cnt,
|
|
||||||
);
|
|
||||||
|
|
||||||
link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path,
|
|
||||||
NULL, &opts_multi);
|
|
||||||
if (!link->multi_link) {
|
|
||||||
err = -errno;
|
|
||||||
pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %s\n",
|
|
||||||
usdt_provider, usdt_name, path, errstr(err));
|
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
free(offsets);
|
link->uprobes[i].link = uprobe_link;
|
||||||
free(ref_ctr_offsets);
|
link->uprobes[i].abs_ip = target->abs_ip;
|
||||||
free(cookies);
|
link->uprobe_cnt++;
|
||||||
}
|
}
|
||||||
|
|
||||||
free(targets);
|
free(targets);
|
||||||
hashmap__free(specs_hash);
|
hashmap__free(specs_hash);
|
||||||
elf_close(&elf_fd);
|
elf_end(elf);
|
||||||
|
close(fd);
|
||||||
|
|
||||||
return &link->link;
|
return &link->link;
|
||||||
|
|
||||||
err_out:
|
err_out:
|
||||||
free(offsets);
|
|
||||||
free(ref_ctr_offsets);
|
|
||||||
free(cookies);
|
|
||||||
|
|
||||||
if (link)
|
if (link)
|
||||||
bpf_link__destroy(&link->link);
|
bpf_link__destroy(&link->link);
|
||||||
free(targets);
|
free(targets);
|
||||||
hashmap__free(specs_hash);
|
hashmap__free(specs_hash);
|
||||||
elf_close(&elf_fd);
|
if (elf)
|
||||||
|
elf_end(elf);
|
||||||
|
close(fd);
|
||||||
return libbpf_err_ptr(err);
|
return libbpf_err_ptr(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -223,7 +223,7 @@ struct zip_archive *zip_archive_open(const char *path)
|
|||||||
if (!archive) {
|
if (!archive) {
|
||||||
munmap(data, size);
|
munmap(data, size);
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
}
|
};
|
||||||
|
|
||||||
archive->data = data;
|
archive->data = data;
|
||||||
archive->size = size;
|
archive->size = size;
|
||||||
|
|||||||
Reference in New Issue
Block a user