Compare commits


1 Commit

Author:  thiagoftsm
SHA1:    6d39b27949
Message: netdata_patch_1_4_6: Add patch to run on Debian 10
Date:    2024-09-04 01:30:47 +00:00
32 changed files with 5047 additions and 10718 deletions

File diff suppressed because it is too large.


@@ -70,7 +70,7 @@ jobs:
- run: source /tmp/ci_setup && sudo -E $CI_ROOT/managers/ubuntu.sh
if: matrix.arch == 'x86'
name: Setup
- uses: uraimo/run-on-arch-action@v2.8.1
- uses: uraimo/run-on-arch-action@v2.7.1
name: Build in docker
if: matrix.arch != 'x86'
with:


@@ -33,7 +33,7 @@ jobs:
dry-run: false
sanitizer: ${{ matrix.sanitizer }}
- name: Upload Crash
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v1
if: failure() && steps.build.outcome == 'success'
with:
name: ${{ matrix.sanitizer }}-artifacts


@@ -27,6 +27,9 @@ jobs:
- kernel: '4.9.0'
runs_on: ubuntu-24.04
arch: 'x86_64'
- kernel: 'LATEST'
runs_on: ["s390x", "docker-noble-main"]
arch: 's390x'
steps:
- uses: actions/checkout@v4
name: Checkout


@@ -1 +1 @@
d5fb316e2af1d947f0f6c3666e373a54d9f27c6f
b408473ea01b2e499d23503e2bf898416da9d7ac


@@ -1 +1 @@
c6fb8030b4baa01c850f99fc6da051b1017edc46
2ad6d23f465a4f851e3bcf6d74c315ce7b2c205b


@@ -1,58 +0,0 @@
From affb32e4f056883f285f8535b766293b85752fb4 Mon Sep 17 00:00:00 2001
From: Jiri Olsa <jolsa@kernel.org>
Date: Tue, 24 Sep 2024 13:07:30 +0200
Subject: [PATCH] selftests/bpf: Fix uprobe consumer test
With newly merged code the uprobe behaviour is slightly different
and affects uprobe consumer test.
We no longer need to check if the uprobe object is still preserved
after removing last uretprobe, because it stays as long as there's
pending/installed uretprobe instance.
This allows to run uretprobe consumers registered 'after' uprobe was
hit even if previous uretprobe got unregistered before being hit.
The uprobe object will be now removed after the last uprobe ref is
released and in such case it's held by ri->uprobe (return instance)
which is released after the uretprobe is hit.
Reported-by: Ihor Solodrai <ihor.solodrai@pm.me>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Ihor Solodrai <ihor.solodrai@pm.me>
Closes: https://lore.kernel.org/bpf/w6U8Z9fdhjnkSp2UaFaV1fGqJXvfLEtDKEUyGDkwmoruDJ_AgF_c0FFhrkeKW18OqiP-05s9yDKiT6X-Ns-avN_ABf0dcUkXqbSJN1TQSXo=@pm.me/
---
.../testing/selftests/bpf/prog_tests/uprobe_multi_test.c | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 844f6fc8487b..c1ac813ff9ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -869,21 +869,14 @@ static void consumer_test(struct uprobe_multi_consumers *skel,
fmt = "prog 0/1: uprobe";
} else {
/*
- * uprobe return is tricky ;-)
- *
* to trigger uretprobe consumer, the uretprobe needs to be installed,
* which means one of the 'return' uprobes was alive when probe was hit:
*
* idxs: 2/3 uprobe return in 'installed' mask
- *
- * in addition if 'after' state removes everything that was installed in
- * 'before' state, then uprobe kernel object goes away and return uprobe
- * is not installed and we won't hit it even if it's in 'after' state.
*/
unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */
- unsigned long probe_preserved = before & after; /* did uprobe go away */
- if (had_uretprobes && probe_preserved && test_bit(idx, after))
+ if (had_uretprobes && test_bit(idx, after))
val++;
fmt = "idx 2/3: uretprobe";
}
--
2.34.1
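
For context, a minimal userspace sketch of registering a uretprobe consumer through libbpf's uprobe-multi link, the mechanism this selftest exercises; the binary path and symbol pattern below are hypothetical:

#include <bpf/libbpf.h>

/* Attach prog as a uretprobe consumer. Per the patch description, it
 * only fires if it was installed when the corresponding function-entry
 * uprobe was hit.
 */
static struct bpf_link *attach_ret_consumer(struct bpf_program *prog)
{
    LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
        .retprobe = true,    /* consume function returns */
    );

    return bpf_program__attach_uprobe_multi(prog, -1 /* all pids */,
                                            "/usr/bin/target",
                                            "uprobe_test_*", &opts);
}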


@@ -121,8 +121,6 @@ described in more detail in the footnotes.
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | |
@@ -133,23 +131,11 @@ described in more detail in the footnotes.
+ + +----------------------------------+-----------+
| | | ``raw_tracepoint+`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | |
| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | |
| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | |
+ + +----------------------------------+-----------+
| | | ``tc`` [#tc_legacy]_ | |
+ +----------------------------------------+----------------------------------+-----------+
| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | |
+ +----------------------------------------+----------------------------------+-----------+
| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | |
+ +----------------------------------------+----------------------------------+-----------+
| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | |
+ +----------------------------------------+----------------------------------+-----------+
| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | |
+ +----------------------------------------+----------------------------------+-----------+
| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | |
+ +----------------------------------------+----------------------------------+-----------+
| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | |
| | | ``tc`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
@@ -169,9 +155,7 @@ described in more detail in the footnotes.
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | |
+ + +----------------------------------+-----------+
| | | ``struct_ops.s+`` [#struct_ops]_ | Yes |
| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
@@ -225,11 +209,5 @@ described in more detail in the footnotes.
``a-zA-Z0-9_.*?``.
.. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``.
.. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use
``tcx/*`` instead.
.. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/<name>`` convention,
but ``name`` is ignored and it is recommended to just use plain
``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer
that is tagged with ``SEC(".struct_ops[.link]")``.
.. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
.. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
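
As a hedged illustration of the struct_ops convention in the footnote above (the name after the slash is ignored, and attachments live in a SEC(".struct_ops") initializer), a much-abbreviated congestion-control skeleton in the style of the kernel selftests; the names are illustrative and a real implementation needs more callbacks:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("struct_ops")
void BPF_PROG(sample_init, struct sock *sk)
{
    /* per-connection initialization would go here */
}

SEC(".struct_ops")    /* attachments are defined in the initializer */
struct tcp_congestion_ops sample_ops = {
    .init = (void *)sample_init,
    .name = "bpf_sample",
};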


@@ -1121,9 +1121,6 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
* in sync with the definitions below.
*/
enum bpf_link_type {
BPF_LINK_TYPE_UNSPEC = 0,
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
@@ -1973,8 +1970,6 @@ union bpf_attr {
* program.
* Return
* The SMP id of the processor running the program.
* Attributes
* __bpf_fastcall
*
* long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
* Description
@@ -2856,7 +2851,7 @@ union bpf_attr {
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
* **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
* **TCP_BPF_RTO_MIN**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
@@ -3106,6 +3101,10 @@ union bpf_attr {
* with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
* option, and in this case it only works on functions tagged with
* **ALLOW_ERROR_INJECTION** in the kernel code.
*
* Also, the helper is only available for the architectures having
* the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
* x86 architecture is the only one to support this feature.
* Return
* 0
*
@@ -5370,7 +5369,7 @@ union bpf_attr {
* Currently, the **flags** must be 0. Currently, nr_loops is
* limited to 1 << 23 (~8 million) loops.
*
* long (\*callback_fn)(u64 index, void \*ctx);
* long (\*callback_fn)(u32 index, void \*ctx);
*
* where **index** is the current index in the loop. The index
* is zero-indexed.
@@ -5520,12 +5519,11 @@ union bpf_attr {
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
* void *bpf_kptr_xchg(void *dst, void *ptr)
* void *bpf_kptr_xchg(void *map_value, void *ptr)
* Description
* Exchange kptr at pointer *dst* with *ptr*, and return the old value.
* *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
* it must be a referenced pointer which will be released when this helper
* is called.
* Exchange kptr at pointer *map_value* with *ptr*, and return the
* old value. *ptr* can be NULL, otherwise it must be a referenced
* pointer which will be released when this helper is called.
* Return
* The old value of kptr (which can be NULL). The returned pointer
* if not NULL, is a reference which must be released using its
@@ -6048,6 +6046,11 @@ enum {
BPF_F_MARK_ENFORCE = (1ULL << 6),
};
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
enum {
BPF_F_INGRESS = (1ULL << 0),
};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
enum {
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
@@ -6194,12 +6197,10 @@ enum {
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
};
/* Flags for bpf_redirect and bpf_redirect_map helpers */
/* Flags for bpf_redirect_map helper */
enum {
BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
BPF_F_BROADCAST = (1ULL << 3),
BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
};
#define __bpf_md_ptr(type, name) \
@@ -7079,7 +7080,6 @@ enum {
TCP_BPF_SYN = 1005, /* Copy the TCP header */
TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
};
enum {
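
To make the bpf_loop callback contract quoted above concrete, a minimal hedged BPF-side sketch (the two doc wordings differ only in the declared index type; the 64-bit form is used here, since BPF callbacks receive their arguments as u64):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

static long sum_cb(__u64 index, void *ctx)
{
    __u64 *sum = ctx;

    *sum += index;   /* index is zero-based, per the docs */
    return 0;        /* 0 = continue, 1 = break out of the loop */
}

SEC("xdp")
int sum_indexes(struct xdp_md *ctx)
{
    __u64 sum = 0;

    bpf_loop(100, sum_cb, &sum, 0);   /* flags must be 0 */
    return XDP_PASS;
}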


@@ -93,7 +93,6 @@ enum {
NETDEV_A_PAGE_POOL_INFLIGHT,
NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
NETDEV_A_PAGE_POOL_DETACH_TIME,
NETDEV_A_PAGE_POOL_DMABUF,
__NETDEV_A_PAGE_POOL_MAX,
NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
@@ -132,7 +131,6 @@ enum {
NETDEV_A_QUEUE_IFINDEX,
NETDEV_A_QUEUE_TYPE,
NETDEV_A_QUEUE_NAPI_ID,
NETDEV_A_QUEUE_DMABUF,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -175,16 +173,6 @@ enum {
NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
};
enum {
NETDEV_A_DMABUF_IFINDEX = 1,
NETDEV_A_DMABUF_QUEUES,
NETDEV_A_DMABUF_FD,
NETDEV_A_DMABUF_ID,
__NETDEV_A_DMABUF_MAX,
NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1)
};
enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
@@ -198,7 +186,6 @@ enum {
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
NETDEV_CMD_QSTATS_GET,
NETDEV_CMD_BIND_RX,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)


@@ -100,7 +100,7 @@ struct bpf_prog_load_opts {
__u32 log_level;
__u32 log_size;
char *log_buf;
/* output: actual total log contents size (including terminating zero).
/* output: actual total log contents size (including termintaing zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
* If kernel doesn't support this feature, log_size is left unchanged.
@@ -129,7 +129,7 @@ struct bpf_btf_load_opts {
char *log_buf;
__u32 log_level;
__u32 log_size;
/* output: actual total log contents size (including terminating zero).
/* output: actual total log contents size (including termintaing zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
* If kernel doesn't support this feature, log_size is left unchanged.
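
A hedged userspace sketch of how the log_size output field documented above is typically consumed; the two-instruction program is a hypothetical placeholder:

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <stdio.h>

int load_with_log(void)
{
    /* hypothetical minimal program: r0 = 0; exit */
    struct bpf_insn insns[] = {
        { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
        { .code = BPF_JMP | BPF_EXIT },
    };
    static char log_buf[64 * 1024];
    LIBBPF_OPTS(bpf_prog_load_opts, opts,
        .log_buf = log_buf,
        .log_size = sizeof(log_buf),
        .log_level = 1,
    );
    int fd;

    fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "minimal", "GPL",
                       insns, sizeof(insns) / sizeof(insns[0]), &opts);
    if (fd < 0)
        /* on kernels with the feature, opts.log_size now holds the
         * actual total log size, per the comment above
         */
        fprintf(stderr, "verifier log (%u bytes):\n%s\n",
                opts.log_size, log_buf);
    return fd;
}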


@@ -34,7 +34,6 @@ struct bpf_gen {
void *data_cur;
void *insn_start;
void *insn_cur;
bool swapped_endian;
ssize_t cleanup_label;
__u32 nr_progs;
__u32 nr_maps;


@@ -44,14 +44,6 @@ struct bpf_dynptr;
struct iphdr;
struct ipv6hdr;
#ifndef __bpf_fastcall
#if __has_attribute(bpf_fastcall)
#define __bpf_fastcall __attribute__((bpf_fastcall))
#else
#define __bpf_fastcall
#endif
#endif
/*
* bpf_map_lookup_elem
*
@@ -211,7 +203,7 @@ static __u32 (* const bpf_get_prandom_u32)(void) = (void *) 7;
* Returns
* The SMP id of the processor running the program.
*/
static __bpf_fastcall __u32 (* const bpf_get_smp_processor_id)(void) = (void *) 8;
static __u32 (* const bpf_get_smp_processor_id)(void) = (void *) 8;
/*
* bpf_skb_store_bytes
@@ -1232,7 +1224,7 @@ static long (* const bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *)
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
* **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
* **TCP_BPF_RTO_MIN**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
@@ -1519,6 +1511,10 @@ static long (* const bpf_getsockopt)(void *bpf_socket, int level, int optname, v
* option, and in this case it only works on functions tagged with
* **ALLOW_ERROR_INJECTION** in the kernel code.
*
* Also, the helper is only available for the architectures having
* the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
* x86 architecture is the only one to support this feature.
*
* Returns
* 0
*/
@@ -4224,7 +4220,7 @@ static long (* const bpf_find_vma)(struct task_struct *task, __u64 addr, void *c
* Currently, the **flags** must be 0. Currently, nr_loops is
* limited to 1 << 23 (~8 million) loops.
*
* long (\*callback_fn)(u64 index, void \*ctx);
* long (\*callback_fn)(u32 index, void \*ctx);
*
* where **index** is the current index in the loop. The index
* is zero-indexed.
@@ -4428,10 +4424,9 @@ static long (* const bpf_ima_file_hash)(struct file *file, void *dst, __u32 size
/*
* bpf_kptr_xchg
*
* Exchange kptr at pointer *dst* with *ptr*, and return the old value.
* *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
* it must be a referenced pointer which will be released when this helper
* is called.
* Exchange kptr at pointer *map_value* with *ptr*, and return the
* old value. *ptr* can be NULL, otherwise it must be a referenced
* pointer which will be released when this helper is called.
*
* Returns
* The old value of kptr (which can be NULL). The returned pointer
@@ -4439,7 +4434,7 @@ static long (* const bpf_ima_file_hash)(struct file *file, void *dst, __u32 size
* corresponding release function, or moved into a BPF map before
* program exit.
*/
static void *(* const bpf_kptr_xchg)(void *dst, void *ptr) = (void *) 194;
static void *(* const bpf_kptr_xchg)(void *map_value, void *ptr) = (void *) 194;
/*
* bpf_map_lookup_percpu_elem
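
A hedged BPF-side sketch of the bpf_kptr_xchg contract above, modeled on the kernel's task-kfunc selftests; the map and program names are illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct map_val {
    struct task_struct __kptr *task;   /* referenced kptr slot */
};

struct {
    __uint(type, BPF_MAP_TYPE_ARRAY);
    __uint(max_entries, 1);
    __type(key, int);
    __type(value, struct map_val);
} tasks SEC(".maps");

extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
extern void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(on_newtask, struct task_struct *task, u64 clone_flags)
{
    struct task_struct *acquired, *old;
    struct map_val *v;
    int key = 0;

    v = bpf_map_lookup_elem(&tasks, &key);
    if (!v)
        return 0;

    acquired = bpf_task_acquire(task);
    if (!acquired)
        return 0;

    /* store the new reference; whatever was there before comes back */
    old = bpf_kptr_xchg(&v->task, acquired);
    if (old)
        bpf_task_release(old);   /* release the displaced reference */
    return 0;
}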


@@ -185,7 +185,6 @@ enum libbpf_tristate {
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
#define __uptr __attribute__((btf_type_tag("uptr")))
#if defined (__clang__)
#define bpf_ksym_exists(sym) ({ \
@@ -342,7 +341,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
* I.e., it looks almost like high-level for each loop in other languages,
* supports continue/break, and is verifiable by BPF verifier.
*
* For iterating integers, the difference between bpf_for_each(num, i, N, M)
* For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
* *`, not just `int`. So for integers bpf_for() is more convenient.
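
The bpf_for()/bpf_for_each() distinction described above, as a short hedged sketch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("xdp")
int iterate(struct xdp_md *ctx)
{
    __u64 sum = 0;
    int i;

    /* i stays a plain int, and the verifier gets the [0, 10) proof */
    bpf_for(i, 0, 10)
        sum += i;

    return XDP_PASS;
}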


@@ -163,7 +163,7 @@
struct pt_regs___s390 {
unsigned long orig_gpr2;
} __attribute__((preserve_access_index));
};
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
@@ -179,7 +179,7 @@ struct pt_regs___s390 {
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG gprs[7]
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
@@ -222,7 +222,7 @@ struct pt_regs___s390 {
struct pt_regs___arm64 {
unsigned long orig_x0;
} __attribute__((preserve_access_index));
};
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
@@ -241,7 +241,7 @@ struct pt_regs___arm64 {
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
@@ -351,10 +351,6 @@ struct pt_regs___arm64 {
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
*/
struct pt_regs___riscv {
unsigned long orig_a0;
} __attribute__((preserve_access_index));
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
@@ -366,15 +362,12 @@ struct pt_regs___riscv {
#define __PT_PARM7_REG a6
#define __PT_PARM8_REG a7
#define __PT_PARM1_SYSCALL_REG orig_a0
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG)
#define __PT_RET_REG ra
#define __PT_FP_REG s0
@@ -480,7 +473,7 @@ struct pt_regs;
#endif
/*
* Similarly, syscall-specific conventions might differ between function call
* conventions within each architecture. All supported architectures pass
* conventions within each architecutre. All supported architectures pass
* either 6 or 7 syscall arguments in registers.
*
* See syscall(2) manpage for succinct table with information on each arch.
@@ -522,7 +515,7 @@ struct pt_regs;
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
@@ -658,7 +651,7 @@ struct pt_regs;
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
* similar kinds of BPF programs, that accept input arguments as a single
* pointer to untyped u64 array, where each u64 can actually be a typed
* pointer or integer of different size. Instead of requiring user to write
* pointer or integer of different size. Instead of requring user to write
* manual casts and work with array elements by index, BPF_PROG macro
* allows user to declare a list of named and typed input arguments in the
* same syntax as for normal C function. All the casting is hidden and
@@ -808,7 +801,7 @@ struct pt_regs;
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
* low-level way of getting kprobe input arguments from struct pt_regs, and
* provides a familiar typed and named function arguments syntax and
* semantics of accessing kprobe input parameters.
* semantics of accessing kprobe input paremeters.
*
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
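
The named-argument convenience this comment describes, as a hedged kprobe sketch (do_unlinkat is the customary example):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
{
    /* typed args replace PT_REGS_PARM1(ctx)/PT_REGS_PARM2(ctx);
     * the raw struct pt_regs is still available as 'ctx'
     */
    bpf_printk("unlinkat: dfd=%d", dfd);
    return 0;
}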

src/btf.c

@@ -2885,7 +2885,7 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
return btf_commit_type(btf, sz);
}
struct btf_ext_sec_info_param {
struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
__u32 min_rec_size;
@@ -2893,20 +2893,14 @@ struct btf_ext_sec_info_param {
const char *desc;
};
/*
* Parse a single info subsection of the BTF.ext info data:
* - validate subsection structure and elements
* - save info subsection start and sizing details in struct btf_ext
* - endian-independent operation, for calling before byte-swapping
*/
static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
struct btf_ext_sec_info_param *ext_sec,
bool is_native)
static int btf_ext_setup_info(struct btf_ext *btf_ext,
struct btf_ext_sec_setup_param *ext_sec)
{
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
size_t sec_cnt = 0;
/* The start of the info sec (including the __u32 record_size). */
void *info;
if (ext_sec->len == 0)
@@ -2918,7 +2912,6 @@ static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
return -EINVAL;
}
/* The start of the info sec (including the __u32 record_size). */
info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
info_left = ext_sec->len;
@@ -2934,13 +2927,9 @@ static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
return -EINVAL;
}
/* The record size needs to meet either the minimum standard or, when
* handling non-native endianness data, the exact standard so as
* to allow safe byte-swapping.
*/
record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info);
/* The record size needs to meet the minimum standard */
record_size = *(__u32 *)info;
if (record_size < ext_sec->min_rec_size ||
(!is_native && record_size != ext_sec->min_rec_size) ||
record_size & 0x03) {
pr_debug("%s section in .BTF.ext has invalid record size %u\n",
ext_sec->desc, record_size);
@@ -2952,7 +2941,7 @@ static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
/* If no records, return failure now so .BTF.ext won't be used. */
if (!info_left) {
pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
return -EINVAL;
}
@@ -2967,7 +2956,7 @@ static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
return -EINVAL;
}
num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info);
num_records = sinfo->num_info;
if (num_records == 0) {
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
@@ -2995,157 +2984,64 @@ static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
return 0;
}
/* Parse all info secs in the BTF.ext info data */
static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
struct btf_ext_sec_info_param func_info = {
struct btf_ext_sec_setup_param param = {
.off = btf_ext->hdr->func_info_off,
.len = btf_ext->hdr->func_info_len,
.min_rec_size = sizeof(struct bpf_func_info_min),
.ext_info = &btf_ext->func_info,
.desc = "func_info"
};
struct btf_ext_sec_info_param line_info = {
return btf_ext_setup_info(btf_ext, &param);
}
static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
struct btf_ext_sec_setup_param param = {
.off = btf_ext->hdr->line_info_off,
.len = btf_ext->hdr->line_info_len,
.min_rec_size = sizeof(struct bpf_line_info_min),
.ext_info = &btf_ext->line_info,
.desc = "line_info",
};
struct btf_ext_sec_info_param core_relo = {
return btf_ext_setup_info(btf_ext, &param);
}
static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
{
struct btf_ext_sec_setup_param param = {
.off = btf_ext->hdr->core_relo_off,
.len = btf_ext->hdr->core_relo_len,
.min_rec_size = sizeof(struct bpf_core_relo),
.ext_info = &btf_ext->core_relo_info,
.desc = "core_relo",
};
int err;
err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native);
if (err)
return err;
err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native);
if (err)
return err;
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
return 0; /* skip core relos parsing */
err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
if (err)
return err;
return 0;
return btf_ext_setup_info(btf_ext, &param);
}
/* Swap byte-order of BTF.ext header with any endianness */
static void btf_ext_bswap_hdr(struct btf_ext_header *h)
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
bool is_native = h->magic == BTF_MAGIC;
__u32 hdr_len;
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len);
h->magic = bswap_16(h->magic);
h->hdr_len = bswap_32(h->hdr_len);
h->func_info_off = bswap_32(h->func_info_off);
h->func_info_len = bswap_32(h->func_info_len);
h->line_info_off = bswap_32(h->line_info_off);
h->line_info_len = bswap_32(h->line_info_len);
if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
return;
h->core_relo_off = bswap_32(h->core_relo_off);
h->core_relo_len = bswap_32(h->core_relo_len);
}
/* Swap byte-order of generic info subsection */
static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native,
info_rec_bswap_fn bswap_fn)
{
struct btf_ext_info_sec *sec;
__u32 info_left, rec_size, *rs;
if (len == 0)
return;
rs = info; /* info record size */
rec_size = is_native ? *rs : bswap_32(*rs);
*rs = bswap_32(*rs);
sec = info + sizeof(__u32); /* info sec #1 */
info_left = len - sizeof(__u32);
while (info_left) {
unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
__u32 i, num_recs;
void *p;
num_recs = is_native ? sec->num_info : bswap_32(sec->num_info);
sec->sec_name_off = bswap_32(sec->sec_name_off);
sec->num_info = bswap_32(sec->num_info);
p = sec->data; /* info rec #1 */
for (i = 0; i < num_recs; i++, p += rec_size)
bswap_fn(p);
sec = p;
info_left -= sec_hdrlen + (__u64)rec_size * num_recs;
}
}
/*
* Swap byte-order of all info data in a BTF.ext section
* - requires BTF.ext hdr in native endianness
*/
static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data)
{
const bool is_native = btf_ext->swapped_endian;
const struct btf_ext_header *h = data;
void *info;
/* Swap func_info subsection byte-order */
info = data + h->hdr_len + h->func_info_off;
btf_ext_bswap_info_sec(info, h->func_info_len, is_native,
(info_rec_bswap_fn)bpf_func_info_bswap);
/* Swap line_info subsection byte-order */
info = data + h->hdr_len + h->line_info_off;
btf_ext_bswap_info_sec(info, h->line_info_len, is_native,
(info_rec_bswap_fn)bpf_line_info_bswap);
/* Swap core_relo subsection byte-order (if present) */
if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
return;
info = data + h->hdr_len + h->core_relo_off;
btf_ext_bswap_info_sec(info, h->core_relo_len, is_native,
(info_rec_bswap_fn)bpf_core_relo_bswap);
}
/* Parse hdr data and info sections: check and convert to native endianness */
static int btf_ext_parse(struct btf_ext *btf_ext)
{
__u32 hdr_len, data_size = btf_ext->data_size;
struct btf_ext_header *hdr = btf_ext->hdr;
bool swapped_endian = false;
int err;
if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
pr_debug("BTF.ext header too short\n");
if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
data_size < hdr->hdr_len) {
pr_debug("BTF.ext header not found");
return -EINVAL;
}
hdr_len = hdr->hdr_len;
if (hdr->magic == bswap_16(BTF_MAGIC)) {
swapped_endian = true;
hdr_len = bswap_32(hdr_len);
pr_warn("BTF.ext in non-native endianness is not supported\n");
return -ENOTSUP;
} else if (hdr->magic != BTF_MAGIC) {
pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
/* Ensure known version of structs, current BTF_VERSION == 1 */
if (hdr->version != 1) {
if (hdr->version != BTF_VERSION) {
pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
return -ENOTSUP;
}
@@ -3155,39 +3051,11 @@ static int btf_ext_parse(struct btf_ext *btf_ext)
return -ENOTSUP;
}
if (data_size < hdr_len) {
pr_debug("BTF.ext header not found\n");
return -EINVAL;
} else if (data_size == hdr_len) {
if (data_size == hdr->hdr_len) {
pr_debug("BTF.ext has no data\n");
return -EINVAL;
}
/* Verify mandatory hdr info details present */
if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
pr_warn("BTF.ext header missing func_info, line_info\n");
return -EINVAL;
}
/* Keep hdr native byte-order in memory for introspection */
if (swapped_endian)
btf_ext_bswap_hdr(btf_ext->hdr);
/* Validate info subsections and cache key metadata */
err = btf_ext_parse_info(btf_ext, !swapped_endian);
if (err)
return err;
/* Keep infos native byte-order in memory for introspection */
if (swapped_endian)
btf_ext_bswap_info(btf_ext, btf_ext->data);
/*
* Set btf_ext->swapped_endian only after all header and info data has
* been swapped, helping bswap functions determine if their data are
* in native byte-order when called.
*/
btf_ext->swapped_endian = swapped_endian;
return 0;
}
@@ -3199,7 +3067,6 @@ void btf_ext__free(struct btf_ext *btf_ext)
free(btf_ext->line_info.sec_idxs);
free(btf_ext->core_relo_info.sec_idxs);
free(btf_ext->data);
free(btf_ext->data_swapped);
free(btf_ext);
}
@@ -3220,7 +3087,29 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
err = btf_ext_parse(btf_ext);
err = btf_ext_parse_hdr(btf_ext->data, size);
if (err)
goto done;
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
err = -EINVAL;
goto done;
}
err = btf_ext_setup_func_info(btf_ext);
if (err)
goto done;
err = btf_ext_setup_line_info(btf_ext);
if (err)
goto done;
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
goto done; /* skip core relos parsing */
err = btf_ext_setup_core_relos(btf_ext);
if (err)
goto done;
done:
if (err) {
@@ -3231,66 +3120,15 @@ done:
return btf_ext;
}
static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian)
{
struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
const __u32 data_sz = btf_ext->data_size;
void *data;
/* Return native data (always present) or swapped data if present */
if (!swap_endian)
return btf_ext->data;
else if (btf_ext->data_swapped)
return btf_ext->data_swapped;
/* Recreate missing swapped data, then cache and return */
data = calloc(1, data_sz);
if (!data)
return NULL;
memcpy(data, btf_ext->data, data_sz);
btf_ext_bswap_info(btf_ext, data);
btf_ext_bswap_hdr(data);
btf_ext->data_swapped = data;
return data;
}
const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
void *data;
data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian);
if (!data)
return errno = ENOMEM, NULL;
*size = btf_ext->data_size;
return data;
return btf_ext->data;
}
__attribute__((alias("btf_ext__raw_data")))
const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
{
if (is_host_big_endian())
return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
else
return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
}
int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
{
if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
return libbpf_err(-EINVAL);
btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
if (!btf_ext->swapped_endian) {
free(btf_ext->data_swapped);
btf_ext->data_swapped = NULL;
}
return 0;
}
struct btf_dedup;
@@ -3453,7 +3291,7 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
d = btf_dedup_new(btf, opts);
if (IS_ERR(d)) {
pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
return libbpf_err(-EINVAL);
}
@@ -4354,7 +4192,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
* and canonical graphs are not compatible structurally, whole graphs are
* incompatible. If types are structurally equivalent (i.e., all information
* except referenced type IDs is exactly the same), a mapping from `canon_id` to
* a `cand_id` is recoded in hypothetical mapping (`btf_dedup->hypot_map`).
* a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`).
* If a type references other types, then those referenced types are checked
* for equivalence recursively.
*
@@ -4392,7 +4230,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
* consists of portions of the graph that come from multiple compilation units.
* This is due to the fact that types within single compilation unit are always
* deduplicated and FWDs are already resolved, if referenced struct/union
* definition is available. So, if we had unresolved FWD and found corresponding
* definiton is available. So, if we had unresolved FWD and found corresponding
* STRUCT/UNION, they will be from different compilation units. This
* consequently means that when we "link" FWD to corresponding STRUCT/UNION,
* type graph will likely have at least two different BTF types that describe
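
For orientation, the public entry point backed by this equivalence machinery is btf__dedup(); a minimal hedged usage sketch, with a hypothetical input path:

#include <bpf/btf.h>
#include <stdio.h>

int dedup_file(const char *path)
{
    struct btf *btf;
    int err;

    btf = btf__parse(path, NULL);   /* BTF from an ELF or raw file */
    if (!btf)
        return -1;

    err = btf__dedup(btf, NULL);    /* default options */
    if (err)
        fprintf(stderr, "btf__dedup failed: %d\n", err);

    btf__free(btf);
    return err;
}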


@@ -167,9 +167,6 @@ LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
LIBBPF_API enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext);
LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext,
enum btf_endianness endian);
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
@@ -289,7 +286,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
struct btf_dump_emit_type_decl_opts {
/* size of this struct, for forward/backward compatibility */
/* size of this struct, for forward/backward compatiblity */
size_t sz;
/* optional field name for type declaration, e.g.:
* - struct my_struct <FNAME>
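
A hedged sketch of how the emit_type_decl opts above are used; the printf callback, type id, and field name are illustrative:

#include <stdarg.h>
#include <stdio.h>
#include <bpf/btf.h>

static void dump_printf(void *ctx, const char *fmt, va_list args)
{
    vfprintf(stdout, fmt, args);
}

int emit_decl(const struct btf *btf, __u32 type_id)
{
    LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
        .field_name = "my_field",   /* hypothetical declarator name */
    );
    struct btf_dump *d;
    int err;

    d = btf_dump__new(btf, dump_printf, NULL, NULL);
    if (!d)
        return -1;

    err = btf_dump__emit_type_decl(d, type_id, &opts);
    btf_dump__free(d);
    return err;
}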


@@ -304,7 +304,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
* definition, in which case they have to be declared inline as part of field
* type declaration; or as a top-level anonymous enum, typically used for
* declaring global constants. It's impossible to distinguish between two
* without knowing whether given enum type was referenced from other type:
* without knowning whether given enum type was referenced from other type:
* top-level anonymous enum won't be referenced by anything, while embedded
* one will.
*/
@@ -867,8 +867,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
} pads[] = {
{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
};
int new_off = 0, pad_bits = 0, bits, i;
const char *pad_type = NULL;
int new_off, pad_bits, bits, i;
const char *pad_type;
if (cur_off >= next_off)
return; /* no gap */
@@ -1304,7 +1304,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
* chain, restore stack, emit warning, and try to
* proceed nevertheless
*/
pr_warn("not enough memory for decl stack: %d\n", err);
pr_warn("not enough memory for decl stack:%d", err);
d->decl_stack_cnt = stack_start;
return;
}


@@ -428,7 +428,7 @@ static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
} else {
off = r->str_map[*str_off];
if (!off) {
pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
pr_warn("string '%s' [offset %u] is not mapped to base BTF",
btf__str_by_offset(r->btf, off), *str_off);
return -ENOENT;
}


@@ -401,15 +401,6 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
opts->insns_sz = gen->insn_cur - gen->insn_start;
opts->data = gen->data_start;
opts->data_sz = gen->data_cur - gen->data_start;
/* use target endianness for embedded loader */
if (gen->swapped_endian) {
struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
for (i = 0; i < insn_cnt; i++)
bpf_insn_bswap(insn++);
}
}
return gen->error;
}
@@ -423,28 +414,6 @@ void bpf_gen__free(struct bpf_gen *gen)
free(gen);
}
/*
* Fields of bpf_attr are set to values in native byte-order before being
* written to the target-bound data blob, and may need endian conversion.
* This macro allows providing the correct value in situ more simply than
* writing a separate converter for *all fields* of *all records* included
* in union bpf_attr. Note that sizeof(rval) should match the assignment
* target to avoid runtime problems.
*/
#define tgt_endian(rval) ({ \
typeof(rval) _val = (rval); \
if (gen->swapped_endian) { \
switch (sizeof(_val)) { \
case 1: break; \
case 2: _val = bswap_16(_val); break; \
case 4: _val = bswap_32(_val); break; \
case 8: _val = bswap_64(_val); break; \
default: pr_warn("unsupported bswap size!\n"); \
} \
} \
_val; \
})
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
__u32 btf_raw_size)
{
@@ -453,12 +422,11 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: load_btf: size %d\n", btf_raw_size);
btf_data = add_data(gen, btf_raw_data, btf_raw_size);
attr.btf_size = tgt_endian(btf_raw_size);
attr.btf_size = btf_raw_size;
btf_load_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
btf_data, btf_raw_size, btf_load_attr, attr_size);
/* populate union bpf_attr with user provided log details */
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
@@ -489,29 +457,28 @@ void bpf_gen__map_create(struct bpf_gen *gen,
union bpf_attr attr;
memset(&attr, 0, attr_size);
attr.map_type = tgt_endian(map_type);
attr.key_size = tgt_endian(key_size);
attr.value_size = tgt_endian(value_size);
attr.map_flags = tgt_endian(map_attr->map_flags);
attr.map_extra = tgt_endian(map_attr->map_extra);
attr.map_type = map_type;
attr.key_size = key_size;
attr.value_size = value_size;
attr.map_flags = map_attr->map_flags;
attr.map_extra = map_attr->map_extra;
if (map_name)
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.numa_node = tgt_endian(map_attr->numa_node);
attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
attr.max_entries = tgt_endian(max_entries);
attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);
attr.numa_node = map_attr->numa_node;
attr.map_ifindex = map_attr->map_ifindex;
attr.max_entries = max_entries;
attr.btf_key_type_id = map_attr->btf_key_type_id;
attr.btf_value_type_id = map_attr->btf_value_type_id;
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
attr.map_name, map_idx, map_type, attr.btf_value_type_id);
map_create_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
map_name, map_idx, map_type, map_attr->btf_value_type_id,
map_create_attr, attr_size);
if (map_attr->btf_value_type_id)
if (attr.btf_value_type_id)
/* populate union bpf_attr with btf_fd saved in the stack earlier */
move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
stack_off(btf_fd));
switch (map_type) {
switch (attr.map_type) {
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
@@ -531,8 +498,8 @@ void bpf_gen__map_create(struct bpf_gen *gen,
/* emit MAP_CREATE command */
emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
map_name, map_idx, map_type, value_size,
map_attr->btf_value_type_id);
attr.map_name, map_idx, map_type, value_size,
attr.btf_value_type_id);
emit_check_err(gen);
/* remember map_fd in the stack, if successful */
if (map_idx < 0) {
@@ -817,12 +784,12 @@ log:
emit_ksym_relo_log(gen, relo, kdesc->ref);
}
static __u32 src_reg_mask(struct bpf_gen *gen)
static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
return gen->swapped_endian ? 0xf0 : 0x0f;
#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
return gen->swapped_endian ? 0x0f : 0xf0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
@@ -873,7 +840,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
reg_mask = src_reg_mask(gen);
reg_mask = src_reg_mask();
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
@@ -964,94 +931,48 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
cleanup_core_relo(gen);
}
/* Convert func, line, and core relo info blobs to target endianness */
static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
int core_relos, struct bpf_prog_load_opts *load_attr)
{
struct bpf_func_info *fi = gen->data_start + func_info;
struct bpf_line_info *li = gen->data_start + line_info;
struct bpf_core_relo *cr = gen->data_start + core_relos;
int i;
for (i = 0; i < load_attr->func_info_cnt; i++)
bpf_func_info_bswap(fi++);
for (i = 0; i < load_attr->line_info_cnt; i++)
bpf_line_info_bswap(li++);
for (i = 0; i < gen->core_relo_cnt; i++)
bpf_core_relo_bswap(cr++);
}
void bpf_gen__prog_load(struct bpf_gen *gen,
enum bpf_prog_type prog_type, const char *prog_name,
const char *license, struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *load_attr, int prog_idx)
{
int func_info_tot_sz = load_attr->func_info_cnt *
load_attr->func_info_rec_size;
int line_info_tot_sz = load_attr->line_info_cnt *
load_attr->line_info_rec_size;
int core_relo_tot_sz = gen->core_relo_cnt *
sizeof(struct bpf_core_relo);
int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
prog_type, insn_cnt, prog_idx);
/* add license string to blob of bytes */
license_off = add_data(gen, license, strlen(license) + 1);
/* add insns to blob of bytes */
insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
prog_idx, prog_type, insns_off, insn_cnt, license_off);
/* convert blob insns to target endianness */
if (gen->swapped_endian) {
struct bpf_insn *insn = gen->data_start + insns_off;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
bpf_insn_bswap(insn);
}
attr.prog_type = tgt_endian(prog_type);
attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
attr.prog_type = prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.attach_btf_id = load_attr->attach_btf_id;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.kern_version = 0;
attr.insn_cnt = tgt_endian((__u32)insn_cnt);
attr.prog_flags = tgt_endian(load_attr->prog_flags);
attr.insn_cnt = (__u32)insn_cnt;
attr.prog_flags = load_attr->prog_flags;
attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
func_info, load_attr->func_info_cnt,
load_attr->func_info_rec_size);
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
func_info = add_data(gen, load_attr->func_info,
attr.func_info_cnt * attr.func_info_rec_size);
attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
line_info, load_attr->line_info_cnt,
load_attr->line_info_rec_size);
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
line_info = add_data(gen, load_attr->line_info,
attr.line_info_cnt * attr.line_info_rec_size);
attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
core_relos, gen->core_relo_cnt,
sizeof(struct bpf_core_relo));
/* convert all info blobs to target endianness */
if (gen->swapped_endian)
info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);
attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
attr.core_relo_cnt = gen->core_relo_cnt;
core_relos = add_data(gen, gen->core_relos,
attr.core_relo_cnt * attr.core_relo_rec_size);
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
prog_load_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: prog_load: attr: off %d size %d\n",
prog_load_attr, attr_size);
/* populate union bpf_attr with a pointer to license */
emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
@@ -1119,6 +1040,7 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
int zero = 0;
memset(&attr, 0, attr_size);
pr_debug("gen: map_update_elem: idx %d\n", map_idx);
value = add_data(gen, pvalue, value_size);
key = add_data(gen, &zero, sizeof(zero));
@@ -1146,8 +1068,6 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
map_update_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
map_idx, value, value_size, map_update_attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
blob_fd_array_off(gen, map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1164,16 +1084,14 @@ void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slo
int attr_size = offsetofend(union bpf_attr, flags);
int map_update_attr, key;
union bpf_attr attr;
int tgt_slot;
memset(&attr, 0, attr_size);
pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
outer_map_idx, slot, inner_map_idx);
tgt_slot = tgt_endian(slot);
key = add_data(gen, &tgt_slot, sizeof(tgt_slot));
key = add_data(gen, &slot, sizeof(slot));
map_update_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
blob_fd_array_off(gen, outer_map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1194,9 +1112,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: map_freeze: idx %d\n", map_idx);
map_freeze_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
map_idx, map_freeze_attr, attr_size);
move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
blob_fd_array_off(gen, map_idx));
/* emit MAP_FREEZE command */


@@ -166,8 +166,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @bkt: integer used as a bucket loop cursor
*/
#define hashmap__for_each_entry(map, cur, bkt) \
for (bkt = 0; bkt < (map)->cap; bkt++) \
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
for (bkt = 0; bkt < map->cap; bkt++) \
for (cur = map->buckets[bkt]; cur; cur = cur->next)
/*
* hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
@@ -178,8 +178,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @bkt: integer used as a bucket loop cursor
*/
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
for (bkt = 0; bkt < (map)->cap; bkt++) \
for (cur = (map)->buckets[bkt]; \
for (bkt = 0; bkt < map->cap; bkt++) \
for (cur = map->buckets[bkt]; \
cur && ({tmp = cur->next; true; }); \
cur = tmp)
@@ -190,19 +190,19 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @key: key to iterate entries for
*/
#define hashmap__for_each_key_entry(map, cur, _key) \
for (cur = (map)->buckets \
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
for (cur = map->buckets \
? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
: NULL; \
cur; \
cur = cur->next) \
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
if (map->equal_fn(cur->key, (_key), map->ctx))
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
for (cur = (map)->buckets \
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
for (cur = map->buckets \
? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
: NULL; \
cur && ({ tmp = cur->next; true; }); \
cur = tmp) \
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
if (map->equal_fn(cur->key, (_key), map->ctx))
#endif /* __LIBBPF_HASHMAP_H */
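
A hedged sketch of the iteration macros above against libbpf's internal hashmap (the hash/equal callbacks are trivial placeholders):

#include <stdio.h>
#include "hashmap.h"   /* libbpf-internal header */

static size_t id_hash(long key, void *ctx) { return key; }
static bool id_equal(long a, long b, void *ctx) { return a == b; }

void demo(void)
{
    struct hashmap *map = hashmap__new(id_hash, id_equal, NULL);
    struct hashmap_entry *cur;
    size_t bkt;

    hashmap__add(map, 1, 100);
    hashmap__add(map, 2, 200);

    /* bkt and cur are plain cursors, as documented above */
    hashmap__for_each_entry(map, cur, bkt)
        printf("key=%ld value=%ld\n", cur->key, cur->value);

    hashmap__free(map);
}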


@@ -694,8 +694,6 @@ struct bpf_object {
/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
struct elf_state efile;
unsigned char byteorder;
struct btf *btf;
struct btf_ext *btf_ext;
@@ -942,20 +940,6 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return 0;
}
static void bpf_object_bswap_progs(struct bpf_object *obj)
{
struct bpf_program *prog = obj->programs;
struct bpf_insn *insn;
int p, i;
for (p = 0; p < obj->nr_programs; p++, prog++) {
insn = prog->insns;
for (i = 0; i < prog->insns_cnt; i++, insn++)
bpf_insn_bswap(insn);
}
pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs);
}
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
@@ -1002,7 +986,7 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
struct btf *btf = NULL;
struct btf *btf;
__s32 kern_vtype_id, kern_type_id;
char tname[256];
__u32 i;
@@ -1132,7 +1116,7 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
const struct btf *kern_btf;
struct module_btf *mod_btf = NULL;
struct module_btf *mod_btf;
void *data, *kern_data;
const char *tname;
int err;
@@ -1522,7 +1506,6 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
elf_end(obj->efile.elf);
obj->efile.elf = NULL;
obj->efile.ehdr = NULL;
obj->efile.symbols = NULL;
obj->efile.arena_data = NULL;
@@ -1588,16 +1571,6 @@ static int bpf_object__elf_init(struct bpf_object *obj)
goto errout;
}
/* Validate ELF object endianness... */
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
err = -LIBBPF_ERRNO__ENDIAN;
pr_warn("elf: '%s' has unknown byte order\n", obj->path);
goto errout;
}
/* and save after bpf_object_open() frees ELF data */
obj->byteorder = ehdr->e_ident[EI_DATA];
if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
pr_warn("elf: failed to get section names section index for %s: %s\n",
obj->path, elf_errmsg(-1));
@@ -1626,15 +1599,19 @@ errout:
return err;
}
static bool is_native_endianness(struct bpf_object *obj)
static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
return obj->byteorder == ELFDATA2LSB;
if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
return obj->byteorder == ELFDATA2MSB;
if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
pr_warn("elf: endianness mismatch in %s.\n", obj->path);
return -LIBBPF_ERRNO__ENDIAN;
}
static int
@@ -1871,7 +1848,7 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
sfx_len, real_name);
/* sanities map name to characters allowed by kernel */
/* sanitise map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
if (!isalnum(*p) && *p != '_' && *p != '.')
*p = '_';
@@ -3604,12 +3581,11 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
report:
if (err) {
btf_mandatory = kernel_needs_btf(obj);
if (btf_mandatory) {
pr_warn("Error loading .BTF into kernel: %d. BTF is mandatory, can't proceed.\n", err);
} else {
pr_info("Error loading .BTF into kernel: %d. BTF is optional, ignoring.\n", err);
pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
btf_mandatory ? "BTF is mandatory, can't proceed."
: "BTF is optional, ignoring.");
if (!btf_mandatory)
err = 0;
}
}
return err;
}
@@ -3977,10 +3953,6 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return -LIBBPF_ERRNO__FORMAT;
}
/* change BPF program insns to native endianness for introspection */
if (!is_native_endianness(obj))
bpf_object_bswap_progs(obj);
/* sort BPF programs by section name and in-section instruction offset
* for faster search
*/
@@ -4013,7 +3985,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
return true;
/* global function */
return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
return bind == STB_GLOBAL && type == STT_FUNC;
}
static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
@@ -4417,7 +4389,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
{
return prog->sec_idx == obj->efile.text_shndx;
return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}
struct bpf_program *
@@ -5122,7 +5094,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
enum libbpf_map_type map_type = map->libbpf_type;
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
size_t mmap_sz;
if (obj->gen_loader) {
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -5136,8 +5107,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("map '%s': failed to set initial contents: %s\n",
bpf_map__name(map), cp);
pr_warn("Error setting initial map(%s) contents: %s\n",
map->name, cp);
return err;
}
@@ -5147,43 +5118,11 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("map '%s': failed to freeze as read-only: %s\n",
bpf_map__name(map), cp);
pr_warn("Error freezing map(%s) as read-only: %s\n",
map->name, cp);
return err;
}
}
/* Remap anonymous mmap()-ed "map initialization image" as
* a BPF map-backed mmap()-ed memory, but preserving the same
* memory address. This will cause kernel to change process'
* page table to point to a different piece of kernel memory,
* but from userspace point of view memory address (and its
* contents, being identical at this point) will stay the
* same. This mapping will be released by bpf_object__close()
* as per normal clean up procedure.
*/
mmap_sz = bpf_map_mmap_sz(map);
if (map->def.map_flags & BPF_F_MMAPABLE) {
void *mmaped;
int prot;
if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
prot = PROT_READ | PROT_WRITE;
mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
if (mmaped == MAP_FAILED) {
err = -errno;
pr_warn("map '%s': failed to re-mmap() contents: %d\n",
bpf_map__name(map), err);
return err;
}
map->mmaped = mmaped;
} else if (map->mmaped) {
munmap(map->mmaped, mmap_sz);
map->mmaped = NULL;
}
return 0;
}
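
For illustration, a minimal self-contained sketch of the MAP_FIXED remap technique the removed comment describes, staged against a plain file instead of a BPF map fd (the path and names here are made up, and libbpf gets identical contents for free because bpf_map_update_elem() already copied the image into the map). The same comment reappears in the skeleton loader further down this diff.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t sz = (size_t)sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/remap-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0 || ftruncate(fd, sz) < 0)
		return 1;

	/* Stage the "initialization image" in an anonymous mapping. */
	char *addr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return 1;
	strcpy(addr, "initial image");

	/* Copy the image into the backing object first (libbpf does the
	 * analogous step via bpf_map_update_elem), so contents stay identical. */
	if (pwrite(fd, addr, sz, 0) != (ssize_t)sz)
		return 1;

	/* Swap in file-backed pages at the very same virtual address:
	 * MAP_FIXED replaces the anonymous pages, so existing pointers
	 * into the region remain valid while the backing store changes. */
	char *remapped = mmap(addr, sz, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_FIXED, fd, 0);
	if (remapped == MAP_FAILED)
		return 1;

	printf("address preserved: %s, contents: %s\n",
	       remapped == addr ? "yes" : "no", remapped);
	munmap(remapped, sz);
	close(fd);
	return 0;
}
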
@@ -5500,7 +5439,8 @@ retry:
err = bpf_object__populate_internal_map(obj, map);
if (err < 0)
goto err_out;
} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
}
if (map->def.type == BPF_MAP_TYPE_ARENA) {
map->mmaped = mmap((void *)(long)map->map_extra,
bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
@@ -7412,14 +7352,8 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
/* special check for usdt to use uprobe_multi link */
if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) {
/* for BPF_TRACE_UPROBE_MULTI, user might want to query expected_attach_type
* in prog, and expected_attach_type we set in kernel is from opts, so we
* update both.
*/
if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
}
if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
int btf_obj_fd = 0, btf_type_id = 0, err;
@@ -7509,7 +7443,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.attach_btf_id = prog->attach_btf_id;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
load_attr.expected_attach_type = prog->expected_attach_type;
/* specify func_info/line_info only if kernel supports them */
if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
@@ -7541,6 +7474,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
insns_cnt = prog->insns_cnt;
}
/* allow prog_prepare_load_fn to change expected_attach_type */
load_attr.expected_attach_type = prog->expected_attach_type;
if (obj->gen_loader) {
bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
license, insns, insns_cnt, &load_attr,
@@ -8056,6 +7992,7 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
}
err = bpf_object__elf_init(obj);
err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__collect_externs(obj);
err = err ? : bpf_object_fixup_btf(obj);
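
The err = err ? : step() chain above relies on the GNU `a ?: b` shorthand: it evaluates to a when a is nonzero, without evaluating b at all, so the first failing step short-circuits everything after it. A tiny standalone sketch:

#include <stdio.h>

static int step_ok(void)    { return 0; }
static int step_fail(void)  { return -22; }
static int step_never(void) { printf("never runs\n"); return 0; }

int main(void)
{
	int err = 0;

	err = err ?: step_ok();    /* err was 0, so step_ok() runs */
	err = err ?: step_fail();  /* still 0: runs and sets err = -22 */
	err = err ?: step_never(); /* err nonzero: right side not evaluated */
	printf("err = %d\n", err);
	return 0;
}
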
@@ -8561,15 +8498,8 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
return libbpf_err(-EINVAL);
}
/* Disallow kernel loading programs of non-native endianness but
* permit cross-endian creation of "light skeleton".
*/
if (obj->gen_loader) {
if (obj->gen_loader)
bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
} else if (!is_native_endianness(obj)) {
pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name);
return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
}
err = bpf_object_prepare_token(obj);
err = err ? : bpf_object__probe_loading(obj);
@@ -9129,11 +9059,6 @@ unsigned int bpf_object__kversion(const struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
int bpf_object__token_fd(const struct bpf_object *obj)
{
return obj->token_fd ?: -1;
}
struct btf *bpf_object__btf(const struct bpf_object *obj)
{
return obj ? obj->btf : NULL;
@@ -9166,7 +9091,6 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
if (!gen)
return -ENOMEM;
gen->opts = opts;
gen->swapped_endian = !is_native_endianness(obj);
obj->gen_loader = gen;
return 0;
}
@@ -11764,7 +11688,7 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
ret = 0;
break;
case 3:
opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
ret = libbpf_get_error(*link);
break;
@@ -12831,7 +12755,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
}
if (prog->type != BPF_PROG_TYPE_EXT) {
pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
prog->name);
return libbpf_err_ptr(-EINVAL);
}
@@ -13905,7 +13829,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
map_type = btf__type_by_id(btf, map_type_id);
if (!btf_is_datasec(map_type)) {
pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
pr_warn("type for map '%1$s' is not a datasec: %2$s",
bpf_map__name(map),
__btf_kind_str(btf_kind(map_type)));
return libbpf_err(-EINVAL);
@@ -13948,11 +13872,46 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = map_skel->mmaped;
if (!map_skel->mmaped)
if (!mmaped)
continue;
*map_skel->mmaped = map->mmaped;
if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
*mmaped = NULL;
continue;
}
if (map->def.type == BPF_MAP_TYPE_ARENA) {
*mmaped = map->mmaped;
continue;
}
if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
prot = PROT_READ | PROT_WRITE;
/* Remap anonymous mmap()-ed "map initialization image" as
* a BPF map-backed mmap()-ed memory, but preserving the same
* memory address. This will cause kernel to change process'
* page table to point to a different piece of kernel memory,
* but from userspace point of view memory address (and its
* contents, being identical at this point) will stay the
* same. This mapping will be released by bpf_object__close()
* as per normal clean up procedure, so we don't need to worry
* about it from skeleton's clean up perspective.
*/
*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
if (*mmaped == MAP_FAILED) {
err = -errno;
*mmaped = NULL;
pr_warn("failed to re-mmap() map '%s': %d\n",
bpf_map__name(map), err);
return libbpf_err(err);
}
}
return 0;


@@ -152,7 +152,7 @@ struct bpf_object_open_opts {
* log_buf and log_level settings.
*
* If specified, this log buffer will be passed for:
* - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
* - each BPF program load (BPF_PROG_LOAD) attempt, unless overriden
* with bpf_program__set_log() on per-program level, to get
* BPF verifier log output.
* - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
@@ -294,14 +294,6 @@ LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
/**
* @brief **bpf_object__token_fd** is an accessor for BPF token FD associated
* with BPF object.
* @param obj Pointer to a valid BPF object
* @return BPF token FD or -1, if it wasn't set
*/
LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj);
struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
@@ -463,7 +455,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
/**
* @brief **bpf_program__attach()** is a generic function for attaching
* a BPF program based on auto-detection of program type, attach type,
* and extra parameters, where applicable.
* and extra paremeters, where applicable.
*
* @param prog BPF program to attach
* @return Reference to the newly created BPF link; or NULL is returned on error,
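
A hedged usage sketch for the generic attach API documented above; the object path and program name are invented for illustration:

#include <bpf/libbpf.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err = 1;

	obj = bpf_object__open_file("prog.bpf.o", NULL); /* hypothetical object */
	if (!obj)
		return 1;
	if (bpf_object__load(obj))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "handle_exec"); /* hypothetical name */
	if (!prog)
		goto out;

	/* Program type, attach type and target are auto-detected from the
	 * program's SEC() annotation, as the doc comment above describes. */
	link = bpf_program__attach(prog);
	if (!link) {
		fprintf(stderr, "attach failed: %d\n", -errno);
		goto out;
	}

	err = 0;
	bpf_link__destroy(link);
out:
	bpf_object__close(obj);
	return err;
}
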
@@ -687,7 +679,7 @@ struct bpf_uprobe_opts {
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
* to the userspace function which is found by binary path and
* offset. You can optionally specify a particular process to attach
* offset. You can optionally specify a particular proccess to attach
* to. You can also optionally attach the program to the function
* exit instead of entry.
*
@@ -1601,11 +1593,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
* memory region of the ring buffer.
* This ring buffer can be used to implement a custom events consumer.
* The ring buffer starts with the *struct perf_event_mmap_page*, which
* holds the ring buffer management fields; when accessing the header
* holds the ring buffer managment fields; when accessing the header
* structure it's important to be SMP aware.
* You can refer to *perf_event_read_simple* for a simple example.
* @param pb the perf buffer structure
* @param buf_idx the buffer index to retrieve
* @param buf_idx the buffer index to retreive
* @param buf (out) gets the base pointer of the mmap()'ed memory
* @param buf_size (out) gets the size of the mmap()'ed region
* @return 0 on success, negative error code for failure
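
A hedged sketch of a custom consumer built on this accessor; the exact parameter types and the acquire-load follow common perf conventions and should be checked against your libbpf version:

#include <bpf/libbpf.h>
#include <linux/perf_event.h>
#include <stdio.h>

static int peek_ring(struct perf_buffer *pb, int idx)
{
	void *base;
	size_t size;

	if (perf_buffer__buffer(pb, idx, &base, &size))
		return -1;

	/* The region starts with the control page; read data_head with
	 * acquire semantics so the data it covers is visible (SMP-aware). */
	struct perf_event_mmap_page *hdr = base;
	__u64 head = __atomic_load_n(&hdr->data_head, __ATOMIC_ACQUIRE);
	__u64 tail = hdr->data_tail;

	printf("ring %d: %llu unread bytes\n", idx,
	       (unsigned long long)(head - tail));
	return 0;
}
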


@@ -421,11 +421,8 @@ LIBBPF_1.5.0 {
global:
btf__distill_base;
btf__relocate;
btf_ext__endianness;
btf_ext__set_endianness;
bpf_map__autoattach;
bpf_map__set_autoattach;
bpf_object__token_fd;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;


@@ -10,7 +10,6 @@
#define __LIBBPF_LIBBPF_INTERNAL_H
#include <stdlib.h>
#include <byteswap.h>
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
@@ -449,11 +448,11 @@ struct btf_ext_info {
*
* The func_info subsection layout:
* record size for struct bpf_func_info in the func_info subsection
* struct btf_ext_info_sec for section #1
* struct btf_sec_func_info for section #1
* a list of bpf_func_info records for section #1
* where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
* but may not be identical
* struct btf_ext_info_sec for section #2
* struct btf_sec_func_info for section #2
* a list of bpf_func_info records for section #2
* ......
*
@@ -485,8 +484,6 @@ struct btf_ext {
struct btf_ext_header *hdr;
void *data;
};
void *data_swapped;
bool swapped_endian;
struct btf_ext_info func_info;
struct btf_ext_info line_info;
struct btf_ext_info core_relo_info;
@@ -514,32 +511,6 @@ struct bpf_line_info_min {
__u32 line_col;
};
/* Functions to byte-swap info records */
typedef void (*info_rec_bswap_fn)(void *);
static inline void bpf_func_info_bswap(struct bpf_func_info *i)
{
i->insn_off = bswap_32(i->insn_off);
i->type_id = bswap_32(i->type_id);
}
static inline void bpf_line_info_bswap(struct bpf_line_info *i)
{
i->insn_off = bswap_32(i->insn_off);
i->file_name_off = bswap_32(i->file_name_off);
i->line_off = bswap_32(i->line_off);
i->line_col = bswap_32(i->line_col);
}
static inline void bpf_core_relo_bswap(struct bpf_core_relo *i)
{
i->insn_off = bswap_32(i->insn_off);
i->type_id = bswap_32(i->type_id);
i->access_str_off = bswap_32(i->access_str_off);
i->kind = bswap_32(i->kind);
}
enum btf_field_iter_kind {
BTF_FIELD_ITER_IDS,
BTF_FIELD_ITER_STRS,
@@ -617,16 +588,6 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
static inline void bpf_insn_bswap(struct bpf_insn *insn)
{
__u8 tmp_reg = insn->dst_reg;
insn->dst_reg = insn->src_reg;
insn->src_reg = tmp_reg;
insn->off = bswap_16(insn->off);
insn->imm = bswap_32(insn->imm);
}
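
The removed helper above byte-swaps one 64-bit BPF instruction: the opcode byte is endian-neutral, the packed 4-bit dst/src register fields trade places (opposite-endian ABIs allocate bitfields in reverse order), and off/imm get byte-swapped. A standalone demo with a mirrored struct so no kernel headers are needed; bitfield layout is ABI-dependent, so treat this as illustrative:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

struct insn {
	uint8_t code;      /* single byte: endian-neutral */
	uint8_t dst_reg:4; /* 4-bit fields swap places across endianness */
	uint8_t src_reg:4;
	int16_t off;       /* multi-byte fields get byte-swapped */
	int32_t imm;
};

static void insn_bswap(struct insn *insn)
{
	uint8_t tmp = insn->dst_reg;

	insn->dst_reg = insn->src_reg;
	insn->src_reg = tmp;
	insn->off = bswap_16(insn->off);
	insn->imm = bswap_32(insn->imm);
}

int main(void)
{
	struct insn i = { .code = 0xb7, .dst_reg = 1, .src_reg = 0, .imm = 42 };

	insn_bswap(&i); /* now laid out for the opposite-endian target */
	printf("imm after swap: 0x%08x\n", i.imm);
	return 0;
}
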
/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
* Original FD is not closed or altered in any other way.
* Preserves original FD value, if it's invalid (negative).


@@ -76,7 +76,7 @@ enum libbpf_strict_mode {
* first BPF program or map creation operation. This is done only if
* kernel is too old to support memcg-based memory accounting for BPF
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
* but it can be overridden with libbpf_set_memlock_rlim() API.
* but it can be overriden with libbpf_set_memlock_rlim() API.
* Note that libbpf_set_memlock_rlim() needs to be called before
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
* operation.
@@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
* @brief **libbpf_get_error()** extracts the error code from the passed
* pointer
* @param ptr pointer returned from libbpf API function
* @return error code; or 0 if no error occurred
* @return error code; or 0 if no error occured
*
* Note, as of libbpf 1.0 this function is not necessary and not recommended
* to be used. Libbpf doesn't return error code embedded into the pointer


@@ -97,6 +97,9 @@ __u32 get_kernel_version(void)
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
return 0;
if (major == 4 && minor == 19 && patch > 255)
return KERNEL_VERSION(major, minor, 255);
return KERNEL_VERSION(major, minor, patch);
}
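
The 4.19 clamp above is needed because the classic kernel version packing gives the patch level only 8 bits, and the 4.19 LTS series went past patch 255; unclamped, 4.19.260 becomes indistinguishable from 4.20.4. A minimal sketch of the packing (newer linux/version.h clamps the third field itself, which is assumed rather than shown here):

#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* the overflowing patch level spills into the minor field */
	printf("4.19.260 -> %#x\n", KERNEL_VERSION(4, 19, 260)); /* 0x41404 */
	printf("4.20.4   -> %#x\n", KERNEL_VERSION(4, 20, 4));   /* 0x41404 too */
	printf("clamped  -> %#x\n", KERNEL_VERSION(4, 19, 255)); /* unambiguous */
	return 0;
}
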


@@ -135,7 +135,6 @@ struct bpf_linker {
int fd;
Elf *elf;
Elf64_Ehdr *elf_hdr;
bool swapped_endian;
/* Output sections metadata */
struct dst_sec *secs;
@@ -325,8 +324,13 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
linker->elf_hdr->e_machine = EM_BPF;
linker->elf_hdr->e_type = ET_REL;
/* Set unknown ELF endianness, assign later from input files */
linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER__"
#endif
/* STRTAB */
/* initialize strset with an empty string to conform to ELF */
@@ -392,8 +396,6 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
pr_warn_elf("failed to create SYMTAB data");
return -EINVAL;
}
/* Ensure libelf translates byte-order of symbol records */
sec->data->d_type = ELF_T_SYM;
str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
if (str_off < 0)
@@ -537,21 +539,19 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
struct src_obj *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
const int host_endianness = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const int host_endianness = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER__"
#endif
int err = 0;
Elf_Scn *scn;
Elf_Data *data;
Elf64_Ehdr *ehdr;
Elf64_Shdr *shdr;
struct src_sec *sec;
unsigned char obj_byteorder;
unsigned char link_byteorder = linker->elf_hdr->e_ident[EI_DATA];
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
const unsigned char host_byteorder = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const unsigned char host_byteorder = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER__"
#endif
pr_debug("linker: adding object file '%s'...\n", filename);
@@ -577,25 +577,11 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
pr_warn_elf("failed to get ELF header for %s", filename);
return err;
}
/* Linker output endianness set by first input object */
obj_byteorder = ehdr->e_ident[EI_DATA];
if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
if (ehdr->e_ident[EI_DATA] != host_endianness) {
err = -EOPNOTSUPP;
pr_warn("unknown byte order of ELF file %s\n", filename);
pr_warn_elf("unsupported byte order of ELF file %s", filename);
return err;
}
if (link_byteorder == ELFDATANONE) {
linker->elf_hdr->e_ident[EI_DATA] = obj_byteorder;
linker->swapped_endian = obj_byteorder != host_byteorder;
pr_debug("linker: set %s-endian output byte order\n",
obj_byteorder == ELFDATA2MSB ? "big" : "little");
} else if (link_byteorder != obj_byteorder) {
err = -EOPNOTSUPP;
pr_warn("byte order mismatch with ELF file %s\n", filename);
return err;
}
if (ehdr->e_type != ET_REL
|| ehdr->e_machine != EM_BPF
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
@@ -1123,24 +1109,6 @@ static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec
return true;
}
static bool is_exec_sec(struct dst_sec *sec)
{
if (!sec || sec->ephemeral)
return false;
return (sec->shdr->sh_type == SHT_PROGBITS) &&
(sec->shdr->sh_flags & SHF_EXECINSTR);
}
static void exec_sec_bswap(void *raw_data, int size)
{
const int insn_cnt = size / sizeof(struct bpf_insn);
struct bpf_insn *insn = raw_data;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
bpf_insn_bswap(insn);
}
static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
{
void *tmp;
@@ -1200,10 +1168,6 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src
memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
/* now copy src data at a properly aligned offset */
memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
/* convert added bpf insns to native byte-order */
if (linker->swapped_endian && is_exec_sec(dst))
exec_sec_bswap(dst->raw_data + dst_align_sz, src->shdr->sh_size);
}
dst->sec_sz = dst_final_sz;
@@ -1449,7 +1413,7 @@ recur:
return true;
case BTF_KIND_PTR:
/* just validate overall shape of the referenced type, so no
* contents comparison for struct/union, and allowed fwd vs
* contents comparison for struct/union, and allowd fwd vs
* struct/union
*/
exact = false;
@@ -1998,7 +1962,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
/* If existing symbol is a strong resolved symbol, bail out,
* because we lost the resolution battle and have nothing to
* contribute. We already checked above that there is no
* contribute. We already checked abover that there is no
* strong-strong conflict. We also already tightened binding
* and visibility, so nothing else to contribute at that point.
*/
@@ -2451,10 +2415,6 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
if (glob_sym && glob_sym->var_idx >= 0) {
__s64 sz;
/* FUNCs don't have size, nothing to update */
if (btf_is_func(t))
continue;
dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
/* Because underlying BTF type might have
* changed, so might its size have changed, so
@@ -2668,10 +2628,6 @@ int bpf_linker__finalize(struct bpf_linker *linker)
if (!sec->scn)
continue;
/* restore sections with bpf insns to target byte-order */
if (linker->swapped_endian && is_exec_sec(sec))
exec_sec_bswap(sec->raw_data, sec->sec_sz);
sec->data->d_buf = sec->raw_data;
}
@@ -2740,7 +2696,6 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
static int finalize_btf(struct bpf_linker *linker)
{
enum btf_endianness link_endianness;
LIBBPF_OPTS(btf_dedup_opts, opts);
struct btf *btf = linker->btf;
const void *raw_data;
@@ -2785,13 +2740,6 @@ static int finalize_btf(struct bpf_linker *linker)
return err;
}
/* Set .BTF and .BTF.ext output byte order */
link_endianness = linker->elf_hdr->e_ident[EI_DATA] == ELFDATA2MSB ?
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
btf__set_endianness(linker->btf, link_endianness);
if (linker->btf_ext)
btf_ext__set_endianness(linker->btf_ext, link_endianness);
/* Emit .BTF section */
raw_data = btf__raw_data(linker->btf, &raw_sz);
if (!raw_data)


@@ -1339,7 +1339,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
cands->cands[i].id, cand_spec);
if (err < 0) {
bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
prog_name, relo_idx, i, spec_buf, err);
return err;
}


@@ -107,7 +107,7 @@ static inline void skel_free(const void *p)
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
* skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
* does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
* is not necessary.
* is not nessary.
*
* For user space:
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
@@ -351,11 +351,10 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
attr.test.ctx_size_in = opts->ctx->sz;
err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
if (err < 0 || (int)attr.test.retval < 0) {
opts->errstr = "failed to execute loader prog";
if (err < 0) {
opts->errstr = "failed to execute loader prog";
set_err;
} else {
opts->errstr = "error returned by loader prog";
err = (int)attr.test.retval;
#ifndef __KERNEL__
errno = -err;


@@ -39,7 +39,7 @@ enum __bpf_usdt_arg_type {
struct __bpf_usdt_arg_spec {
/* u64 scalar interpreted depending on arg_type, see below */
__u64 val_off;
/* arg location case, see bpf_usdt_arg() for details */
/* arg location case, see bpf_udst_arg() for details */
enum __bpf_usdt_arg_type arg_type;
/* offset of referenced register within struct pt_regs */
short reg_off;


@@ -223,7 +223,7 @@ struct zip_archive *zip_archive_open(const char *path)
if (!archive) {
munmap(data, size);
return ERR_PTR(-ENOMEM);
}
};
archive->data = data;
archive->size = size;