mirror of
https://github.com/netdata/libbpf.git
synced 2026-03-14 05:19:06 +08:00
Compare commits
47 Commits
netdata_pa
...
netdata_pa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ad7c3a4266 | ||
|
|
54a4576b5b | ||
|
|
09b9e83102 | ||
|
|
891438c086 | ||
|
|
2d7a79a984 | ||
|
|
ee92f521ab | ||
|
|
2dea4b86ee | ||
|
|
fdbdbb6b8a | ||
|
|
fc064eb41e | ||
|
|
db8a210964 | ||
|
|
f69995d909 | ||
|
|
ac9ced9eb3 | ||
|
|
ba8bd24bbb | ||
|
|
8ea6e12372 | ||
|
|
0e3971339f | ||
|
|
ecf998ed8f | ||
|
|
89df6536bf | ||
|
|
8244006267 | ||
|
|
6ac8762ecd | ||
|
|
628b21dbcd | ||
|
|
5ae8432d15 | ||
|
|
f2668a0a71 | ||
|
|
5060f172cc | ||
|
|
ceeb7211c9 | ||
|
|
3fb92e63e0 | ||
|
|
ad633fb142 | ||
|
|
3ea36843b3 | ||
|
|
80b16457cb | ||
|
|
7827ca87d1 | ||
|
|
91ccd57ca9 | ||
|
|
f0a307f61c | ||
|
|
80b97bd0b8 | ||
|
|
7c2f492a88 | ||
|
|
114f6ce2fd | ||
|
|
69671302df | ||
|
|
e1833cff9c | ||
|
|
81ac790dc8 | ||
|
|
3b301cf75d | ||
|
|
6c8dde3554 | ||
|
|
9045c3ab53 | ||
|
|
53a645402f | ||
|
|
6d01681b02 | ||
|
|
9a37057800 | ||
|
|
3578ab89fb | ||
|
|
178df3d885 | ||
|
|
1f98105e54 | ||
|
|
a4161e00f9 |
14704
.github/actions/build-selftests/vmlinux.h
vendored
14704
.github/actions/build-selftests/vmlinux.h
vendored
File diff suppressed because it is too large
Load Diff
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -70,7 +70,7 @@ jobs:
|
||||
- run: source /tmp/ci_setup && sudo -E $CI_ROOT/managers/ubuntu.sh
|
||||
if: matrix.arch == 'x86'
|
||||
name: Setup
|
||||
- uses: uraimo/run-on-arch-action@v2.7.1
|
||||
- uses: uraimo/run-on-arch-action@v2.8.1
|
||||
name: Build in docker
|
||||
if: matrix.arch != 'x86'
|
||||
with:
|
||||
|
||||
2
.github/workflows/cifuzz.yml
vendored
2
.github/workflows/cifuzz.yml
vendored
@@ -33,7 +33,7 @@ jobs:
|
||||
dry-run: false
|
||||
sanitizer: ${{ matrix.sanitizer }}
|
||||
- name: Upload Crash
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v4
|
||||
if: failure() && steps.build.outcome == 'success'
|
||||
with:
|
||||
name: ${{ matrix.sanitizer }}-artifacts
|
||||
|
||||
3
.github/workflows/test.yml
vendored
3
.github/workflows/test.yml
vendored
@@ -27,9 +27,6 @@ jobs:
|
||||
- kernel: '4.9.0'
|
||||
runs_on: ubuntu-24.04
|
||||
arch: 'x86_64'
|
||||
- kernel: 'LATEST'
|
||||
runs_on: ["s390x", "docker-noble-main"]
|
||||
arch: 's390x'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
name: Checkout
|
||||
|
||||
@@ -1 +1 @@
|
||||
b408473ea01b2e499d23503e2bf898416da9d7ac
|
||||
d5fb316e2af1d947f0f6c3666e373a54d9f27c6f
|
||||
|
||||
@@ -1 +1 @@
|
||||
2ad6d23f465a4f851e3bcf6d74c315ce7b2c205b
|
||||
c6fb8030b4baa01c850f99fc6da051b1017edc46
|
||||
|
||||
58
ci/diffs/0003-selftests-bpf-Fix-uprobe-consumer-test.patch
Normal file
58
ci/diffs/0003-selftests-bpf-Fix-uprobe-consumer-test.patch
Normal file
@@ -0,0 +1,58 @@
|
||||
From affb32e4f056883f285f8535b766293b85752fb4 Mon Sep 17 00:00:00 2001
|
||||
From: Jiri Olsa <jolsa@kernel.org>
|
||||
Date: Tue, 24 Sep 2024 13:07:30 +0200
|
||||
Subject: [PATCH] selftests/bpf: Fix uprobe consumer test
|
||||
|
||||
With newly merged code the uprobe behaviour is slightly different
|
||||
and affects uprobe consumer test.
|
||||
|
||||
We no longer need to check if the uprobe object is still preserved
|
||||
after removing last uretprobe, because it stays as long as there's
|
||||
pending/installed uretprobe instance.
|
||||
|
||||
This allows to run uretprobe consumers registered 'after' uprobe was
|
||||
hit even if previous uretprobe got unregistered before being hit.
|
||||
|
||||
The uprobe object will be now removed after the last uprobe ref is
|
||||
released and in such case it's held by ri->uprobe (return instance)
|
||||
which is released after the uretprobe is hit.
|
||||
|
||||
Reported-by: Ihor Solodrai <ihor.solodrai@pm.me>
|
||||
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
|
||||
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
|
||||
Tested-by: Ihor Solodrai <ihor.solodrai@pm.me>
|
||||
Closes: https://lore.kernel.org/bpf/w6U8Z9fdhjnkSp2UaFaV1fGqJXvfLEtDKEUyGDkwmoruDJ_AgF_c0FFhrkeKW18OqiP-05s9yDKiT6X-Ns-avN_ABf0dcUkXqbSJN1TQSXo=@pm.me/
|
||||
---
|
||||
.../testing/selftests/bpf/prog_tests/uprobe_multi_test.c | 9 +--------
|
||||
1 file changed, 1 insertion(+), 8 deletions(-)
|
||||
|
||||
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
|
||||
index 844f6fc8487b..c1ac813ff9ba 100644
|
||||
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
|
||||
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
|
||||
@@ -869,21 +869,14 @@ static void consumer_test(struct uprobe_multi_consumers *skel,
|
||||
fmt = "prog 0/1: uprobe";
|
||||
} else {
|
||||
/*
|
||||
- * uprobe return is tricky ;-)
|
||||
- *
|
||||
* to trigger uretprobe consumer, the uretprobe needs to be installed,
|
||||
* which means one of the 'return' uprobes was alive when probe was hit:
|
||||
*
|
||||
* idxs: 2/3 uprobe return in 'installed' mask
|
||||
- *
|
||||
- * in addition if 'after' state removes everything that was installed in
|
||||
- * 'before' state, then uprobe kernel object goes away and return uprobe
|
||||
- * is not installed and we won't hit it even if it's in 'after' state.
|
||||
*/
|
||||
unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */
|
||||
- unsigned long probe_preserved = before & after; /* did uprobe go away */
|
||||
|
||||
- if (had_uretprobes && probe_preserved && test_bit(idx, after))
|
||||
+ if (had_uretprobes && test_bit(idx, after))
|
||||
val++;
|
||||
fmt = "idx 2/3: uretprobe";
|
||||
}
|
||||
--
|
||||
2.34.1
|
||||
|
||||
@@ -121,6 +121,8 @@ described in more detail in the footnotes.
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | |
|
||||
@@ -131,11 +133,23 @@ described in more detail in the footnotes.
|
||||
+ + +----------------------------------+-----------+
|
||||
| | | ``raw_tracepoint+`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | |
|
||||
| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | |
|
||||
| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | |
|
||||
+ + +----------------------------------+-----------+
|
||||
| | | ``tc`` | |
|
||||
| | | ``tc`` [#tc_legacy]_ | |
|
||||
+ +----------------------------------------+----------------------------------+-----------+
|
||||
| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | |
|
||||
+ +----------------------------------------+----------------------------------+-----------+
|
||||
| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | |
|
||||
+ +----------------------------------------+----------------------------------+-----------+
|
||||
| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | |
|
||||
+ +----------------------------------------+----------------------------------+-----------+
|
||||
| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | |
|
||||
+ +----------------------------------------+----------------------------------+-----------+
|
||||
| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | |
|
||||
+ +----------------------------------------+----------------------------------+-----------+
|
||||
| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
@@ -155,7 +169,9 @@ described in more detail in the footnotes.
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | |
|
||||
| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | |
|
||||
+ + +----------------------------------+-----------+
|
||||
| | | ``struct_ops.s+`` [#struct_ops]_ | Yes |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
| ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes |
|
||||
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
|
||||
@@ -209,5 +225,11 @@ described in more detail in the footnotes.
|
||||
``a-zA-Z0-9_.*?``.
|
||||
.. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``.
|
||||
.. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
|
||||
.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use
|
||||
``tcx/*`` instead.
|
||||
.. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/<name>`` convention,
|
||||
but ``name`` is ignored and it is recommended to just use plain
|
||||
``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer
|
||||
that is tagged with ``SEC(".struct_ops[.link]")``.
|
||||
.. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
|
||||
.. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
|
||||
|
||||
@@ -1121,6 +1121,9 @@ enum bpf_attach_type {
|
||||
|
||||
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
|
||||
|
||||
/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
|
||||
* in sync with the definitions below.
|
||||
*/
|
||||
enum bpf_link_type {
|
||||
BPF_LINK_TYPE_UNSPEC = 0,
|
||||
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
|
||||
@@ -1970,6 +1973,8 @@ union bpf_attr {
|
||||
* program.
|
||||
* Return
|
||||
* The SMP id of the processor running the program.
|
||||
* Attributes
|
||||
* __bpf_fastcall
|
||||
*
|
||||
* long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
|
||||
* Description
|
||||
@@ -2851,7 +2856,7 @@ union bpf_attr {
|
||||
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
|
||||
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
|
||||
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
|
||||
* **TCP_BPF_RTO_MIN**.
|
||||
* **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
|
||||
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
|
||||
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
|
||||
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
|
||||
@@ -3101,10 +3106,6 @@ union bpf_attr {
|
||||
* with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
|
||||
* option, and in this case it only works on functions tagged with
|
||||
* **ALLOW_ERROR_INJECTION** in the kernel code.
|
||||
*
|
||||
* Also, the helper is only available for the architectures having
|
||||
* the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
|
||||
* x86 architecture is the only one to support this feature.
|
||||
* Return
|
||||
* 0
|
||||
*
|
||||
@@ -5369,7 +5370,7 @@ union bpf_attr {
|
||||
* Currently, the **flags** must be 0. Currently, nr_loops is
|
||||
* limited to 1 << 23 (~8 million) loops.
|
||||
*
|
||||
* long (\*callback_fn)(u32 index, void \*ctx);
|
||||
* long (\*callback_fn)(u64 index, void \*ctx);
|
||||
*
|
||||
* where **index** is the current index in the loop. The index
|
||||
* is zero-indexed.
|
||||
@@ -5519,11 +5520,12 @@ union bpf_attr {
|
||||
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
|
||||
* invalid arguments are passed.
|
||||
*
|
||||
* void *bpf_kptr_xchg(void *map_value, void *ptr)
|
||||
* void *bpf_kptr_xchg(void *dst, void *ptr)
|
||||
* Description
|
||||
* Exchange kptr at pointer *map_value* with *ptr*, and return the
|
||||
* old value. *ptr* can be NULL, otherwise it must be a referenced
|
||||
* pointer which will be released when this helper is called.
|
||||
* Exchange kptr at pointer *dst* with *ptr*, and return the old value.
|
||||
* *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
|
||||
* it must be a referenced pointer which will be released when this helper
|
||||
* is called.
|
||||
* Return
|
||||
* The old value of kptr (which can be NULL). The returned pointer
|
||||
* if not NULL, is a reference which must be released using its
|
||||
@@ -6046,11 +6048,6 @@ enum {
|
||||
BPF_F_MARK_ENFORCE = (1ULL << 6),
|
||||
};
|
||||
|
||||
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
|
||||
enum {
|
||||
BPF_F_INGRESS = (1ULL << 0),
|
||||
};
|
||||
|
||||
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
|
||||
enum {
|
||||
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
|
||||
@@ -6197,10 +6194,12 @@ enum {
|
||||
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
|
||||
};
|
||||
|
||||
/* Flags for bpf_redirect_map helper */
|
||||
/* Flags for bpf_redirect and bpf_redirect_map helpers */
|
||||
enum {
|
||||
BPF_F_BROADCAST = (1ULL << 3),
|
||||
BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
|
||||
BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
|
||||
BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
|
||||
BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
|
||||
#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
|
||||
};
|
||||
|
||||
#define __bpf_md_ptr(type, name) \
|
||||
@@ -7080,6 +7079,7 @@ enum {
|
||||
TCP_BPF_SYN = 1005, /* Copy the TCP header */
|
||||
TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
|
||||
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
|
||||
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
|
||||
};
|
||||
|
||||
enum {
|
||||
|
||||
@@ -93,6 +93,7 @@ enum {
|
||||
NETDEV_A_PAGE_POOL_INFLIGHT,
|
||||
NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
|
||||
NETDEV_A_PAGE_POOL_DETACH_TIME,
|
||||
NETDEV_A_PAGE_POOL_DMABUF,
|
||||
|
||||
__NETDEV_A_PAGE_POOL_MAX,
|
||||
NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
|
||||
@@ -131,6 +132,7 @@ enum {
|
||||
NETDEV_A_QUEUE_IFINDEX,
|
||||
NETDEV_A_QUEUE_TYPE,
|
||||
NETDEV_A_QUEUE_NAPI_ID,
|
||||
NETDEV_A_QUEUE_DMABUF,
|
||||
|
||||
__NETDEV_A_QUEUE_MAX,
|
||||
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
|
||||
@@ -173,6 +175,16 @@ enum {
|
||||
NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
|
||||
};
|
||||
|
||||
enum {
|
||||
NETDEV_A_DMABUF_IFINDEX = 1,
|
||||
NETDEV_A_DMABUF_QUEUES,
|
||||
NETDEV_A_DMABUF_FD,
|
||||
NETDEV_A_DMABUF_ID,
|
||||
|
||||
__NETDEV_A_DMABUF_MAX,
|
||||
NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1)
|
||||
};
|
||||
|
||||
enum {
|
||||
NETDEV_CMD_DEV_GET = 1,
|
||||
NETDEV_CMD_DEV_ADD_NTF,
|
||||
@@ -186,6 +198,7 @@ enum {
|
||||
NETDEV_CMD_QUEUE_GET,
|
||||
NETDEV_CMD_NAPI_GET,
|
||||
NETDEV_CMD_QSTATS_GET,
|
||||
NETDEV_CMD_BIND_RX,
|
||||
|
||||
__NETDEV_CMD_MAX,
|
||||
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
|
||||
|
||||
@@ -100,7 +100,7 @@ struct bpf_prog_load_opts {
|
||||
__u32 log_level;
|
||||
__u32 log_size;
|
||||
char *log_buf;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
/* output: actual total log contents size (including terminating zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
@@ -129,7 +129,7 @@ struct bpf_btf_load_opts {
|
||||
char *log_buf;
|
||||
__u32 log_level;
|
||||
__u32 log_size;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
/* output: actual total log contents size (including terminating zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
|
||||
@@ -34,6 +34,7 @@ struct bpf_gen {
|
||||
void *data_cur;
|
||||
void *insn_start;
|
||||
void *insn_cur;
|
||||
bool swapped_endian;
|
||||
ssize_t cleanup_label;
|
||||
__u32 nr_progs;
|
||||
__u32 nr_maps;
|
||||
|
||||
@@ -44,6 +44,14 @@ struct bpf_dynptr;
|
||||
struct iphdr;
|
||||
struct ipv6hdr;
|
||||
|
||||
#ifndef __bpf_fastcall
|
||||
#if __has_attribute(bpf_fastcall)
|
||||
#define __bpf_fastcall __attribute__((bpf_fastcall))
|
||||
#else
|
||||
#define __bpf_fastcall
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* bpf_map_lookup_elem
|
||||
*
|
||||
@@ -203,7 +211,7 @@ static __u32 (* const bpf_get_prandom_u32)(void) = (void *) 7;
|
||||
* Returns
|
||||
* The SMP id of the processor running the program.
|
||||
*/
|
||||
static __u32 (* const bpf_get_smp_processor_id)(void) = (void *) 8;
|
||||
static __bpf_fastcall __u32 (* const bpf_get_smp_processor_id)(void) = (void *) 8;
|
||||
|
||||
/*
|
||||
* bpf_skb_store_bytes
|
||||
@@ -1224,7 +1232,7 @@ static long (* const bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *)
|
||||
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
|
||||
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
|
||||
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
|
||||
* **TCP_BPF_RTO_MIN**.
|
||||
* **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
|
||||
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
|
||||
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
|
||||
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
|
||||
@@ -1511,10 +1519,6 @@ static long (* const bpf_getsockopt)(void *bpf_socket, int level, int optname, v
|
||||
* option, and in this case it only works on functions tagged with
|
||||
* **ALLOW_ERROR_INJECTION** in the kernel code.
|
||||
*
|
||||
* Also, the helper is only available for the architectures having
|
||||
* the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
|
||||
* x86 architecture is the only one to support this feature.
|
||||
*
|
||||
* Returns
|
||||
* 0
|
||||
*/
|
||||
@@ -4220,7 +4224,7 @@ static long (* const bpf_find_vma)(struct task_struct *task, __u64 addr, void *c
|
||||
* Currently, the **flags** must be 0. Currently, nr_loops is
|
||||
* limited to 1 << 23 (~8 million) loops.
|
||||
*
|
||||
* long (\*callback_fn)(u32 index, void \*ctx);
|
||||
* long (\*callback_fn)(u64 index, void \*ctx);
|
||||
*
|
||||
* where **index** is the current index in the loop. The index
|
||||
* is zero-indexed.
|
||||
@@ -4424,9 +4428,10 @@ static long (* const bpf_ima_file_hash)(struct file *file, void *dst, __u32 size
|
||||
/*
|
||||
* bpf_kptr_xchg
|
||||
*
|
||||
* Exchange kptr at pointer *map_value* with *ptr*, and return the
|
||||
* old value. *ptr* can be NULL, otherwise it must be a referenced
|
||||
* pointer which will be released when this helper is called.
|
||||
* Exchange kptr at pointer *dst* with *ptr*, and return the old value.
|
||||
* *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
|
||||
* it must be a referenced pointer which will be released when this helper
|
||||
* is called.
|
||||
*
|
||||
* Returns
|
||||
* The old value of kptr (which can be NULL). The returned pointer
|
||||
@@ -4434,7 +4439,7 @@ static long (* const bpf_ima_file_hash)(struct file *file, void *dst, __u32 size
|
||||
* corresponding release function, or moved into a BPF map before
|
||||
* program exit.
|
||||
*/
|
||||
static void *(* const bpf_kptr_xchg)(void *map_value, void *ptr) = (void *) 194;
|
||||
static void *(* const bpf_kptr_xchg)(void *dst, void *ptr) = (void *) 194;
|
||||
|
||||
/*
|
||||
* bpf_map_lookup_percpu_elem
|
||||
|
||||
@@ -185,6 +185,7 @@ enum libbpf_tristate {
|
||||
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
|
||||
#define __kptr __attribute__((btf_type_tag("kptr")))
|
||||
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
|
||||
#define __uptr __attribute__((btf_type_tag("uptr")))
|
||||
|
||||
#if defined (__clang__)
|
||||
#define bpf_ksym_exists(sym) ({ \
|
||||
@@ -341,7 +342,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
|
||||
* I.e., it looks almost like high-level for each loop in other languages,
|
||||
* supports continue/break, and is verifiable by BPF verifier.
|
||||
*
|
||||
* For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
|
||||
* For iterating integers, the difference between bpf_for_each(num, i, N, M)
|
||||
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
|
||||
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
|
||||
* *`, not just `int`. So for integers bpf_for() is more convenient.
|
||||
|
||||
@@ -163,7 +163,7 @@
|
||||
|
||||
struct pt_regs___s390 {
|
||||
unsigned long orig_gpr2;
|
||||
};
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
|
||||
@@ -179,7 +179,7 @@ struct pt_regs___s390 {
|
||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
||||
#define __PT_PARM6_SYSCALL_REG gprs[7]
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
||||
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
|
||||
|
||||
@@ -222,7 +222,7 @@ struct pt_regs___s390 {
|
||||
|
||||
struct pt_regs___arm64 {
|
||||
unsigned long orig_x0;
|
||||
};
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
|
||||
@@ -241,7 +241,7 @@ struct pt_regs___arm64 {
|
||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
||||
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
|
||||
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
|
||||
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
||||
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
|
||||
|
||||
@@ -351,6 +351,10 @@ struct pt_regs___arm64 {
|
||||
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
|
||||
*/
|
||||
|
||||
struct pt_regs___riscv {
|
||||
unsigned long orig_a0;
|
||||
} __attribute__((preserve_access_index));
|
||||
|
||||
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
|
||||
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
|
||||
#define __PT_PARM1_REG a0
|
||||
@@ -362,12 +366,15 @@ struct pt_regs___arm64 {
|
||||
#define __PT_PARM7_REG a6
|
||||
#define __PT_PARM8_REG a7
|
||||
|
||||
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
|
||||
#define __PT_PARM1_SYSCALL_REG orig_a0
|
||||
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
|
||||
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
|
||||
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
|
||||
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
|
||||
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
|
||||
#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG)
|
||||
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
|
||||
BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG)
|
||||
|
||||
#define __PT_RET_REG ra
|
||||
#define __PT_FP_REG s0
|
||||
@@ -473,7 +480,7 @@ struct pt_regs;
|
||||
#endif
|
||||
/*
|
||||
* Similarly, syscall-specific conventions might differ between function call
|
||||
* conventions within each architecutre. All supported architectures pass
|
||||
* conventions within each architecture. All supported architectures pass
|
||||
* either 6 or 7 syscall arguments in registers.
|
||||
*
|
||||
* See syscall(2) manpage for succinct table with information on each arch.
|
||||
@@ -515,7 +522,7 @@ struct pt_regs;
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
|
||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||
|
||||
#elif defined(bpf_target_sparc)
|
||||
#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
|
||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||
@@ -651,7 +658,7 @@ struct pt_regs;
|
||||
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
|
||||
* similar kinds of BPF programs, that accept input arguments as a single
|
||||
* pointer to untyped u64 array, where each u64 can actually be a typed
|
||||
* pointer or integer of different size. Instead of requring user to write
|
||||
* pointer or integer of different size. Instead of requiring user to write
|
||||
* manual casts and work with array elements by index, BPF_PROG macro
|
||||
* allows user to declare a list of named and typed input arguments in the
|
||||
* same syntax as for normal C function. All the casting is hidden and
|
||||
@@ -801,7 +808,7 @@ struct pt_regs;
|
||||
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
|
||||
* low-level way of getting kprobe input arguments from struct pt_regs, and
|
||||
* provides a familiar typed and named function arguments syntax and
|
||||
* semantics of accessing kprobe input paremeters.
|
||||
* semantics of accessing kprobe input parameters.
|
||||
*
|
||||
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
|
||||
* be necessary when using BPF helpers like bpf_perf_event_output().
|
||||
|
||||
284
src/btf.c
284
src/btf.c
@@ -2885,7 +2885,7 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
|
||||
return btf_commit_type(btf, sz);
|
||||
}
|
||||
|
||||
struct btf_ext_sec_setup_param {
|
||||
struct btf_ext_sec_info_param {
|
||||
__u32 off;
|
||||
__u32 len;
|
||||
__u32 min_rec_size;
|
||||
@@ -2893,14 +2893,20 @@ struct btf_ext_sec_setup_param {
|
||||
const char *desc;
|
||||
};
|
||||
|
||||
static int btf_ext_setup_info(struct btf_ext *btf_ext,
|
||||
struct btf_ext_sec_setup_param *ext_sec)
|
||||
/*
|
||||
* Parse a single info subsection of the BTF.ext info data:
|
||||
* - validate subsection structure and elements
|
||||
* - save info subsection start and sizing details in struct btf_ext
|
||||
* - endian-independent operation, for calling before byte-swapping
|
||||
*/
|
||||
static int btf_ext_parse_sec_info(struct btf_ext *btf_ext,
|
||||
struct btf_ext_sec_info_param *ext_sec,
|
||||
bool is_native)
|
||||
{
|
||||
const struct btf_ext_info_sec *sinfo;
|
||||
struct btf_ext_info *ext_info;
|
||||
__u32 info_left, record_size;
|
||||
size_t sec_cnt = 0;
|
||||
/* The start of the info sec (including the __u32 record_size). */
|
||||
void *info;
|
||||
|
||||
if (ext_sec->len == 0)
|
||||
@@ -2912,6 +2918,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The start of the info sec (including the __u32 record_size). */
|
||||
info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
|
||||
info_left = ext_sec->len;
|
||||
|
||||
@@ -2927,9 +2934,13 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The record size needs to meet the minimum standard */
|
||||
record_size = *(__u32 *)info;
|
||||
/* The record size needs to meet either the minimum standard or, when
|
||||
* handling non-native endianness data, the exact standard so as
|
||||
* to allow safe byte-swapping.
|
||||
*/
|
||||
record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info);
|
||||
if (record_size < ext_sec->min_rec_size ||
|
||||
(!is_native && record_size != ext_sec->min_rec_size) ||
|
||||
record_size & 0x03) {
|
||||
pr_debug("%s section in .BTF.ext has invalid record size %u\n",
|
||||
ext_sec->desc, record_size);
|
||||
@@ -2941,7 +2952,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
|
||||
|
||||
/* If no records, return failure now so .BTF.ext won't be used. */
|
||||
if (!info_left) {
|
||||
pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
|
||||
pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -2956,7 +2967,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
num_records = sinfo->num_info;
|
||||
num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info);
|
||||
if (num_records == 0) {
|
||||
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
|
||||
ext_sec->desc);
|
||||
@@ -2984,64 +2995,157 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
|
||||
/* Parse all info secs in the BTF.ext info data */
|
||||
static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native)
|
||||
{
|
||||
struct btf_ext_sec_setup_param param = {
|
||||
struct btf_ext_sec_info_param func_info = {
|
||||
.off = btf_ext->hdr->func_info_off,
|
||||
.len = btf_ext->hdr->func_info_len,
|
||||
.min_rec_size = sizeof(struct bpf_func_info_min),
|
||||
.ext_info = &btf_ext->func_info,
|
||||
.desc = "func_info"
|
||||
};
|
||||
|
||||
return btf_ext_setup_info(btf_ext, ¶m);
|
||||
}
|
||||
|
||||
static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
|
||||
{
|
||||
struct btf_ext_sec_setup_param param = {
|
||||
struct btf_ext_sec_info_param line_info = {
|
||||
.off = btf_ext->hdr->line_info_off,
|
||||
.len = btf_ext->hdr->line_info_len,
|
||||
.min_rec_size = sizeof(struct bpf_line_info_min),
|
||||
.ext_info = &btf_ext->line_info,
|
||||
.desc = "line_info",
|
||||
};
|
||||
|
||||
return btf_ext_setup_info(btf_ext, ¶m);
|
||||
}
|
||||
|
||||
static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
|
||||
{
|
||||
struct btf_ext_sec_setup_param param = {
|
||||
struct btf_ext_sec_info_param core_relo = {
|
||||
.off = btf_ext->hdr->core_relo_off,
|
||||
.len = btf_ext->hdr->core_relo_len,
|
||||
.min_rec_size = sizeof(struct bpf_core_relo),
|
||||
.ext_info = &btf_ext->core_relo_info,
|
||||
.desc = "core_relo",
|
||||
};
|
||||
int err;
|
||||
|
||||
return btf_ext_setup_info(btf_ext, ¶m);
|
||||
err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
|
||||
return 0; /* skip core relos parsing */
|
||||
|
||||
err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
|
||||
/* Swap byte-order of BTF.ext header with any endianness */
|
||||
static void btf_ext_bswap_hdr(struct btf_ext_header *h)
|
||||
{
|
||||
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
|
||||
bool is_native = h->magic == BTF_MAGIC;
|
||||
__u32 hdr_len;
|
||||
|
||||
if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
|
||||
data_size < hdr->hdr_len) {
|
||||
pr_debug("BTF.ext header not found");
|
||||
hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len);
|
||||
|
||||
h->magic = bswap_16(h->magic);
|
||||
h->hdr_len = bswap_32(h->hdr_len);
|
||||
h->func_info_off = bswap_32(h->func_info_off);
|
||||
h->func_info_len = bswap_32(h->func_info_len);
|
||||
h->line_info_off = bswap_32(h->line_info_off);
|
||||
h->line_info_len = bswap_32(h->line_info_len);
|
||||
|
||||
if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
|
||||
return;
|
||||
|
||||
h->core_relo_off = bswap_32(h->core_relo_off);
|
||||
h->core_relo_len = bswap_32(h->core_relo_len);
|
||||
}
|
||||
|
||||
/* Swap byte-order of generic info subsection */
|
||||
static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native,
|
||||
info_rec_bswap_fn bswap_fn)
|
||||
{
|
||||
struct btf_ext_info_sec *sec;
|
||||
__u32 info_left, rec_size, *rs;
|
||||
|
||||
if (len == 0)
|
||||
return;
|
||||
|
||||
rs = info; /* info record size */
|
||||
rec_size = is_native ? *rs : bswap_32(*rs);
|
||||
*rs = bswap_32(*rs);
|
||||
|
||||
sec = info + sizeof(__u32); /* info sec #1 */
|
||||
info_left = len - sizeof(__u32);
|
||||
while (info_left) {
|
||||
unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
|
||||
__u32 i, num_recs;
|
||||
void *p;
|
||||
|
||||
num_recs = is_native ? sec->num_info : bswap_32(sec->num_info);
|
||||
sec->sec_name_off = bswap_32(sec->sec_name_off);
|
||||
sec->num_info = bswap_32(sec->num_info);
|
||||
p = sec->data; /* info rec #1 */
|
||||
for (i = 0; i < num_recs; i++, p += rec_size)
|
||||
bswap_fn(p);
|
||||
sec = p;
|
||||
info_left -= sec_hdrlen + (__u64)rec_size * num_recs;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Swap byte-order of all info data in a BTF.ext section
|
||||
* - requires BTF.ext hdr in native endianness
|
||||
*/
|
||||
static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data)
|
||||
{
|
||||
const bool is_native = btf_ext->swapped_endian;
|
||||
const struct btf_ext_header *h = data;
|
||||
void *info;
|
||||
|
||||
/* Swap func_info subsection byte-order */
|
||||
info = data + h->hdr_len + h->func_info_off;
|
||||
btf_ext_bswap_info_sec(info, h->func_info_len, is_native,
|
||||
(info_rec_bswap_fn)bpf_func_info_bswap);
|
||||
|
||||
/* Swap line_info subsection byte-order */
|
||||
info = data + h->hdr_len + h->line_info_off;
|
||||
btf_ext_bswap_info_sec(info, h->line_info_len, is_native,
|
||||
(info_rec_bswap_fn)bpf_line_info_bswap);
|
||||
|
||||
/* Swap core_relo subsection byte-order (if present) */
|
||||
if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
|
||||
return;
|
||||
|
||||
info = data + h->hdr_len + h->core_relo_off;
|
||||
btf_ext_bswap_info_sec(info, h->core_relo_len, is_native,
|
||||
(info_rec_bswap_fn)bpf_core_relo_bswap);
|
||||
}
|
||||
|
||||
/* Parse hdr data and info sections: check and convert to native endianness */
|
||||
static int btf_ext_parse(struct btf_ext *btf_ext)
|
||||
{
|
||||
__u32 hdr_len, data_size = btf_ext->data_size;
|
||||
struct btf_ext_header *hdr = btf_ext->hdr;
|
||||
bool swapped_endian = false;
|
||||
int err;
|
||||
|
||||
if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
|
||||
pr_debug("BTF.ext header too short\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hdr_len = hdr->hdr_len;
|
||||
if (hdr->magic == bswap_16(BTF_MAGIC)) {
|
||||
pr_warn("BTF.ext in non-native endianness is not supported\n");
|
||||
return -ENOTSUP;
|
||||
swapped_endian = true;
|
||||
hdr_len = bswap_32(hdr_len);
|
||||
} else if (hdr->magic != BTF_MAGIC) {
|
||||
pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (hdr->version != BTF_VERSION) {
|
||||
/* Ensure known version of structs, current BTF_VERSION == 1 */
|
||||
if (hdr->version != 1) {
|
||||
pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
|
||||
return -ENOTSUP;
|
||||
}
|
||||
@@ -3051,11 +3155,39 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
if (data_size == hdr->hdr_len) {
|
||||
if (data_size < hdr_len) {
|
||||
pr_debug("BTF.ext header not found\n");
|
||||
return -EINVAL;
|
||||
} else if (data_size == hdr_len) {
|
||||
pr_debug("BTF.ext has no data\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Verify mandatory hdr info details present */
|
||||
if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
|
||||
pr_warn("BTF.ext header missing func_info, line_info\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Keep hdr native byte-order in memory for introspection */
|
||||
if (swapped_endian)
|
||||
btf_ext_bswap_hdr(btf_ext->hdr);
|
||||
|
||||
/* Validate info subsections and cache key metadata */
|
||||
err = btf_ext_parse_info(btf_ext, !swapped_endian);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Keep infos native byte-order in memory for introspection */
|
||||
if (swapped_endian)
|
||||
btf_ext_bswap_info(btf_ext, btf_ext->data);
|
||||
|
||||
/*
|
||||
* Set btf_ext->swapped_endian only after all header and info data has
|
||||
* been swapped, helping bswap functions determine if their data are
|
||||
* in native byte-order when called.
|
||||
*/
|
||||
btf_ext->swapped_endian = swapped_endian;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3067,6 +3199,7 @@ void btf_ext__free(struct btf_ext *btf_ext)
|
||||
free(btf_ext->line_info.sec_idxs);
|
||||
free(btf_ext->core_relo_info.sec_idxs);
|
||||
free(btf_ext->data);
|
||||
free(btf_ext->data_swapped);
|
||||
free(btf_ext);
|
||||
}
|
||||
|
||||
@@ -3087,29 +3220,7 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
|
||||
}
|
||||
memcpy(btf_ext->data, data, size);
|
||||
|
||||
err = btf_ext_parse_hdr(btf_ext->data, size);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
|
||||
err = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
err = btf_ext_setup_func_info(btf_ext);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
err = btf_ext_setup_line_info(btf_ext);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
|
||||
goto done; /* skip core relos parsing */
|
||||
|
||||
err = btf_ext_setup_core_relos(btf_ext);
|
||||
if (err)
|
||||
goto done;
|
||||
err = btf_ext_parse(btf_ext);
|
||||
|
||||
done:
|
||||
if (err) {
|
||||
@@ -3120,15 +3231,66 @@ done:
|
||||
return btf_ext;
|
||||
}
|
||||
|
||||
static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian)
|
||||
{
|
||||
struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
|
||||
const __u32 data_sz = btf_ext->data_size;
|
||||
void *data;
|
||||
|
||||
/* Return native data (always present) or swapped data if present */
|
||||
if (!swap_endian)
|
||||
return btf_ext->data;
|
||||
else if (btf_ext->data_swapped)
|
||||
return btf_ext->data_swapped;
|
||||
|
||||
/* Recreate missing swapped data, then cache and return */
|
||||
data = calloc(1, data_sz);
|
||||
if (!data)
|
||||
return NULL;
|
||||
memcpy(data, btf_ext->data, data_sz);
|
||||
|
||||
btf_ext_bswap_info(btf_ext, data);
|
||||
btf_ext_bswap_hdr(data);
|
||||
btf_ext->data_swapped = data;
|
||||
return data;
|
||||
}
|
||||
|
||||
const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
|
||||
{
|
||||
void *data;
|
||||
|
||||
data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian);
|
||||
if (!data)
|
||||
return errno = ENOMEM, NULL;
|
||||
|
||||
*size = btf_ext->data_size;
|
||||
return btf_ext->data;
|
||||
return data;
|
||||
}
|
||||
|
||||
__attribute__((alias("btf_ext__raw_data")))
|
||||
const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
|
||||
|
||||
enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
|
||||
{
|
||||
if (is_host_big_endian())
|
||||
return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
|
||||
else
|
||||
return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
|
||||
}
|
||||
|
||||
int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
|
||||
{
|
||||
if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
|
||||
|
||||
if (!btf_ext->swapped_endian) {
|
||||
free(btf_ext->data_swapped);
|
||||
btf_ext->data_swapped = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct btf_dedup;
|
||||
|
||||
@@ -3291,7 +3453,7 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
|
||||
|
||||
d = btf_dedup_new(btf, opts);
|
||||
if (IS_ERR(d)) {
|
||||
pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
|
||||
pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
@@ -4192,7 +4354,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
|
||||
* and canonical graphs are not compatible structurally, whole graphs are
|
||||
* incompatible. If types are structurally equivalent (i.e., all information
|
||||
* except referenced type IDs is exactly the same), a mapping from `canon_id` to
|
||||
* a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`).
|
||||
* a `cand_id` is recoded in hypothetical mapping (`btf_dedup->hypot_map`).
|
||||
* If a type references other types, then those referenced types are checked
|
||||
* for equivalence recursively.
|
||||
*
|
||||
@@ -4230,7 +4392,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
|
||||
* consists of portions of the graph that come from multiple compilation units.
|
||||
* This is due to the fact that types within single compilation unit are always
|
||||
* deduplicated and FWDs are already resolved, if referenced struct/union
|
||||
* definiton is available. So, if we had unresolved FWD and found corresponding
|
||||
* definition is available. So, if we had unresolved FWD and found corresponding
|
||||
* STRUCT/UNION, they will be from different compilation units. This
|
||||
* consequently means that when we "link" FWD to corresponding STRUCT/UNION,
|
||||
* type graph will likely have at least two different BTF types that describe
|
||||
|
||||
@@ -167,6 +167,9 @@ LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
|
||||
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
|
||||
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
|
||||
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
|
||||
LIBBPF_API enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext);
|
||||
LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext,
|
||||
enum btf_endianness endian);
|
||||
|
||||
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
|
||||
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
|
||||
@@ -286,7 +289,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
|
||||
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
|
||||
|
||||
struct btf_dump_emit_type_decl_opts {
|
||||
/* size of this struct, for forward/backward compatiblity */
|
||||
/* size of this struct, for forward/backward compatibility */
|
||||
size_t sz;
|
||||
/* optional field name for type declaration, e.g.:
|
||||
* - struct my_struct <FNAME>
|
||||
|
||||
@@ -304,7 +304,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
|
||||
* definition, in which case they have to be declared inline as part of field
|
||||
* type declaration; or as a top-level anonymous enum, typically used for
|
||||
* declaring global constants. It's impossible to distinguish between two
|
||||
* without knowning whether given enum type was referenced from other type:
|
||||
* without knowing whether given enum type was referenced from other type:
|
||||
* top-level anonymous enum won't be referenced by anything, while embedded
|
||||
* one will.
|
||||
*/
|
||||
@@ -867,8 +867,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
|
||||
} pads[] = {
|
||||
{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
|
||||
};
|
||||
int new_off, pad_bits, bits, i;
|
||||
const char *pad_type;
|
||||
int new_off = 0, pad_bits = 0, bits, i;
|
||||
const char *pad_type = NULL;
|
||||
|
||||
if (cur_off >= next_off)
|
||||
return; /* no gap */
|
||||
@@ -1304,7 +1304,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
|
||||
* chain, restore stack, emit warning, and try to
|
||||
* proceed nevertheless
|
||||
*/
|
||||
pr_warn("not enough memory for decl stack:%d", err);
|
||||
pr_warn("not enough memory for decl stack: %d\n", err);
|
||||
d->decl_stack_cnt = stack_start;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -428,7 +428,7 @@ static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
|
||||
} else {
|
||||
off = r->str_map[*str_off];
|
||||
if (!off) {
|
||||
pr_warn("string '%s' [offset %u] is not mapped to base BTF",
|
||||
pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
|
||||
btf__str_by_offset(r->btf, off), *str_off);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
183
src/gen_loader.c
183
src/gen_loader.c
@@ -401,6 +401,15 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
|
||||
opts->insns_sz = gen->insn_cur - gen->insn_start;
|
||||
opts->data = gen->data_start;
|
||||
opts->data_sz = gen->data_cur - gen->data_start;
|
||||
|
||||
/* use target endianness for embedded loader */
|
||||
if (gen->swapped_endian) {
|
||||
struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
|
||||
int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
|
||||
|
||||
for (i = 0; i < insn_cnt; i++)
|
||||
bpf_insn_bswap(insn++);
|
||||
}
|
||||
}
|
||||
return gen->error;
|
||||
}
|
||||
@@ -414,6 +423,28 @@ void bpf_gen__free(struct bpf_gen *gen)
|
||||
free(gen);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fields of bpf_attr are set to values in native byte-order before being
|
||||
* written to the target-bound data blob, and may need endian conversion.
|
||||
* This macro allows providing the correct value in situ more simply than
|
||||
* writing a separate converter for *all fields* of *all records* included
|
||||
* in union bpf_attr. Note that sizeof(rval) should match the assignment
|
||||
* target to avoid runtime problems.
|
||||
*/
|
||||
#define tgt_endian(rval) ({ \
|
||||
typeof(rval) _val = (rval); \
|
||||
if (gen->swapped_endian) { \
|
||||
switch (sizeof(_val)) { \
|
||||
case 1: break; \
|
||||
case 2: _val = bswap_16(_val); break; \
|
||||
case 4: _val = bswap_32(_val); break; \
|
||||
case 8: _val = bswap_64(_val); break; \
|
||||
default: pr_warn("unsupported bswap size!\n"); \
|
||||
} \
|
||||
} \
|
||||
_val; \
|
||||
})
|
||||
|
||||
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
|
||||
__u32 btf_raw_size)
|
||||
{
|
||||
@@ -422,11 +453,12 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
|
||||
union bpf_attr attr;
|
||||
|
||||
memset(&attr, 0, attr_size);
|
||||
pr_debug("gen: load_btf: size %d\n", btf_raw_size);
|
||||
btf_data = add_data(gen, btf_raw_data, btf_raw_size);
|
||||
|
||||
attr.btf_size = btf_raw_size;
|
||||
attr.btf_size = tgt_endian(btf_raw_size);
|
||||
btf_load_attr = add_data(gen, &attr, attr_size);
|
||||
pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
|
||||
btf_data, btf_raw_size, btf_load_attr, attr_size);
|
||||
|
||||
/* populate union bpf_attr with user provided log details */
|
||||
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
|
||||
@@ -457,28 +489,29 @@ void bpf_gen__map_create(struct bpf_gen *gen,
|
||||
union bpf_attr attr;
|
||||
|
||||
memset(&attr, 0, attr_size);
|
||||
attr.map_type = map_type;
|
||||
attr.key_size = key_size;
|
||||
attr.value_size = value_size;
|
||||
attr.map_flags = map_attr->map_flags;
|
||||
attr.map_extra = map_attr->map_extra;
|
||||
attr.map_type = tgt_endian(map_type);
|
||||
attr.key_size = tgt_endian(key_size);
|
||||
attr.value_size = tgt_endian(value_size);
|
||||
attr.map_flags = tgt_endian(map_attr->map_flags);
|
||||
attr.map_extra = tgt_endian(map_attr->map_extra);
|
||||
if (map_name)
|
||||
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
||||
attr.numa_node = map_attr->numa_node;
|
||||
attr.map_ifindex = map_attr->map_ifindex;
|
||||
attr.max_entries = max_entries;
|
||||
attr.btf_key_type_id = map_attr->btf_key_type_id;
|
||||
attr.btf_value_type_id = map_attr->btf_value_type_id;
|
||||
|
||||
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
|
||||
attr.map_name, map_idx, map_type, attr.btf_value_type_id);
|
||||
attr.numa_node = tgt_endian(map_attr->numa_node);
|
||||
attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
|
||||
attr.max_entries = tgt_endian(max_entries);
|
||||
attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
|
||||
attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);
|
||||
|
||||
map_create_attr = add_data(gen, &attr, attr_size);
|
||||
if (attr.btf_value_type_id)
|
||||
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
|
||||
map_name, map_idx, map_type, map_attr->btf_value_type_id,
|
||||
map_create_attr, attr_size);
|
||||
|
||||
if (map_attr->btf_value_type_id)
|
||||
/* populate union bpf_attr with btf_fd saved in the stack earlier */
|
||||
move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
|
||||
stack_off(btf_fd));
|
||||
switch (attr.map_type) {
|
||||
switch (map_type) {
|
||||
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
|
||||
case BPF_MAP_TYPE_HASH_OF_MAPS:
|
||||
move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
|
||||
@@ -498,8 +531,8 @@ void bpf_gen__map_create(struct bpf_gen *gen,
|
||||
/* emit MAP_CREATE command */
|
||||
emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
|
||||
debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
|
||||
attr.map_name, map_idx, map_type, value_size,
|
||||
attr.btf_value_type_id);
|
||||
map_name, map_idx, map_type, value_size,
|
||||
map_attr->btf_value_type_id);
|
||||
emit_check_err(gen);
|
||||
/* remember map_fd in the stack, if successful */
|
||||
if (map_idx < 0) {
|
||||
@@ -784,12 +817,12 @@ log:
|
||||
emit_ksym_relo_log(gen, relo, kdesc->ref);
|
||||
}
|
||||
|
||||
static __u32 src_reg_mask(void)
|
||||
static __u32 src_reg_mask(struct bpf_gen *gen)
|
||||
{
|
||||
#if defined(__LITTLE_ENDIAN_BITFIELD)
|
||||
return 0x0f; /* src_reg,dst_reg,... */
|
||||
#elif defined(__BIG_ENDIAN_BITFIELD)
|
||||
return 0xf0; /* dst_reg,src_reg,... */
|
||||
#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
|
||||
return gen->swapped_endian ? 0xf0 : 0x0f;
|
||||
#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
|
||||
return gen->swapped_endian ? 0x0f : 0xf0;
|
||||
#else
|
||||
#error "Unsupported bit endianness, cannot proceed"
|
||||
#endif
|
||||
@@ -840,7 +873,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
|
||||
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
|
||||
clear_src_reg:
|
||||
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
|
||||
reg_mask = src_reg_mask();
|
||||
reg_mask = src_reg_mask(gen);
|
||||
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
|
||||
emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
|
||||
emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
|
||||
@@ -931,48 +964,94 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
|
||||
cleanup_core_relo(gen);
|
||||
}
|
||||
|
||||
/* Convert func, line, and core relo info blobs to target endianness */
|
||||
static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
|
||||
int core_relos, struct bpf_prog_load_opts *load_attr)
|
||||
{
|
||||
struct bpf_func_info *fi = gen->data_start + func_info;
|
||||
struct bpf_line_info *li = gen->data_start + line_info;
|
||||
struct bpf_core_relo *cr = gen->data_start + core_relos;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < load_attr->func_info_cnt; i++)
|
||||
bpf_func_info_bswap(fi++);
|
||||
|
||||
for (i = 0; i < load_attr->line_info_cnt; i++)
|
||||
bpf_line_info_bswap(li++);
|
||||
|
||||
for (i = 0; i < gen->core_relo_cnt; i++)
|
||||
bpf_core_relo_bswap(cr++);
|
||||
}
|
||||
|
||||
void bpf_gen__prog_load(struct bpf_gen *gen,
|
||||
enum bpf_prog_type prog_type, const char *prog_name,
const char *license, struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *load_attr, int prog_idx)
{
int func_info_tot_sz = load_attr->func_info_cnt *
load_attr->func_info_rec_size;
int line_info_tot_sz = load_attr->line_info_cnt *
load_attr->line_info_rec_size;
int core_relo_tot_sz = gen->core_relo_cnt *
sizeof(struct bpf_core_relo);
int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
union bpf_attr attr;

memset(&attr, 0, attr_size);
pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
prog_type, insn_cnt, prog_idx);
/* add license string to blob of bytes */
license_off = add_data(gen, license, strlen(license) + 1);
/* add insns to blob of bytes */
insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
prog_idx, prog_type, insns_off, insn_cnt, license_off);

attr.prog_type = prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.attach_btf_id = load_attr->attach_btf_id;
attr.prog_ifindex = load_attr->prog_ifindex;
/* convert blob insns to target endianness */
if (gen->swapped_endian) {
struct bpf_insn *insn = gen->data_start + insns_off;
int i;

for (i = 0; i < insn_cnt; i++, insn++)
bpf_insn_bswap(insn);
}

attr.prog_type = tgt_endian(prog_type);
attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
attr.kern_version = 0;
attr.insn_cnt = (__u32)insn_cnt;
attr.prog_flags = load_attr->prog_flags;
attr.insn_cnt = tgt_endian((__u32)insn_cnt);
attr.prog_flags = tgt_endian(load_attr->prog_flags);

attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
func_info = add_data(gen, load_attr->func_info,
attr.func_info_cnt * attr.func_info_rec_size);
attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
func_info, load_attr->func_info_cnt,
load_attr->func_info_rec_size);

attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
line_info = add_data(gen, load_attr->line_info,
attr.line_info_cnt * attr.line_info_rec_size);
attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
line_info, load_attr->line_info_cnt,
load_attr->line_info_rec_size);

attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
attr.core_relo_cnt = gen->core_relo_cnt;
core_relos = add_data(gen, gen->core_relos,
attr.core_relo_cnt * attr.core_relo_rec_size);
attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
core_relos, gen->core_relo_cnt,
sizeof(struct bpf_core_relo));

/* convert all info blobs to target endianness */
if (gen->swapped_endian)
info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);

libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
prog_load_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: prog_load: attr: off %d size %d\n",
prog_load_attr, attr_size);

/* populate union bpf_attr with a pointer to license */
emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
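The tgt_endian() conversions above matter because a cross-endian "light skeleton" embeds a union bpf_attr blob that the target kernel reads in its own byte order. A minimal sketch of the idea behind such a helper (an illustration under assumptions, not the exact macro gen_loader.c defines):

#include <byteswap.h>
#include <stdbool.h>
#include <linux/types.h>

/* Illustrative only: byte-swap a 32-bit attribute field when the
 * generated loader targets the opposite byte order from the host. */
static inline __u32 to_target_u32(bool swapped_endian, __u32 v)
{
	return swapped_endian ? bswap_32(v) : v;
}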
@@ -1040,7 +1119,6 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
int zero = 0;

memset(&attr, 0, attr_size);
pr_debug("gen: map_update_elem: idx %d\n", map_idx);

value = add_data(gen, pvalue, value_size);
key = add_data(gen, &zero, sizeof(zero));
@@ -1068,6 +1146,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

map_update_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
map_idx, value, value_size, map_update_attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
blob_fd_array_off(gen, map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1084,14 +1164,16 @@ void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slo
int attr_size = offsetofend(union bpf_attr, flags);
int map_update_attr, key;
union bpf_attr attr;
int tgt_slot;

memset(&attr, 0, attr_size);
pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
outer_map_idx, slot, inner_map_idx);

key = add_data(gen, &slot, sizeof(slot));
tgt_slot = tgt_endian(slot);
key = add_data(gen, &tgt_slot, sizeof(tgt_slot));

map_update_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
blob_fd_array_off(gen, outer_map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1112,8 +1194,9 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
union bpf_attr attr;

memset(&attr, 0, attr_size);
pr_debug("gen: map_freeze: idx %d\n", map_idx);
map_freeze_attr = add_data(gen, &attr, attr_size);
pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
map_idx, map_freeze_attr, attr_size);
move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
blob_fd_array_off(gen, map_idx));
/* emit MAP_FREEZE command */

@@ -166,8 +166,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @bkt: integer used as a bucket loop cursor
*/
#define hashmap__for_each_entry(map, cur, bkt) \
for (bkt = 0; bkt < map->cap; bkt++) \
for (cur = map->buckets[bkt]; cur; cur = cur->next)
for (bkt = 0; bkt < (map)->cap; bkt++) \
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)

/*
* hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
@@ -178,8 +178,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @bkt: integer used as a bucket loop cursor
*/
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
for (bkt = 0; bkt < map->cap; bkt++) \
for (cur = map->buckets[bkt]; \
for (bkt = 0; bkt < (map)->cap; bkt++) \
for (cur = (map)->buckets[bkt]; \
cur && ({tmp = cur->next; true; }); \
cur = tmp)

@@ -190,19 +190,19 @@ bool hashmap_find(const struct hashmap *map, long key, long *value);
* @key: key to iterate entries for
*/
#define hashmap__for_each_key_entry(map, cur, _key) \
for (cur = map->buckets \
? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
for (cur = (map)->buckets \
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
: NULL; \
cur; \
cur = cur->next) \
if (map->equal_fn(cur->key, (_key), map->ctx))
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))

#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
for (cur = map->buckets \
? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
for (cur = (map)->buckets \
? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \
: NULL; \
cur && ({ tmp = cur->next; true; }); \
cur = tmp) \
if (map->equal_fn(cur->key, (_key), map->ctx))
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
#endif /* __LIBBPF_HASHMAP_H */
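The parenthesization added above is standard macro hygiene: without it, any argument more complex than a plain identifier expands incorrectly. A self-contained sketch (hypothetical CAP_* macros, not part of hashmap.h) shows the failure mode:

#include <stdbool.h>

struct hashmap_demo { unsigned cap; };

#define CAP_BAD(map)  map->cap    /* unhygienic, like the old macros */
#define CAP_GOOD(map) (map)->cap  /* hygienic, like the fixed macros */

unsigned pick_cap(bool small, struct hashmap_demo *a, struct hashmap_demo *b)
{
	/* CAP_BAD(small ? a : b)  expands to: small ? a : b->cap   -- a type error;
	 * CAP_GOOD(small ? a : b) expands to: (small ? a : b)->cap -- as intended. */
	return CAP_GOOD(small ? a : b);
}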
177
src/libbpf.c
@@ -694,6 +694,8 @@ struct bpf_object {
/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
struct elf_state efile;

unsigned char byteorder;

struct btf *btf;
struct btf_ext *btf_ext;

@@ -940,6 +942,20 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return 0;
}

static void bpf_object_bswap_progs(struct bpf_object *obj)
{
struct bpf_program *prog = obj->programs;
struct bpf_insn *insn;
int p, i;

for (p = 0; p < obj->nr_programs; p++, prog++) {
insn = prog->insns;
for (i = 0; i < prog->insns_cnt; i++, insn++)
bpf_insn_bswap(insn);
}
pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs);
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
@@ -986,7 +1002,7 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
struct btf *btf;
struct btf *btf = NULL;
__s32 kern_vtype_id, kern_type_id;
char tname[256];
__u32 i;
@@ -1116,7 +1132,7 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
const struct btf *kern_btf;
struct module_btf *mod_btf;
struct module_btf *mod_btf = NULL;
void *data, *kern_data;
const char *tname;
int err;
@@ -1506,6 +1522,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)

elf_end(obj->efile.elf);
obj->efile.elf = NULL;
obj->efile.ehdr = NULL;
obj->efile.symbols = NULL;
obj->efile.arena_data = NULL;

@@ -1571,6 +1588,16 @@ static int bpf_object__elf_init(struct bpf_object *obj)
goto errout;
}

/* Validate ELF object endianness... */
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
err = -LIBBPF_ERRNO__ENDIAN;
pr_warn("elf: '%s' has unknown byte order\n", obj->path);
goto errout;
}
/* and save after bpf_object_open() frees ELF data */
obj->byteorder = ehdr->e_ident[EI_DATA];

if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
pr_warn("elf: failed to get section names section index for %s: %s\n",
obj->path, elf_errmsg(-1));
@@ -1599,19 +1626,15 @@ errout:
return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
static bool is_native_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
return 0;
return obj->byteorder == ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
return 0;
return obj->byteorder == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
pr_warn("elf: endianness mismatch in %s.\n", obj->path);
return -LIBBPF_ERRNO__ENDIAN;
}

static int
@@ -1848,7 +1871,7 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
sfx_len, real_name);

/* sanitise map name to characters allowed by kernel */
/* sanities map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
if (!isalnum(*p) && *p != '_' && *p != '.')
*p = '_';
@@ -3581,11 +3604,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
report:
if (err) {
btf_mandatory = kernel_needs_btf(obj);
pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
btf_mandatory ? "BTF is mandatory, can't proceed."
: "BTF is optional, ignoring.");
if (!btf_mandatory)
if (btf_mandatory) {
pr_warn("Error loading .BTF into kernel: %d. BTF is mandatory, can't proceed.\n", err);
} else {
pr_info("Error loading .BTF into kernel: %d. BTF is optional, ignoring.\n", err);
err = 0;
}
}
return err;
}
@@ -3953,6 +3977,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return -LIBBPF_ERRNO__FORMAT;
}

/* change BPF program insns to native endianness for introspection */
if (!is_native_endianness(obj))
bpf_object_bswap_progs(obj);

/* sort BPF programs by section name and in-section instruction offset
* for faster search
*/
@@ -3985,7 +4013,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
return true;

/* global function */
return bind == STB_GLOBAL && type == STT_FUNC;
return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
}
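Accepting STB_WEAK means functions annotated __weak in BPF C (bpf_helpers.h defines __weak as __attribute__((weak))) are now recognized as subprograms too. A hypothetical BPF-side sketch of the case this enables:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* a weak function lands in .text with STB_WEAK binding; the updated
 * sym_is_subprog() check now classifies it as a subprog */
__weak int add_one(int x)
{
	return x + 1;
}

SEC("tp/syscalls/sys_enter_getpid")
int handler(void *ctx)
{
	return add_one(0);
}

char LICENSE[] SEC("license") = "GPL";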

static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
@@ -4389,7 +4417,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)

static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
{
return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
return prog->sec_idx == obj->efile.text_shndx;
}

struct bpf_program *
@@ -5094,6 +5122,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
enum libbpf_map_type map_type = map->libbpf_type;
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
size_t mmap_sz;

if (obj->gen_loader) {
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -5107,8 +5136,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("Error setting initial map(%s) contents: %s\n",
map->name, cp);
pr_warn("map '%s': failed to set initial contents: %s\n",
bpf_map__name(map), cp);
return err;
}

@@ -5118,11 +5147,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("Error freezing map(%s) as read-only: %s\n",
map->name, cp);
pr_warn("map '%s': failed to freeze as read-only: %s\n",
bpf_map__name(map), cp);
return err;
}
}

/* Remap anonymous mmap()-ed "map initialization image" as
* a BPF map-backed mmap()-ed memory, but preserving the same
* memory address. This will cause kernel to change process'
* page table to point to a different piece of kernel memory,
* but from userspace point of view memory address (and its
* contents, being identical at this point) will stay the
* same. This mapping will be released by bpf_object__close()
* as per normal clean up procedure.
*/
mmap_sz = bpf_map_mmap_sz(map);
if (map->def.map_flags & BPF_F_MMAPABLE) {
void *mmaped;
int prot;

if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
prot = PROT_READ | PROT_WRITE;
mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
if (mmaped == MAP_FAILED) {
err = -errno;
pr_warn("map '%s': failed to re-mmap() contents: %d\n",
bpf_map__name(map), err);
return err;
}
map->mmaped = mmaped;
} else if (map->mmaped) {
munmap(map->mmaped, mmap_sz);
map->mmaped = NULL;
}

return 0;
}
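The remap trick documented in the comment above can be reproduced standalone: reserve an address range with an anonymous mapping, then mmap() the BPF map fd over it with MAP_SHARED | MAP_FIXED so the userspace address stays stable while the backing memory changes. A minimal sketch under assumptions (page-aligned size, error handling trimmed, names hypothetical):

#include <sys/mman.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static void *map_at_stable_addr(size_t sz /* assumed page-aligned */)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo", 4, sz, 1, &opts);
	/* reserve the address range with anonymous memory first... */
	void *addr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	/* ...then replace it in place with map-backed pages; the address
	 * is unchanged, only the kernel memory behind it differs */
	return mmap(addr, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_FIXED, fd, 0);
}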

@@ -5439,8 +5500,7 @@ retry:
err = bpf_object__populate_internal_map(obj, map);
if (err < 0)
goto err_out;
}
if (map->def.type == BPF_MAP_TYPE_ARENA) {
} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
map->mmaped = mmap((void *)(long)map->map_extra,
bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
@@ -7352,8 +7412,14 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;

/* special check for usdt to use uprobe_multi link */
if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) {
/* for BPF_TRACE_UPROBE_MULTI, user might want to query expected_attach_type
* in prog, and expected_attach_type we set in kernel is from opts, so we
* update both.
*/
prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
}

if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
int btf_obj_fd = 0, btf_type_id = 0, err;
@@ -7443,6 +7509,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.attach_btf_id = prog->attach_btf_id;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
load_attr.expected_attach_type = prog->expected_attach_type;

/* specify func_info/line_info only if kernel supports them */
if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
@@ -7474,9 +7541,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
insns_cnt = prog->insns_cnt;
}

/* allow prog_prepare_load_fn to change expected_attach_type */
load_attr.expected_attach_type = prog->expected_attach_type;

if (obj->gen_loader) {
bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
license, insns, insns_cnt, &load_attr,
@@ -7992,7 +8056,6 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
}

err = bpf_object__elf_init(obj);
err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__collect_externs(obj);
err = err ? : bpf_object_fixup_btf(obj);
@@ -8498,8 +8561,15 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
return libbpf_err(-EINVAL);
}

if (obj->gen_loader)
/* Disallow kernel loading programs of non-native endianness but
* permit cross-endian creation of "light skeleton".
*/
if (obj->gen_loader) {
bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
} else if (!is_native_endianness(obj)) {
pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name);
return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
}

err = bpf_object_prepare_token(obj);
err = err ? : bpf_object__probe_loading(obj);
@@ -9059,6 +9129,11 @@ unsigned int bpf_object__kversion(const struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}

int bpf_object__token_fd(const struct bpf_object *obj)
{
return obj->token_fd ?: -1;
}

struct btf *bpf_object__btf(const struct bpf_object *obj)
{
return obj ? obj->btf : NULL;
@@ -9091,6 +9166,7 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
if (!gen)
return -ENOMEM;
gen->opts = opts;
gen->swapped_endian = !is_native_endianness(obj);
obj->gen_loader = gen;
return 0;
}
@@ -11688,7 +11764,7 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
ret = 0;
break;
case 3:
opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
ret = libbpf_get_error(*link);
break;
@@ -12755,7 +12831,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
}

if (prog->type != BPF_PROG_TYPE_EXT) {
pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
prog->name);
return libbpf_err_ptr(-EINVAL);
}
@@ -13829,7 +13905,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
map_type = btf__type_by_id(btf, map_type_id);

if (!btf_is_datasec(map_type)) {
pr_warn("type for map '%1$s' is not a datasec: %2$s",
pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
bpf_map__name(map),
__btf_kind_str(btf_kind(map_type)));
return libbpf_err(-EINVAL);
@@ -13872,46 +13948,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
struct bpf_map *map = *map_skel->map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = map_skel->mmaped;

if (!mmaped)
if (!map_skel->mmaped)
continue;

if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
*mmaped = NULL;
continue;
}

if (map->def.type == BPF_MAP_TYPE_ARENA) {
*mmaped = map->mmaped;
continue;
}

if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
prot = PROT_READ | PROT_WRITE;

/* Remap anonymous mmap()-ed "map initialization image" as
* a BPF map-backed mmap()-ed memory, but preserving the same
* memory address. This will cause kernel to change process'
* page table to point to a different piece of kernel memory,
* but from userspace point of view memory address (and its
* contents, being identical at this point) will stay the
* same. This mapping will be released by bpf_object__close()
* as per normal clean up procedure, so we don't need to worry
* about it from skeleton's clean up perspective.
*/
*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
if (*mmaped == MAP_FAILED) {
err = -errno;
*mmaped = NULL;
pr_warn("failed to re-mmap() map '%s': %d\n",
bpf_map__name(map), err);
return libbpf_err(err);
}
*map_skel->mmaped = map->mmaped;
}

return 0;

18
src/libbpf.h
@@ -152,7 +152,7 @@ struct bpf_object_open_opts {
* log_buf and log_level settings.
*
* If specified, this log buffer will be passed for:
* - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden
* - each BPF progral load (BPF_PROG_LOAD) attempt, unless overridden
* with bpf_program__set_log() on per-program level, to get
* BPF verifier log output.
* - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
@@ -294,6 +294,14 @@ LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);

/**
* @brief **bpf_object__token_fd** is an accessor for BPF token FD associated
* with BPF object.
* @param obj Pointer to a valid BPF object
* @return BPF token FD or -1, if it wasn't set
*/
LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj);
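A brief usage sketch of the new accessor (hypothetical object path; -1 simply means no token was set up):

#include <stdio.h>
#include <bpf/libbpf.h>

void report_token(void)
{
	struct bpf_object *obj = bpf_object__open("prog.bpf.o"); /* hypothetical */
	if (!obj)
		return;
	printf("BPF token fd: %d\n", bpf_object__token_fd(obj));
	bpf_object__close(obj);
}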

struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
@@ -455,7 +463,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
/**
* @brief **bpf_program__attach()** is a generic function for attaching
* a BPF program based on auto-detection of program type, attach type,
* and extra paremeters, where applicable.
* and extra parameters, where applicable.
*
* @param prog BPF program to attach
* @return Reference to the newly created BPF link; or NULL is returned on error,
@@ -679,7 +687,7 @@ struct bpf_uprobe_opts {
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
* to the userspace function which is found by binary path and
* offset. You can optionally specify a particular proccess to attach
* offset. You can optionally specify a particular process to attach
* to. You can also optionally attach the program to the function
* exit instead of entry.
*
@@ -1593,11 +1601,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
* memory region of the ring buffer.
* This ring buffer can be used to implement a custom events consumer.
* The ring buffer starts with the *struct perf_event_mmap_page*, which
* holds the ring buffer managment fields, when accessing the header
* holds the ring buffer management fields, when accessing the header
* structure it's important to be SMP aware.
* You can refer to *perf_event_read_simple* for a simple example.
* @param pb the perf buffer structure
* @param buf_idx the buffer index to retreive
* @param buf_idx the buffer index to retrieve
* @param buf (out) gets the base pointer of the mmap()'ed memory
* @param buf_size (out) gets the size of the mmap()'ed region
* @return 0 on success, negative error code for failure

@@ -421,8 +421,11 @@ LIBBPF_1.5.0 {
global:
btf__distill_base;
btf__relocate;
btf_ext__endianness;
btf_ext__set_endianness;
bpf_map__autoattach;
bpf_map__set_autoattach;
bpf_object__token_fd;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;

@@ -10,6 +10,7 @@
#define __LIBBPF_LIBBPF_INTERNAL_H

#include <stdlib.h>
#include <byteswap.h>
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
@@ -448,11 +449,11 @@ struct btf_ext_info {
*
* The func_info subsection layout:
* record size for struct bpf_func_info in the func_info subsection
* struct btf_sec_func_info for section #1
* struct btf_ext_info_sec for section #1
* a list of bpf_func_info records for section #1
* where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
* but may not be identical
* struct btf_sec_func_info for section #2
* struct btf_ext_info_sec for section #2
* a list of bpf_func_info records for section #2
* ......
*
@@ -484,6 +485,8 @@ struct btf_ext {
struct btf_ext_header *hdr;
void *data;
};
void *data_swapped;
bool swapped_endian;
struct btf_ext_info func_info;
struct btf_ext_info line_info;
struct btf_ext_info core_relo_info;
@@ -511,6 +514,32 @@ struct bpf_line_info_min {
__u32 line_col;
};

/* Functions to byte-swap info records */

typedef void (*info_rec_bswap_fn)(void *);

static inline void bpf_func_info_bswap(struct bpf_func_info *i)
{
i->insn_off = bswap_32(i->insn_off);
i->type_id = bswap_32(i->type_id);
}

static inline void bpf_line_info_bswap(struct bpf_line_info *i)
{
i->insn_off = bswap_32(i->insn_off);
i->file_name_off = bswap_32(i->file_name_off);
i->line_off = bswap_32(i->line_off);
i->line_col = bswap_32(i->line_col);
}

static inline void bpf_core_relo_bswap(struct bpf_core_relo *i)
{
i->insn_off = bswap_32(i->insn_off);
i->type_id = bswap_32(i->type_id);
i->access_str_off = bswap_32(i->access_str_off);
i->kind = bswap_32(i->kind);
}

enum btf_field_iter_kind {
BTF_FIELD_ITER_IDS,
BTF_FIELD_ITER_STRS,
@@ -588,6 +617,16 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static inline void bpf_insn_bswap(struct bpf_insn *insn)
{
__u8 tmp_reg = insn->dst_reg;

insn->dst_reg = insn->src_reg;
insn->src_reg = tmp_reg;
insn->off = bswap_16(insn->off);
insn->imm = bswap_32(insn->imm);
}
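Every field operation in bpf_insn_bswap() is its own inverse (the register nibbles swap back, and bswap_16/bswap_32 are involutions), so applying it twice must reproduce the original instruction. A quick self-check sketch, assuming libbpf_internal.h is in scope:

#include <string.h>
#include <assert.h>
#include <linux/bpf.h>

static void check_bswap_roundtrip(void)
{
	struct bpf_insn a = { .code = BPF_ALU64 | BPF_MOV | BPF_K,
			      .dst_reg = 1, .src_reg = 0,
			      .off = 0, .imm = 42 };
	struct bpf_insn b = a;

	bpf_insn_bswap(&b);
	bpf_insn_bswap(&b);	/* swap twice: must be a no-op */
	assert(memcmp(&a, &b, sizeof(a)) == 0);
}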

/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
* Original FD is not closed or altered in any other way.
* Preserves original FD value, if it's invalid (negative).

@@ -76,7 +76,7 @@ enum libbpf_strict_mode {
* first BPF program or map creation operation. This is done only if
* kernel is too old to support memcg-based memory accounting for BPF
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
* but it can be overriden with libbpf_set_memlock_rlim() API.
* but it can be overridden with libbpf_set_memlock_rlim() API.
* Note that libbpf_set_memlock_rlim() needs to be called before
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
* operation.
@@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
* @brief **libbpf_get_error()** extracts the error code from the passed
* pointer
* @param ptr pointer returned from libbpf API function
* @return error code; or 0 if no error occured
* @return error code; or 0 if no error occurred
*
* Note, as of libbpf 1.0 this function is not necessary and not recommended
* to be used. Libbpf doesn't return error code embedded into the pointer

@@ -97,6 +97,9 @@ __u32 get_kernel_version(void)
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
return 0;

if (major == 4 && minor == 19 && patch > 255)
return KERNEL_VERSION(major, minor, 255);

return KERNEL_VERSION(major, minor, patch);
}
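The clamp matters because the classic KERNEL_VERSION() macro packs the patch level into the low 8 bits, and long-lived 4.19.y stable releases went past patch 255. Assuming the classic definition, the overflow aliases versions:

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

/* without the clamp, 4.19.260 aliases to 4.20.4:
 * (4 << 16) + (19 << 8) + 260 == (4 << 16) + (20 << 8) + 4 == 267268 */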

88
src/linker.c
@@ -135,6 +135,7 @@ struct bpf_linker {
int fd;
Elf *elf;
Elf64_Ehdr *elf_hdr;
bool swapped_endian;

/* Output sections metadata */
struct dst_sec *secs;
@@ -324,13 +325,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)

linker->elf_hdr->e_machine = EM_BPF;
linker->elf_hdr->e_type = ET_REL;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER__"
#endif
/* Set unknown ELF endianness, assign later from input files */
linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE;

/* STRTAB */
/* initialize strset with an empty string to conform to ELF */
@@ -396,6 +392,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
pr_warn_elf("failed to create SYMTAB data");
return -EINVAL;
}
/* Ensure libelf translates byte-order of symbol records */
sec->data->d_type = ELF_T_SYM;

str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
if (str_off < 0)
@@ -539,19 +537,21 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
struct src_obj *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
const int host_endianness = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const int host_endianness = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER__"
#endif
int err = 0;
Elf_Scn *scn;
Elf_Data *data;
Elf64_Ehdr *ehdr;
Elf64_Shdr *shdr;
struct src_sec *sec;
unsigned char obj_byteorder;
unsigned char link_byteorder = linker->elf_hdr->e_ident[EI_DATA];
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
const unsigned char host_byteorder = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const unsigned char host_byteorder = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER__"
#endif

pr_debug("linker: adding object file '%s'...\n", filename);

@@ -577,11 +577,25 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
pr_warn_elf("failed to get ELF header for %s", filename);
return err;
}
if (ehdr->e_ident[EI_DATA] != host_endianness) {

/* Linker output endianness set by first input object */
obj_byteorder = ehdr->e_ident[EI_DATA];
if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
err = -EOPNOTSUPP;
pr_warn_elf("unsupported byte order of ELF file %s", filename);
pr_warn("unknown byte order of ELF file %s\n", filename);
return err;
}
if (link_byteorder == ELFDATANONE) {
linker->elf_hdr->e_ident[EI_DATA] = obj_byteorder;
linker->swapped_endian = obj_byteorder != host_byteorder;
pr_debug("linker: set %s-endian output byte order\n",
obj_byteorder == ELFDATA2MSB ? "big" : "little");
} else if (link_byteorder != obj_byteorder) {
err = -EOPNOTSUPP;
pr_warn("byte order mismatch with ELF file %s\n", filename);
return err;
}

if (ehdr->e_type != ET_REL
|| ehdr->e_machine != EM_BPF
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
@@ -1109,6 +1123,24 @@ static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec
return true;
}

static bool is_exec_sec(struct dst_sec *sec)
{
if (!sec || sec->ephemeral)
return false;
return (sec->shdr->sh_type == SHT_PROGBITS) &&
(sec->shdr->sh_flags & SHF_EXECINSTR);
}

static void exec_sec_bswap(void *raw_data, int size)
{
const int insn_cnt = size / sizeof(struct bpf_insn);
struct bpf_insn *insn = raw_data;
int i;

for (i = 0; i < insn_cnt; i++, insn++)
bpf_insn_bswap(insn);
}

static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
{
void *tmp;
@@ -1168,6 +1200,10 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src
memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
/* now copy src data at a properly aligned offset */
memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);

/* convert added bpf insns to native byte-order */
if (linker->swapped_endian && is_exec_sec(dst))
exec_sec_bswap(dst->raw_data + dst_align_sz, src->shdr->sh_size);
}

dst->sec_sz = dst_final_sz;
@@ -1413,7 +1449,7 @@ recur:
return true;
case BTF_KIND_PTR:
/* just validate overall shape of the referenced type, so no
* contents comparison for struct/union, and allowd fwd vs
* contents comparison for struct/union, and allowed fwd vs
* struct/union
*/
exact = false;
@@ -1962,7 +1998,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,

/* If existing symbol is a strong resolved symbol, bail out,
* because we lost resolution battle have nothing to
* contribute. We already checked abover that there is no
* contribute. We already checked above that there is no
* strong-strong conflict. We also already tightened binding
* and visibility, so nothing else to contribute at that point.
*/
@@ -2415,6 +2451,10 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
if (glob_sym && glob_sym->var_idx >= 0) {
__s64 sz;

/* FUNCs don't have size, nothing to update */
if (btf_is_func(t))
continue;

dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
/* Because underlying BTF type might have
* changed, so might its size have changed, so
@@ -2628,6 +2668,10 @@ int bpf_linker__finalize(struct bpf_linker *linker)
if (!sec->scn)
continue;

/* restore sections with bpf insns to target byte-order */
if (linker->swapped_endian && is_exec_sec(sec))
exec_sec_bswap(sec->raw_data, sec->sec_sz);

sec->data->d_buf = sec->raw_data;
}

@@ -2696,6 +2740,7 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,

static int finalize_btf(struct bpf_linker *linker)
{
enum btf_endianness link_endianness;
LIBBPF_OPTS(btf_dedup_opts, opts);
struct btf *btf = linker->btf;
const void *raw_data;
@@ -2740,6 +2785,13 @@ static int finalize_btf(struct bpf_linker *linker)
return err;
}

/* Set .BTF and .BTF.ext output byte order */
link_endianness = linker->elf_hdr->e_ident[EI_DATA] == ELFDATA2MSB ?
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
btf__set_endianness(linker->btf, link_endianness);
if (linker->btf_ext)
btf_ext__set_endianness(linker->btf_ext, link_endianness);

/* Emit .BTF section */
raw_data = btf__raw_data(linker->btf, &raw_sz);
if (!raw_data)

@@ -1339,7 +1339,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
cands->cands[i].id, cand_spec);
if (err < 0) {
bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
prog_name, relo_idx, i, spec_buf, err);
return err;
}

@@ -107,7 +107,7 @@ static inline void skel_free(const void *p)
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
* skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
* does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
* is not nessary.
* is not necessary.
*
* For user space:
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
@@ -351,10 +351,11 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
attr.test.ctx_size_in = opts->ctx->sz;
err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
if (err < 0 || (int)attr.test.retval < 0) {
opts->errstr = "failed to execute loader prog";
if (err < 0) {
opts->errstr = "failed to execute loader prog";
set_err;
} else {
opts->errstr = "error returned by loader prog";
err = (int)attr.test.retval;
#ifndef __KERNEL__
errno = -err;

@@ -39,7 +39,7 @@ enum __bpf_usdt_arg_type {
struct __bpf_usdt_arg_spec {
/* u64 scalar interpreted depending on arg_type, see below */
__u64 val_off;
/* arg location case, see bpf_udst_arg() for details */
/* arg location case, see bpf_usdt_arg() for details */
enum __bpf_usdt_arg_type arg_type;
/* offset of referenced register within struct pt_regs */
short reg_off;