mirror of
https://github.com/netdata/libbpf.git
synced 2026-03-24 10:19:07 +08:00
AF_XDP: add xsk.{c,h} to Makefile and fix build
This patch makes sure we build AF_XDP-related code as part of libbpf. This also required copying a few uapi/linux headers and adding a few needed definitions to the include headers. Signed-off-by: Andrii Nakryiko <andriin@fb.com>
This commit is contained in:
committed by
Andrii Nakryiko
parent
7a431904c8
commit
cb658e9724
52
include/linux/compiler.h
Normal file
52
include/linux/compiler.h
Normal file
@@ -0,0 +1,52 @@
|
||||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Minimal compiler/memory-barrier helpers for libbpf's AF_XDP (xsk) code.
 *
 * Uses the double-underscore GNU spellings (__asm__, __volatile__,
 * __typeof__) so the header stays usable when consumers compile in strict
 * ISO modes such as -std=c11, where the plain `asm`/`typeof` keywords are
 * disabled by the compiler.
 */

#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

/* Branch-prediction hints. */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/*
 * Force a single access through a volatile-qualified lvalue so the
 * compiler cannot tear, fuse, or re-read it.  The argument is fully
 * parenthesized (&(x), not &x) so expressions passed as x bind correctly.
 */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x)) = (v)

/* Compiler-only barrier: blocks compiler reordering, emits no instruction. */
#define barrier() __asm__ __volatile__("" ::: "memory")

#if defined(__x86_64__)

/* Hardware fences on x86-64. */
# define smp_mb() __asm__ __volatile__("mfence" ::: "memory")
# define smp_rmb() __asm__ __volatile__("lfence" ::: "memory")
# define smp_wmb() __asm__ __volatile__("sfence" ::: "memory")

/*
 * On x86-64 a compiler barrier suffices for release/acquire ordering of
 * plain stores/loads (TSO memory model) — NOTE(review): relied on by the
 * original code, which used barrier() here rather than a fence.
 */
# define smp_store_release(p, v) \
	do { \
		barrier(); \
		WRITE_ONCE(*(p), v); \
	} while (0)

# define smp_load_acquire(p) \
	({ \
		__typeof__(*(p)) ___p = READ_ONCE(*(p)); \
		barrier(); \
		___p; \
	})

#else /* !__x86_64__ */

/* Portable fallback: full memory fence via the GCC/Clang builtin. */
# define smp_mb() __sync_synchronize()
# define smp_rmb() smp_mb()
# define smp_wmb() smp_mb()

# define smp_store_release(p, v) \
	do { \
		smp_mb(); \
		WRITE_ONCE(*(p), v); \
	} while (0)

# define smp_load_acquire(p) \
	({ \
		__typeof__(*(p)) ___p = READ_ONCE(*(p)); \
		smp_mb(); \
		___p; \
	})

#endif /* defined(__x86_64__) */

#endif /* __LINUX_COMPILER_H */
|
||||
@@ -5,6 +5,14 @@
|
||||
|
||||
#include <linux/bpf.h>
|
||||
|
||||
/* BPF_ALU64_IMM(OP, DST, IMM): build one 64-bit ALU instruction applying
 * operation OP (e.g. BPF_ADD) to register DST with immediate operand IMM. */
#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
|
||||
|
||||
#define BPF_MOV64_IMM(DST, IMM) \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU64 | BPF_MOV | BPF_K, \
|
||||
@@ -29,4 +37,62 @@
|
||||
.off = 0, \
|
||||
.imm = ((FUNC) - BPF_FUNC_unspec) })
|
||||
|
||||
/* BPF_LDX_MEM(SIZE, DST, SRC, OFF): memory load of width SIZE,
 * DST = *(SIZE *)(SRC + OFF). */
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* BPF_STX_MEM(SIZE, DST, SRC, OFF): memory store of width SIZE,
 * *(SIZE *)(DST + OFF) = SRC. */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* BPF_MOV64_REG(DST, SRC): 64-bit register-to-register move, DST = SRC. */
#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* BPF_MOV32_IMM(DST, IMM): 32-bit move of immediate IMM into DST. */
#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* BPF_LD_IMM64_RAW(DST, SRC, IMM): 64-bit immediate load.  Expands to TWO
 * comma-separated bpf_insn compound literals (the instruction is
 * double-width): the first carries the low 32 bits of IMM, the second the
 * high 32 bits.  Only valid inside an instruction-array initializer. */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })

/* BPF_LD_MAP_FD(DST, MAP_FD): load map file descriptor MAP_FD into DST as
 * a 64-bit immediate with src_reg set to BPF_PSEUDO_MAP_FD. */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* BPF_JMP_IMM(OP, DST, IMM, OFF): conditional jump comparing register DST
 * against immediate IMM with OP; branches OFF instructions when taken. */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
@@ -39,5 +39,6 @@
|
||||
#endif
|
||||
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
|
||||
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
|
||||
|
||||
#endif
|
||||
|
||||
@@ -2,40 +2,7 @@
|
||||
#ifndef _TOOLS_LINUX_RING_BUFFER_H_
|
||||
#define _TOOLS_LINUX_RING_BUFFER_H_
|
||||
|
||||
#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
|
||||
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
|
||||
|
||||
#define barrier() asm volatile("" ::: "memory")
|
||||
|
||||
#if defined(__x86_64__)
|
||||
# define smp_store_release(p, v) \
|
||||
do { \
|
||||
barrier(); \
|
||||
WRITE_ONCE(*p, v); \
|
||||
} while (0)
|
||||
|
||||
# define smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p = READ_ONCE(*p); \
|
||||
barrier(); \
|
||||
___p; \
|
||||
})
|
||||
#else
|
||||
# define smp_mb() __sync_synchronize()
|
||||
|
||||
# define smp_store_release(p, v) \
|
||||
do { \
|
||||
smp_mb(); \
|
||||
WRITE_ONCE(*p, v); \
|
||||
} while (0)
|
||||
|
||||
# define smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p = READ_ONCE(*p); \
|
||||
smp_mb(); \
|
||||
___p; \
|
||||
})
|
||||
#endif /* defined(__x86_64__) */
|
||||
#include <linux/compiler.h>
|
||||
|
||||
static inline __u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
|
||||
{
|
||||
|
||||
Reference in New Issue
Block a user