mirror of
https://github.com/netdata/libbpf.git
synced 2026-03-27 11:49:07 +08:00
Without lskel, the CO-RE relocations are processed by libbpf before any other work is done. Instead, when lskel is needed, remember each relocation as a RELO_CORE kind. Then, when the loader prog is generated for a given bpf program, pass that program's CO-RE relos to the gen loader via bpf_gen__record_relo_core(). The gen loader will remember them as-is and later pass them as-is into the kernel. The normal libbpf flow is to process CO-RE relos early, before call relos happen. In the gen_loader case, the core relos have to be added to the other relos so they are copied together when a bpf static function is appended in different places to other main bpf progs. During the copy, append_subprog_relos() will adjust insn_idx for normal relos and for the RELO_CORE kind too. When that is done, each struct reloc_desc has the correct relos for its specific main prog. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20211201181040.23337-10-alexei.starovoitov@gmail.com
72 lines
2.0 KiB
C
72 lines
2.0 KiB
C
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
|
/* Copyright (c) 2021 Facebook */
|
|
#ifndef __BPF_GEN_INTERNAL_H
|
|
#define __BPF_GEN_INTERNAL_H
|
|
|
|
#include "bpf.h"
|
|
|
|
/* A single recorded extern-ksym relocation: identifies the symbol, its
 * BTF kind, and the instruction that must be patched once the symbol is
 * resolved by the loader program.
 */
struct ksym_relo_desc {
	const char *name;	/* extern symbol name */
	int kind;		/* BTF kind of the target symbol (e.g. var vs. func) — NOTE(review): confirm against bpf_gen__record_extern() callers */
	int insn_idx;		/* index of the instruction to patch */
	bool is_weak;		/* weak extern: leaving it unresolved is not an error */
	bool is_typeless;	/* typeless ksym: no BTF type information available */
};
|
|
|
|
/* Per-symbol bookkeeping for a deduplicated ksym: one entry per unique
 * extern symbol, with a reference count and the location of the first
 * instruction that references it.
 */
struct ksym_desc {
	const char *name;	/* symbol name */
	int ref;		/* number of relocations referencing this symbol */
	int kind;		/* BTF kind of the symbol */
	union {
		/* used for kfunc */
		int off;
		/* used for typeless ksym */
		bool typeless;
	};
	int insn;		/* insn index of the first reference — NOTE(review): confirm exact semantics in gen_loader.c */
};
|
|
|
|
struct bpf_gen {
|
|
struct gen_loader_opts *opts;
|
|
void *data_start;
|
|
void *data_cur;
|
|
void *insn_start;
|
|
void *insn_cur;
|
|
ssize_t cleanup_label;
|
|
__u32 nr_progs;
|
|
__u32 nr_maps;
|
|
int log_level;
|
|
int error;
|
|
struct ksym_relo_desc *relos;
|
|
int relo_cnt;
|
|
struct bpf_core_relo *core_relos;
|
|
int core_relo_cnt;
|
|
char attach_target[128];
|
|
int attach_kind;
|
|
struct ksym_desc *ksyms;
|
|
__u32 nr_ksyms;
|
|
int fd_array;
|
|
int nr_fd_array;
|
|
};
|
|
|
|
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
|
|
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
|
|
void bpf_gen__free(struct bpf_gen *gen);
|
|
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
|
|
void bpf_gen__map_create(struct bpf_gen *gen,
|
|
enum bpf_map_type map_type, const char *map_name,
|
|
__u32 key_size, __u32 value_size, __u32 max_entries,
|
|
struct bpf_map_create_opts *map_attr, int map_idx);
|
|
void bpf_gen__prog_load(struct bpf_gen *gen,
|
|
enum bpf_prog_type prog_type, const char *prog_name,
|
|
const char *license, struct bpf_insn *insns, size_t insn_cnt,
|
|
struct bpf_prog_load_opts *load_attr, int prog_idx);
|
|
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
|
|
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
|
|
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
|
|
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
|
bool is_typeless, int kind, int insn_idx);
|
|
void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo);
|
|
|
|
#endif
|