bpf, net: introduce bpf_struct_ops_desc.

Move some members of bpf_struct_ops to bpf_struct_ops_desc. type_id is
no longer available in bpf_struct_ops; modules should get it from the
btf received by the kmod's init function.

Cc: netdev@vger.kernel.org
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
Link: https://lore.kernel.org/r/20240119225005.668602-4-thinker.li@gmail.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Kui-Feng Lee 2024-01-19 14:49:54 -08:00 committed by Martin KaFai Lau
parent 9567839538
commit 4c5763ed99
5 changed files with 73 additions and 49 deletions
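
The commit message's point about modules resolving type_id on their own is the pattern the bpf_tcp_ca.c and bpf_dummy_struct_ops.c hunks below introduce: look the struct up in the btf passed to the subsystem's init callback. A minimal sketch of that pattern, with a hypothetical "my_ops" subsystem standing in for the real ones:

/* Sketch only: a struct_ops subsystem caching its own BTF type from its
 * ->init() callback instead of reading st_ops->type_id, which this patch
 * removes.  "my_ops" and the variable names are illustrative.
 */
static const struct btf_type *my_ops_type;

static int my_ops_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "my_ops", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	my_ops_type = btf_type_by_id(btf, type_id);
	return 0;
}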

include/linux/bpf.h

@@ -1673,18 +1673,23 @@ struct bpf_struct_ops {
void (*unreg)(void *kdata);
int (*update)(void *kdata, void *old_kdata);
int (*validate)(void *kdata);
const struct btf_type *type;
const struct btf_type *value_type;
void *cfi_stubs;
const char *name;
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
};
struct bpf_struct_ops_desc {
struct bpf_struct_ops *st_ops;
const struct btf_type *type;
const struct btf_type *value_type;
u32 type_id;
u32 value_id;
void *cfi_stubs;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
@@ -1728,7 +1733,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id)
{
return NULL;
}

kernel/bpf/bpf_struct_ops.c

@@ -32,7 +32,7 @@ struct bpf_struct_ops_value {
struct bpf_struct_ops_map {
struct bpf_map map;
struct rcu_head rcu;
const struct bpf_struct_ops *st_ops;
const struct bpf_struct_ops_desc *st_ops_desc;
/* protect map_update */
struct mutex lock;
/* link has all the bpf_links that is populated
@@ -92,9 +92,9 @@ enum {
__NR_BPF_STRUCT_OPS_TYPE,
};
static struct bpf_struct_ops * const bpf_struct_ops[] = {
static struct bpf_struct_ops_desc bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name) \
[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
[BPF_STRUCT_OPS_TYPE_##_name] = { .st_ops = &bpf_##_name },
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};
@@ -115,10 +115,11 @@ enum {
IDX_MODULE_ID,
};
static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops,
struct btf *btf,
struct bpf_verifier_log *log)
static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
struct btf *btf,
struct bpf_verifier_log *log)
{
struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
const struct btf_member *member;
const struct btf_type *t;
s32 type_id, value_id;
@@ -190,18 +191,18 @@ static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops,
pr_warn("Error in init bpf_struct_ops %s\n",
st_ops->name);
} else {
st_ops->type_id = type_id;
st_ops->type = t;
st_ops->value_id = value_id;
st_ops->value_type = btf_type_by_id(btf,
value_id);
st_ops_desc->type_id = type_id;
st_ops_desc->type = t;
st_ops_desc->value_id = value_id;
st_ops_desc->value_type = btf_type_by_id(btf,
value_id);
}
}
}
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
struct bpf_struct_ops *st_ops;
struct bpf_struct_ops_desc *st_ops_desc;
u32 i;
/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
@@ -210,14 +211,14 @@ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
#undef BPF_STRUCT_OPS_TYPE
for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
st_ops = bpf_struct_ops[i];
bpf_struct_ops_init_one(st_ops, btf, log);
st_ops_desc = &bpf_struct_ops[i];
bpf_struct_ops_desc_init(st_ops_desc, btf, log);
}
}
extern struct btf *btf_vmlinux;
static const struct bpf_struct_ops *
static const struct bpf_struct_ops_desc *
bpf_struct_ops_find_value(u32 value_id)
{
unsigned int i;
@@ -226,14 +227,14 @@ bpf_struct_ops_find_value(u32 value_id)
return NULL;
for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
if (bpf_struct_ops[i]->value_id == value_id)
return bpf_struct_ops[i];
if (bpf_struct_ops[i].value_id == value_id)
return &bpf_struct_ops[i];
}
return NULL;
}
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id)
{
unsigned int i;
@@ -241,8 +242,8 @@ const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
return NULL;
for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
if (bpf_struct_ops[i]->type_id == type_id)
return bpf_struct_ops[i];
if (bpf_struct_ops[i].type_id == type_id)
return &bpf_struct_ops[i];
}
return NULL;
@@ -302,7 +303,7 @@ static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
const struct btf_type *t = st_map->st_ops->type;
const struct btf_type *t = st_map->st_ops_desc->type;
u32 i;
for (i = 0; i < btf_type_vlen(t); i++) {
@@ -382,11 +383,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 flags)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
const struct bpf_struct_ops *st_ops = st_map->st_ops;
const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
struct bpf_struct_ops_value *uvalue, *kvalue;
const struct btf_type *module_type;
const struct btf_member *member;
const struct btf_type *t = st_ops->type;
const struct btf_type *t = st_ops_desc->type;
struct bpf_tramp_links *tlinks;
void *udata, *kdata;
int prog_fd, err;
@@ -399,7 +401,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (*(u32 *)key != 0)
return -E2BIG;
err = check_zero_holes(st_ops->value_type, value);
err = check_zero_holes(st_ops_desc->value_type, value);
if (err)
return err;
@@ -492,7 +494,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
}
if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
prog->aux->attach_btf_id != st_ops->type_id ||
prog->aux->attach_btf_id != st_ops_desc->type_id ||
prog->expected_attach_type != i) {
bpf_prog_put(prog);
err = -EINVAL;
@@ -588,7 +590,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
BPF_STRUCT_OPS_STATE_TOBEFREE);
switch (prev_state) {
case BPF_STRUCT_OPS_STATE_INUSE:
st_map->st_ops->unreg(&st_map->kvalue.data);
st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
bpf_map_put(map);
return 0;
case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -669,22 +671,22 @@ static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
const struct bpf_struct_ops *st_ops;
const struct bpf_struct_ops_desc *st_ops_desc;
size_t st_map_size;
struct bpf_struct_ops_map *st_map;
const struct btf_type *t, *vt;
struct bpf_map *map;
int ret;
st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
if (!st_ops)
st_ops_desc = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
if (!st_ops_desc)
return ERR_PTR(-ENOTSUPP);
vt = st_ops->value_type;
vt = st_ops_desc->value_type;
if (attr->value_size != vt->size)
return ERR_PTR(-EINVAL);
t = st_ops->type;
t = st_ops_desc->type;
st_map_size = sizeof(*st_map) +
/* kvalue stores the
@@ -696,7 +698,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
if (!st_map)
return ERR_PTR(-ENOMEM);
st_map->st_ops = st_ops;
st_map->st_ops_desc = st_ops_desc;
map = &st_map->map;
ret = bpf_jit_charge_modmem(PAGE_SIZE);
@@ -733,8 +735,8 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
const struct bpf_struct_ops *st_ops = st_map->st_ops;
const struct btf_type *vt = st_ops->value_type;
const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
const struct btf_type *vt = st_ops_desc->value_type;
u64 usage;
usage = sizeof(*st_map) +
@@ -808,7 +810,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
/* st_link->map can be NULL if
* bpf_struct_ops_link_create() fails to register.
*/
st_map->st_ops->unreg(&st_map->kvalue.data);
st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
bpf_map_put(&st_map->map);
}
kfree(st_link);
@@ -855,7 +857,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
if (!bpf_struct_ops_valid_to_reg(new_map))
return -EINVAL;
if (!st_map->st_ops->update)
if (!st_map->st_ops_desc->st_ops->update)
return -EOPNOTSUPP;
mutex_lock(&update_mutex);
@@ -868,12 +870,12 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
/* The new and old struct_ops must be the same type. */
if (st_map->st_ops != old_st_map->st_ops) {
if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
err = -EINVAL;
goto err_out;
}
err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
if (err)
goto err_out;
@@ -924,7 +926,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
if (err)
goto err_out;
err = st_map->st_ops->reg(st_map->kvalue.data);
err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
if (err) {
bpf_link_cleanup(&link_primer);
link = NULL;
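
For reference, the bpf_struct_ops[] table changed above is still generated from bpf_struct_ops_types.h; each entry now wraps the subsystem's bpf_struct_ops in a descriptor. Roughly, the macro expansion looks like this (sketch only; which entries exist depends on the kernel config):

static struct bpf_struct_ops_desc bpf_struct_ops[] = {
	[BPF_STRUCT_OPS_TYPE_bpf_dummy_ops] =
		{ .st_ops = &bpf_bpf_dummy_ops },
	[BPF_STRUCT_OPS_TYPE_tcp_congestion_ops] =
		{ .st_ops = &bpf_tcp_congestion_ops },
};

The remaining descriptor fields (type, value_type, type_id, value_id) start out zeroed and are filled at boot by bpf_struct_ops_desc_init() when bpf_struct_ops_init() walks the table.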

kernel/bpf/verifier.c

@@ -20285,6 +20285,7 @@ static void print_verification_stats(struct bpf_verifier_env *env)
static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{
const struct btf_type *t, *func_proto;
const struct bpf_struct_ops_desc *st_ops_desc;
const struct bpf_struct_ops *st_ops;
const struct btf_member *member;
struct bpf_prog *prog = env->prog;
@@ -20297,14 +20298,15 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
btf_id = prog->aux->attach_btf_id;
st_ops = bpf_struct_ops_find(btf_id);
if (!st_ops) {
st_ops_desc = bpf_struct_ops_find(btf_id);
if (!st_ops_desc) {
verbose(env, "attach_btf_id %u is not a supported struct\n",
btf_id);
return -ENOTSUPP;
}
st_ops = st_ops_desc->st_ops;
t = st_ops->type;
t = st_ops_desc->type;
member_idx = prog->expected_attach_type;
if (member_idx >= btf_type_vlen(t)) {
verbose(env, "attach to invalid member idx %u of struct %s\n",

net/bpf/bpf_dummy_struct_ops.c

@@ -22,6 +22,8 @@ struct bpf_dummy_ops_test_args {
struct bpf_dummy_ops_state state;
};
static struct btf *bpf_dummy_ops_btf;
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
@@ -90,9 +92,15 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
void *image = NULL;
unsigned int op_idx;
int prog_ret;
s32 type_id;
int err;
if (prog->aux->attach_btf_id != st_ops->type_id)
type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
bpf_bpf_dummy_ops.name,
BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;
if (prog->aux->attach_btf_id != type_id)
return -EOPNOTSUPP;
func_proto = prog->aux->attach_func_proto;
@@ -148,6 +156,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
static int bpf_dummy_init(struct btf *btf)
{
bpf_dummy_ops_btf = btf;
return 0;
}

net/ipv4/bpf_tcp_ca.c

@@ -20,6 +20,7 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;
static int bpf_tcp_ca_init(struct btf *btf)
{
@@ -36,6 +37,11 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;
tcp_congestion_ops_type = btf_type_by_id(btf, type_id);
return 0;
}
@@ -149,7 +155,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
u32 midx;
midx = prog->expected_attach_type;
t = bpf_tcp_congestion_ops.type;
t = tcp_congestion_ops_type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;