Skip to content

libbpf and BPF Skeletons

The standard userspace library for loading and interacting with BPF programs

What libbpf does

libbpf is the official userspace library for BPF (tools/lib/bpf/ in the kernel tree), originally introduced in Linux 4.3 by Wang Nan. It handles:

  • Loading BPF ELF objects (.bpf.o files)
  • CO-RE relocation patching at load time
  • Map creation from BPF program declarations
  • Program attachment to hooks
  • Map access from userspace
  • Ring buffer and perf buffer polling

The BPF skeleton workflow

The recommended modern approach uses BPF skeletons — generated C structs that wrap a specific BPF program, providing type-safe access to its maps and programs:

                 ┌──────────────────────────────────────┐
                 │ Write BPF program (minimal.bpf.c)    │
                 └───────────────┬──────────────────────┘
                                 │ clang -target bpf -g
                 ┌──────────────────────────────────────┐
                 │ BPF object (minimal.bpf.o)           │
                 └───────────────┬──────────────────────┘
                                 │ bpftool gen skeleton
                 ┌──────────────────────────────────────┐
                 │ Generated header (minimal.skel.h)    │
                 │   minimal_bpf__open()                │
                 │   minimal_bpf__load()                │
                 │   minimal_bpf__attach()              │
                 │   minimal_bpf__destroy()             │
                 └───────────────┬──────────────────────┘
                                 │ #include in userspace code
                 ┌──────────────────────────────────────┐
                 │ Userspace program (minimal.c)        │
                 └──────────────────────────────────────┘

Complete minimal example

BPF side: minimal.bpf.c

// SPDX-License-Identifier: GPL-2.0
/* minimal.bpf.c — BPF side: count syscalls per PID plus a global total. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

/* Map: counts syscalls per PID */
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 8192);
    __type(key, u32);    /* pid */
    __type(value, u64);  /* count */
} syscall_count SEC(".maps");

/* Global variable (stored in .bss, backed by a MAP_TYPE_ARRAY) */
volatile u64 total_syscalls = 0;

SEC("tracepoint/raw_syscalls/sys_enter")
int count_syscall(struct trace_event_raw_sys_enter *ctx)
{
    u32 pid = bpf_get_current_pid_tgid() >> 32; /* upper 32 bits = tgid */
    u64 *count;

    count = bpf_map_lookup_elem(&syscall_count, &pid);
    if (count) {
        /* This program can run concurrently on every CPU, so the
         * increment must be atomic — a plain (*count)++ is a
         * read-modify-write that loses updates under contention. */
        __sync_fetch_and_add(count, 1);
    } else {
        u64 one = 1;
        /* BPF_NOEXIST: if another CPU inserted this key first, the
         * update fails and that CPU's initial count of 1 stands. */
        bpf_map_update_elem(&syscall_count, &pid, &one, BPF_NOEXIST);
    }

    __sync_fetch_and_add(&total_syscalls, 1);
    return 0;
}

Generate the skeleton

# Compile BPF program
#   -target bpf : emit BPF bytecode
#   -g          : generate BTF, required for CO-RE and skeleton generation
#   -I.         : find the locally generated vmlinux.h
# (<bpf/bpf_helpers.h> resolves from the default include path, e.g.
#  /usr/include/bpf/ — no extra -I is needed for it)
clang -target bpf -O2 -g \
    -I. \
    -c minimal.bpf.c -o minimal.bpf.o

# Generate skeleton header
bpftool gen skeleton minimal.bpf.o > minimal.skel.h

The generated minimal.skel.h contains:

/* Autogenerated by bpftool */
/* One struct wraps the whole BPF object; the nested maps/progs/links
 * structs give userspace typed, by-name access instead of string
 * lookups. */
struct minimal_bpf {
    struct bpf_object_skeleton *skeleton;
    struct bpf_object *obj;

    struct {
        struct bpf_map *syscall_count;    /* typed map reference */
        struct bpf_map *bss;              /* backing map for .bss globals */
        struct bpf_map *rodata;           /* backing map for const data */
    } maps;

    struct {
        struct bpf_program *count_syscall; /* typed program reference */
    } progs;

    struct {
        struct bpf_link *count_syscall;    /* typed link reference */
    } links;

    /* Mirrors the BPF object's .bss section, giving the userspace
     * process direct access to the program's globals (see minimal.c).
     * NOTE(review): field types come from the object's BTF — real
     * bpftool output may spell this __u64; confirm against an actual
     * generated header. */
    struct minimal_bpf__bss {
        volatile u64 total_syscalls;       /* directly accessible! */
    } *bss;
};

/* Lifecycle: open → (configure) → load → attach → destroy. */
static inline struct minimal_bpf *minimal_bpf__open(void);
static inline int minimal_bpf__load(struct minimal_bpf *obj);
static inline int minimal_bpf__attach(struct minimal_bpf *obj);
static inline void minimal_bpf__destroy(struct minimal_bpf *obj);

Userspace side: minimal.c

#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include "minimal.skel.h"

/* Set asynchronously from the SIGINT handler. volatile sig_atomic_t is
 * the only object type the C standard guarantees is safe to write from
 * a signal handler (and it removes the implicit <stdbool.h> dependency
 * the `bool` version had). */
static volatile sig_atomic_t exiting = 0;

static void sig_handler(int sig)
{
    (void)sig;  /* registered for SIGINT only; signum not needed */
    exiting = 1;
}

int main(void)
{
    struct minimal_bpf *skel;
    int err;

    /* 1. Open: parse ELF, create map/prog objects */
    skel = minimal_bpf__open();
    if (!skel) {
        fprintf(stderr, "Failed to open BPF skeleton\n");
        return 1;
    }

    /* Optional: configure before load */
    /* skel->rodata->my_config = 42; */

    /* 2. Load: create maps, verify+JIT programs, CO-RE relocations */
    err = minimal_bpf__load(skel);
    if (err) {
        fprintf(stderr, "Failed to load BPF skeleton: %d\n", err);
        goto cleanup;
    }

    /* 3. Attach: connect programs to their hooks */
    err = minimal_bpf__attach(skel);
    if (err) {
        fprintf(stderr, "Failed to attach BPF skeleton: %d\n", err);
        goto cleanup;
    }

    signal(SIGINT, sig_handler);
    printf("Counting syscalls... Press Ctrl+C to stop\n");

    while (!exiting) {
        sleep(1);

        /* Access global variables directly through skeleton. Cast for
         * printf: the skeleton field is a kernel-style u64 whose
         * underlying C type varies by architecture. */
        printf("Total syscalls: %llu\n",
               (unsigned long long)skel->bss->total_syscalls);

        /* Access map values. The kernel-side key/value types u32/u64
         * are not defined in userspace — use the fixed-width
         * <stdint.h> equivalents (sizes must match the map decl). */
        uint32_t pid = (uint32_t)getpid();
        uint64_t count;
        int map_fd = bpf_map__fd(skel->maps.syscall_count);
        if (bpf_map_lookup_elem(map_fd, &pid, &count) == 0)
            printf("This process syscalls: %llu\n",
                   (unsigned long long)count);
    }

cleanup:
    /* 4. Destroy: detach programs, close maps, free memory */
    minimal_bpf__destroy(skel);
    /* libbpf returns negative errno codes; flip the sign so the shell
     * sees a conventional positive exit status instead of err & 0xff. */
    return err < 0 ? -err : err;
}
# Compile userspace
# (-lelf and -lz are libbpf's own link-time dependencies)
gcc -o minimal minimal.c -lbpf -lelf -lz
# Run (loading BPF programs requires privileges — hence sudo)
sudo ./minimal

Ring buffer polling

For event-driven programs using BPF_MAP_TYPE_RINGBUF:

/* Userspace polling with ring_buffer API */
#include <bpf/libbpf.h>
#include "events.skel.h"

/* Event record shared with the BPF producer. Userspace has no `u32`
 * typedef, so use the <stdint.h> fixed-width type; the layout must
 * match the kernel-side struct byte-for-byte. */
struct event {
    uint32_t pid;
    char comm[16];  /* task command name; assumed NUL-terminated by the producer — TODO confirm */
};

/*
 * Ring buffer callback, invoked once per record by ring_buffer__poll().
 * @ctx:     user context pointer supplied to ring_buffer__new()
 * @data:    pointer to one record inside the ring buffer
 * @data_sz: size of that record in bytes
 * Returns 0 to continue consuming records (non-zero aborts polling —
 * see libbpf ring_buffer docs).
 */
static int handle_event(void *ctx, void *data, size_t data_sz)
{
    struct event *e = data;
    printf("pid=%u comm=%s\n", e->pid, e->comm);
    return 0;
}

int main(void)
{
    struct ring_buffer *rb = NULL;
    int ret = 1;

    /* open_and_load() returns NULL on any open/verifier failure;
     * using the skeleton unchecked would dereference NULL. */
    struct events_bpf *skel = events_bpf__open_and_load();
    if (!skel) {
        fprintf(stderr, "Failed to open and load BPF skeleton\n");
        return 1;
    }

    if (events_bpf__attach(skel)) {
        fprintf(stderr, "Failed to attach BPF skeleton\n");
        goto cleanup;
    }

    /* Create ring buffer manager */
    rb = ring_buffer__new(
        bpf_map__fd(skel->maps.events),
        handle_event,
        NULL,  /* ctx passed to handle_event */
        NULL   /* opts */
    );
    if (!rb) {
        fprintf(stderr, "Failed to create ring buffer\n");
        goto cleanup;
    }

    for (;;) {
        /* Poll with 100ms timeout; -EINTR means a signal arrived.
         * NOTE(review): EINTR comes from <errno.h> — confirm it
         * reaches this translation unit via the skeleton header. */
        int err = ring_buffer__poll(rb, 100);
        if (err == -EINTR)
            break;
    }
    ret = 0;

cleanup:
    if (rb)
        ring_buffer__free(rb);
    events_bpf__destroy(skel);
    return ret;
}

libbpf core objects

/* Object: represents a .bpf.o file. (Error checks omitted for brevity:
 * open returns NULL on failure, load returns a negative error code —
 * see the checked usage in minimal.c above.) */
struct bpf_object *obj = bpf_object__open("prog.bpf.o");
bpf_object__load(obj);   /* creates maps, verifies programs */
bpf_object__close(obj);

/* Program: represents one BPF function (looked up by C symbol name) */
struct bpf_program *prog =
    bpf_object__find_program_by_name(obj, "my_prog");

/* Change program type before load */
bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);

/* Attach manually */
struct bpf_link *link = bpf_program__attach(prog);
bpf_link__destroy(link);  /* detaches */

/* Map: represents a BPF map (looked up by declared map name) */
struct bpf_map *map =
    bpf_object__find_map_by_name(obj, "my_map");
int fd = bpf_map__fd(map);

/* Resize map before load */
bpf_map__set_max_entries(map, 65536);

Manual program attachment

For program types that need explicit setup:

/* Each snippet below is independent — `link` is redeclared per example.
 * NOTE(review): on failure these return an error-encoding pointer;
 * how to test it (libbpf_get_error() vs NULL + errno) depends on the
 * libbpf version — confirm for the version in use. */

/* Attach to a specific network interface (XDP) */
struct bpf_link *link = bpf_program__attach_xdp(prog, ifindex);

/* Attach kprobe by function name */
struct bpf_link *link = bpf_program__attach_kprobe(prog,
    false,          /* false=kprobe, true=kretprobe */
    "tcp_sendmsg"   /* kernel function name */
);

/* Attach tracepoint */
struct bpf_link *link = bpf_program__attach_tracepoint(prog,
    "syscalls",         /* tracepoint category */
    "sys_enter_execve"  /* tracepoint name */
);

/* Attach uprobe */
struct bpf_link *link = bpf_program__attach_uprobe(prog,
    false,              /* false=uprobe, true=uretprobe */
    -1,                 /* -1 = all processes */
    "/lib/libc.so.6",  /* binary path */
    0x12345             /* offset in binary */
);

/* Attach to cgroup */
struct bpf_link *link = bpf_program__attach_cgroup(prog, cgroup_fd);

Error handling and logging

/* Set up libbpf logging: libbpf routes all of its diagnostics —
 * including the kernel verifier log on load failure — through a single
 * print callback that can be replaced with libbpf_set_print(). */
static int libbpf_print_fn(enum libbpf_print_level level,
                            const char *format, va_list args)
{
    if (level == LIBBPF_DEBUG)
        return 0;  /* suppress debug logs */
    return vfprintf(stderr, format, args);
}

libbpf_set_print(libbpf_print_fn);

/* Check for verifier errors */
skel = minimal_bpf__open_and_load();
if (!skel) {
    /* libbpf already printed the verifier log via print_fn */
    return 1;
}

BPF object lifecycle with skeleton

open()           parse ELF sections, create bpf_object
   │  [optional customization here]
   │    - set map sizes
   │    - set program types
   │    - set rodata constants
load()           kernel: create maps, verify programs, JIT
   │             libbpf: apply CO-RE relocations, pin maps (if requested)
   │  [optional manual attachment]
attach()         connect programs to their default hooks
   │             (SEC("tracepoint/...") → auto attach)
   │  [program running, handle events]
destroy()        detach all links
                 close file descriptors
                 free bpf_object

Useful bpftool commands

# Inspect loaded programs
bpftool prog list
bpftool prog show id 42 --pretty

# Dump program bytecode
bpftool prog dump xlated id 42       # translated (post-verifier) instructions
bpftool prog dump jited id 42 linum  # with source line numbers

# Inspect maps
bpftool map list
bpftool map dump id 5
bpftool map lookup id 5 key 0 0 0 0  # lookup key=0 (4 bytes)

# Pin to the BPF filesystem (keeps objects alive after the loader exits)
bpftool prog pin id 42 /sys/fs/bpf/my_prog
bpftool map pin id 5 /sys/fs/bpf/my_map

# Generate skeleton
bpftool gen skeleton prog.bpf.o > prog.skel.h

# Generate vmlinux.h (kernel type definitions dumped from its BTF)
bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h

Further reading

  • BPF Architecture — Program types and the bpf() syscall
  • BPF Maps — Map types and operations
  • BTF and CO-RE — How CO-RE relocations work
  • tools/lib/bpf/ in the kernel tree — libbpf source
  • libbpf-bootstrap on GitHub — Project template for BPF programs