bpf: cpumap: switch to napi_skb_cache_get_bulk()

Now that cpumap uses GRO, which drops unused skb heads to the NAPI
cache, use napi_skb_cache_get_bulk() to try to reuse cached entries
and lower the pressure on the MM layer. Always disable BH before
checking for and running the cpumap-pinned XDP prog and don't
re-enable it until the skb bulk has been allocated, as the NAPI
caches can only be accessed from BH context.
The better GRO aggregates packets, the fewer new skbs need to be
allocated. If an aggregated skb contains 16 frags, 15 skbs were
returned to the cache, so the next 15 skbs will be built without
allocating anything.
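
To illustrate the pattern this switches to, a simplified sketch
follows (not the exact kernel code; the helper name
cpumap_build_skbs_sketch() is made up for the example, the real logic
lives in cpu_map_kthread_run(), see the diff below). The NAPI skb
cache is per-CPU and may only be touched with BH disabled, which is
why the prog run and the bulk get share one local_bh_disable()
section:

/* Simplified sketch; helper name and surrounding structure are
 * illustrative only, not the actual cpumap code.
 */
#include <linux/bottom_half.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

static u32 cpumap_build_skbs_sketch(struct xdp_frame **frames,
				    void **skbs, u32 n)
{
	u32 m, i;

	local_bh_disable();

	/* ... run the cpumap-pinned XDP prog on @frames here ... */

	/*
	 * Grab up to @n skb heads, reusing ones GRO previously dropped
	 * into the per-CPU NAPI cache; must run with BH disabled.
	 */
	m = napi_skb_cache_get_bulk(skbs, n);

	/* Not enough heads: drop the frames we can't build skbs for */
	for (i = m; i < n; i++)
		xdp_return_frame(frames[i]);

	/*
	 * ... build an skb from each of the first @m frames and feed
	 * them to GRO before re-enabling BH (elided) ...
	 */

	local_bh_enable();

	return m;
}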

The same trafficgen UDP GRO test now shows:

                GRO off   GRO on
threaded GRO    2.3       4         Mpps
thr bulk GRO    2.4       4.7       Mpps

diff            +4        +17       %

Comparing to the baseline cpumap:

baseline        2.7       N/A       Mpps
thr bulk GRO    2.4       4.7       Mpps
diff            -11       +74       %

Tested-by: Daniel Xu <dxu@dxuuu.xyz>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -253,7 +253,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 	if (!rcpu->prog)
 		goto out;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 
 	ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
@@ -265,7 +265,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 	xdp_do_flush();
 
 	bpf_net_ctx_clear(bpf_net_ctx);
-	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
+	rcu_read_unlock();
 
 out:
 	if (unlikely(ret->skb_n) && ret->xdp_n)
@@ -303,7 +303,6 @@ static int cpu_map_kthread_run(void *data)
 	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
 		struct xdp_cpumap_stats stats = {}; /* zero stats */
 		unsigned int kmem_alloc_drops = 0, sched = 0;
-		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
 		struct cpu_map_ret ret = { };
 		void *frames[CPUMAP_BATCH];
 		void *skbs[CPUMAP_BATCH];
@@ -355,15 +354,14 @@ static int cpu_map_kthread_run(void *data)
 			prefetchw(page);
 		}
 
+		local_bh_disable();
+
 		/* Support running another XDP prog on this CPU */
 		cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats);
-		if (!ret.xdp_n) {
-			local_bh_disable();
+		if (!ret.xdp_n)
 			goto stats;
-		}
 
-		m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp,
-					  ret.xdp_n, skbs);
+		m = napi_skb_cache_get_bulk(skbs, ret.xdp_n);
 		if (unlikely(m < ret.xdp_n)) {
 			for (i = m; i < ret.xdp_n; i++)
 				xdp_return_frame(frames[i]);
@@ -376,7 +374,6 @@ static int cpu_map_kthread_run(void *data)
 			ret.xdp_n = m;
 		}
 
-		local_bh_disable();
 		for (i = 0; i < ret.xdp_n; i++) {
 			struct xdp_frame *xdpf = frames[i];