Mirror of https://github.com/torvalds/linux.git (synced 2025-04-12 06:49:52 +00:00)
xsk: Get rid of xdp_buff_xsk::xskb_list_node
Let's bring xdp_buff_xsk back to occupying 2 cachelines by removing xskb_list_node - for the purpose of gathering the xskb frags free_list_node can be used, head of the list (xsk_buff_pool::xskb_list) stays as-is, just reuse the node ptr. It is safe to do as a single xdp_buff_xsk can never reside in two pool's lists simultaneously. Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Magnus Karlsson <magnus.karlsson@intel.com> Link: https://lore.kernel.org/bpf/20241007122458.282590-2-maciej.fijalkowski@intel.com
This commit is contained in:
parent
a8057ed23a
commit
b692bf9a75
@@ -126,8 +126,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
 	if (likely(!xdp_buff_has_frags(xdp)))
 		goto out;
 
-	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
-		list_del(&pos->xskb_list_node);
+	list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
+		list_del(&pos->free_list_node);
 		xp_free(pos);
 	}
 
@@ -140,7 +140,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
 {
 	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
 
-	list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
+	list_add_tail(&frag->free_list_node, &frag->pool->xskb_list);
 }
 
 static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
@@ -150,9 +150,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
 	struct xdp_buff_xsk *frag;
 
 	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
-					struct xdp_buff_xsk, xskb_list_node);
+					struct xdp_buff_xsk, free_list_node);
 	if (frag) {
-		list_del(&frag->xskb_list_node);
+		list_del(&frag->free_list_node);
 		ret = &frag->xdp;
 	}
 
@@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
 {
 	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
 
-	list_del(&xskb->xskb_list_node);
+	list_del(&xskb->free_list_node);
 }
 
 static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
@@ -172,7 +172,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
 	struct xdp_buff_xsk *frag;
 
 	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
-			       xskb_list_node);
+			       free_list_node);
 	return &frag->xdp;
 }
 
@@ -30,7 +30,6 @@ struct xdp_buff_xsk {
 	struct xsk_buff_pool *pool;
 	u64 orig_addr;
 	struct list_head free_list_node;
-	struct list_head xskb_list_node;
 };
 
 #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
@@ -171,14 +171,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 		return 0;
 
 	xskb_list = &xskb->pool->xskb_list;
-	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+	list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
 		if (list_is_singular(xskb_list))
 			contd = 0;
 		len = pos->xdp.data_end - pos->xdp.data;
 		err = __xsk_rcv_zc(xs, pos, len, contd);
 		if (err)
 			goto err;
-		list_del(&pos->xskb_list_node);
+		list_del(&pos->free_list_node);
 	}
 
 	return 0;
@@ -102,7 +102,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 		xskb->pool = pool;
 		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
 		INIT_LIST_HEAD(&xskb->free_list_node);
-		INIT_LIST_HEAD(&xskb->xskb_list_node);
 		if (pool->unaligned)
 			pool->free_heads[i] = xskb;
 		else