Merge branch 'udp-fix-two-integer-overflows-when-sk-sk_rcvbuf-is-close-to-int_max'

Kuniyuki Iwashima says:

====================
udp: Fix two integer overflows when sk->sk_rcvbuf is close to INT_MAX.

I got a report that UDP mem usage in /proc/net/sockstat did not
drop even after an application was terminated.

The issue could happen if sk->sk_rmem_alloc wraps around due
to a large sk->sk_rcvbuf, which was INT_MAX in our case.

Patch 2 fixes the issue, and patch 1 fixes another overflow I found
while investigating it.

v3: https://lore.kernel.org/20250327202722.63756-1-kuniyu@amazon.com
v2: https://lore.kernel.org/20250325195826.52385-1-kuniyu@amazon.com
v1: https://lore.kernel.org/20250323231016.74813-1-kuniyu@amazon.com
====================
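
For illustration, here is a minimal user-space sketch of the wraparound
described above. The names mirror the kernel's, but the values and the
plain int standing in for the atomic_t sk_rmem_alloc are invented:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int rcvbuf = INT_MAX;      /* sk->sk_rcvbuf in the report */
		int rmem = INT_MAX - 100;  /* bytes already charged to the socket */
		int truesize = 700;        /* next skb's truesize */

		/* Signed overflow is undefined in C; on common compilers it
		 * wraps, which is what happened to sk->sk_rmem_alloc. */
		rmem += truesize;

		if (rmem > rcvbuf)         /* the old "queue full" check */
			puts("drop");
		else                       /* taken: rmem wrapped negative */
			printf("enqueue, rmem = %d\n", rmem);

		return 0;
	}

Once the counter wraps negative, the full-queue check never fires again
and the bytes charged before the wrap are never fully released, matching
the stuck sockstat figure.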

Link: https://patch.msgid.link/20250401184501.67377-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0ff0faf7af
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2025-04-02 17:18:28 -07:00

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1625,12 +1625,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
 }
 
 /* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
-			     bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+			     int partial, bool rx_queue_lock_held)
 {
 	struct udp_sock *up = udp_sk(sk);
 	struct sk_buff_head *sk_queue;
-	int amt;
+	unsigned int amt;
 
 	if (likely(partial)) {
 		up->forward_deficit += size;
@@ -1650,10 +1650,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (!rx_queue_lock_held)
 		spin_lock(&sk_queue->lock);
 
-	sk_forward_alloc_add(sk, size);
-	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
-	sk_forward_alloc_add(sk, -amt);
+	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+	sk_forward_alloc_add(sk, size - amt);
 
 	if (amt)
 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
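
The ordering in the hunk above is the point of the fix: the old sequence
first added size into sk->sk_forward_alloc and only then carved out the
page-aligned part, whereas the new sequence computes the page-aligned amt
up front and applies a single size - amt adjustment, so a huge unsigned
size never transits the signed socket field. A standalone sketch of the
arithmetic, with invented values and fwd standing in for
sk->sk_forward_alloc:

	#include <stdio.h>

	#define TOY_PAGE_SIZE  4096u
	#define TOY_PAGE_SHIFT 12

	int main(void)
	{
		int fwd = 512;             /* leftover forward-alloc credit */
		unsigned int size = 10000; /* bytes being released */
		int partial = 0;
		unsigned int amt;

		/* Page-aligned amount to hand back to the global pool. */
		amt = (size + fwd - partial) & ~(TOY_PAGE_SIZE - 1);
		fwd += size - amt;         /* one update, no +size detour */

		/* Prints: return 2 pages, keep 2320 bytes of credit */
		printf("return %u pages, keep %d bytes of credit\n",
		       amt >> TOY_PAGE_SHIFT, fwd);
		return 0;
	}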
@@ -1725,17 +1723,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
-	int rmem, err = -ENOMEM;
+	unsigned int rmem, rcvbuf;
 	spinlock_t *busy = NULL;
-	int size, rcvbuf;
+	int size, err = -ENOMEM;
 
-	/* Immediately drop when the receive queue is full.
-	 * Always allow at least one packet.
-	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
-	if (rmem > rcvbuf)
-		goto drop;
+	size = skb->truesize;
+
+	/* Immediately drop when the receive queue is full.
+	 * Cast to unsigned int performs the boundary check for INT_MAX.
+	 */
+	if (rmem + size > rcvbuf) {
+		if (rcvbuf > INT_MAX >> 1)
+			goto drop;
+
+		/* Always allow at least one packet for small buffer. */
+		if (rmem > rcvbuf)
+			goto drop;
+	}
 
 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
 	 * having linear skbs :
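
What makes the new check robust is that rmem and rcvbuf are now unsigned
int: rmem + size cannot appear negative, and because rcvbuf never exceeds
INT_MAX, a sum that crosses INT_MAX still compares greater than rcvbuf
and takes the drop path; the rcvbuf > INT_MAX >> 1 test additionally
refuses the one-packet grace for huge buffers, which is what allowed the
original wrap. A small sketch of the comparison (values invented):

	#include <assert.h>
	#include <limits.h>

	int main(void)
	{
		unsigned int rmem = INT_MAX - 100; /* already charged */
		unsigned int rcvbuf = INT_MAX;     /* largest legal sk_rcvbuf */
		int size = 700;                    /* next skb's truesize */

		/* Each operand is at most INT_MAX, so the unsigned sum cannot
		 * wrap past UINT_MAX; crossing INT_MAX just yields a large
		 * unsigned value that correctly compares above rcvbuf. */
		assert(rmem + size > rcvbuf);
		return 0;
	}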
@@ -1745,10 +1751,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);
+		size = skb->truesize;
 
 		busy = busylock_acquire(sk);
 	}
-	size = skb->truesize;
 	udp_set_dev_scratch(skb);
 
 	atomic_add(size, &sk->sk_rmem_alloc);
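
Note the moved assignment above: size is now read before the full-queue
check, and skb_condense() may shrink skb->truesize, so the value has to
be refreshed inside the pressure branch or the atomic_add() below it
would charge the pre-condense size. A toy model of the ordering (all
names and numbers invented):

	#include <stdio.h>

	struct toy_skb { unsigned int truesize; };

	/* Stand-in for skb_condense(): pretend a tail fragment was freed. */
	static void toy_condense(struct toy_skb *skb)
	{
		skb->truesize -= 1280;
	}

	int main(void)
	{
		struct toy_skb skb = { .truesize = 2048 };
		int size = skb.truesize; /* read early for the full-queue check */

		toy_condense(&skb);      /* under memory pressure */
		size = skb.truesize;     /* re-read; else we would charge 2048 */

		printf("charge %d bytes (truesize now %u)\n", size, skb.truesize);
		return 0;
	}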
@@ -1835,7 +1841,7 @@ EXPORT_IPV6_MOD_GPL(skb_consume_udp);
 static struct sk_buff *__first_packet_length(struct sock *sk,
 					     struct sk_buff_head *rcvq,
-					     int *total)
+					     unsigned int *total)
 {
 	struct sk_buff *skb;
 
 	while ((skb = skb_peek(rcvq)) != NULL) {
@@ -1868,8 +1874,8 @@ static int first_packet_length(struct sock *sk)
 {
 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+	unsigned int total = 0;
 	struct sk_buff *skb;
-	int total = 0;
 	int res;
 
 	spin_lock_bh(&rcvq->lock);
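
The last two hunks apply the same signedness fix to the byte total that
first_packet_length() drains from the reader queue and hands back to
udp_rmem_release(): once more than INT_MAX bytes of truesize have been
charged, that sum can itself overflow a signed counter. A toy
accumulation showing why total must be unsigned (figures invented):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int truesize = 64 * 1024; /* one large skb */
		unsigned int total = 0;            /* the fixed, unsigned type */
		unsigned int i;

		/* 40000 * 64 KiB is about 2.6 GB of truesize, above INT_MAX;
		 * with the old "int total" this accumulation would be signed
		 * overflow (undefined behaviour). */
		for (i = 0; i < 40000; i++)
			total += truesize;

		printf("total = %u (INT_MAX = %d)\n", total, INT_MAX);
		return 0;
	}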