| author | Eric Dumazet <edumazet@google.com> | 2017-12-10 17:55:03 -0800 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-12-12 10:53:04 -0500 |
| commit | 607065bad9931e72207b0cac365d7d4abc06bd99 | |
| tree | d18808d1c3bf46dd8a899c3877257ea8c4961b61 | |
| parent | 02db55718d53f9d426cee504c27fb768e9ed4ffe | |
tcp: avoid integer overflows in tcp_rcv_space_adjust()
When using large tcp_rmem[2] values (I did tests with 500 MB),
I noticed overflows while computing rcvwin.
Let's fix this before the following patch.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Wei Wang <weiwan@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/tcp.h  |  2 |
-rw-r--r-- | net/ipv4/tcp_input.c | 12 |
2 files changed, 8 insertions, 6 deletions
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b..4f93f0953c41 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -344,7 +344,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
 	struct {
-		int	space;
+		u32	space;
 		u32	seq;
 		u64	time;
 	} rcvq_space;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 746a6773c482..2900e58738cd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -576,8 +576,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -600,12 +600,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
 		/* If rate increased by 25%,
 		 *	assume slow start, rcvwin = 3 * copied
@@ -625,8 +626,9 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;
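To see the failure mode concretely, here is a minimal user-space sketch of the arithmetic, not kernel code. The advmss and rcvmem values are assumptions chosen for illustration (a typical Ethernet MSS and a rough per-skb truesize); with roughly 500 MB copied, as in the tests mentioned in the commit message, `rcvwin / advmss * rcvmem` lands well past INT_MAX, which is why the patch widens rcvwin to u64 and switches to do_div()/min_t(u64, ...).

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal sketch of the rcvwin/rcvbuf arithmetic above.  advmss and
 * rcvmem are illustrative assumptions, not values from the patch:
 * ~1460 for a typical Ethernet MSS, ~2560 as a rough per-skb truesize.
 */
int main(void)
{
	uint32_t copied = 500u * 1024 * 1024;  /* ~500 MB delivered to userspace */
	uint32_t advmss = 1460;
	uint32_t rcvmem = 2560;

	/* rcvwin starts at 2 * copied + 16 * advmss and may be increased
	 * further (up to doubled) by the rate heuristic in the
	 * surrounding code.
	 */
	uint64_t rcvwin = ((uint64_t)copied << 1) + 16 * advmss;
	rcvwin <<= 1;

	/* The pre-patch code evaluated this product in a 32-bit 'int'. */
	uint64_t rcvbuf = rcvwin / advmss * rcvmem;

	printf("rcvwin = %llu\n", (unsigned long long)rcvwin);
	printf("rcvwin / advmss * rcvmem = %llu, INT_MAX = %d\n",
	       (unsigned long long)rcvbuf, INT_MAX);
	/* Prints roughly 3.7e9 for rcvbuf, i.e. past INT_MAX, so the old
	 * int arithmetic wrapped to a bogus value before ever reaching
	 * the min() clamp against tcp_rmem[2].
	 */
	return 0;
}
```

After the patch, only rcvwin needs to be 64-bit: the final value is clamped to sysctl_tcp_rmem[2] by min_t(u64, ...), so rcvmem and rcvbuf can presumably stay int.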