 patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch | 98 ++++++++++++++
 1 file changed, 98 insertions(+)
diff --git a/patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch b/patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch
new file mode 100644
index 0000000..f13f1f1
--- /dev/null
+++ b/patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch
@@ -0,0 +1,98 @@
From f3a5ba6310e11df370f6888ed716d1486896d983 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Mon, 23 Jul 2018 09:28:17 -0700
Subject: [PATCH] tcp: free batches of packets in tcp_prune_ofo_queue()

[ Upstream commit 72cd43ba64fc172a443410ce01645895850844c8 ]

Juha-Matti Tilli reported that malicious peers could inject tiny
packets in out_of_order_queue, forcing very expensive calls
to tcp_collapse_ofo_queue() and tcp_prune_ofo_queue() for
every incoming packet. out_of_order_queue rb-tree can contain
thousands of nodes, iterating over all of them is not nice.

Before linux-4.9, we would have pruned all packets in ofo_queue
in one go, every XXXX packets. XXXX depends on sk_rcvbuf and skbs
truesize, but is about 7000 packets with tcp_rmem[2] default of 6 MB.

Since we plan to increase tcp_rmem[2] in the future to cope with
modern BDP, can not revert to the old behavior, without great pain.

Strategy taken in this patch is to purge ~12.5 % of the queue capacity.

Fixes: 36a6503fedda ("tcp: refine tcp_prune_ofo_queue() to not drop all packets")

CVE: CVE-2018-5390
Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.14.y&id=f3a5ba6310e11df370f6888ed716d1486896d983]

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Juha-Matti Tilli <juha-matti.tilli@iki.fi>
Acked-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 include/linux/skbuff.h |  2 ++
 net/ipv4/tcp_input.c   | 15 +++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9cf971c68401..6dd77767fd5b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3167,6 +3167,8 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
 	return __skb_grow(skb, len);
 }
 
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+
 #define skb_queue_walk(queue, skb) \
 	for (skb = (queue)->next; \
 	     skb != (struct sk_buff *)(queue); \
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 03fd2ff4007b..1e7175bd3b59 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4924,6 +4924,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *    freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4931,20 +4932,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct rb_node *node, *prev;
+	int goal;
 
 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		return false;
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+	goal = sk->sk_rcvbuf >> 3;
 	node = &tp->ooo_last_skb->rbnode;
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
+		goal -= rb_to_skb(node)->truesize;
 		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
-		sk_mem_reclaim(sk);
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-		    !tcp_under_memory_pressure(sk))
-			break;
+		if (!prev || goal <= 0) {
+			sk_mem_reclaim(sk);
+			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+			    !tcp_under_memory_pressure(sk))
+				break;
+			goal = sk->sk_rcvbuf >> 3;
+		}
 		node = prev;
 	} while (node);
 	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
-- 
2.20.1

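Note on the change, with an illustrative sketch. The patch amortizes the relatively expensive limit check (sk_mem_reclaim() plus the rmem comparison) over batches of freed packets instead of performing it once per tiny attacker-supplied skb; each batch targets goal = sk_rcvbuf >> 3, i.e. ~12.5 % of the receive buffer, so with the tcp_rmem[2] default of 6 MB mentioned in the commit message a batch covers roughly 768 kB of skb truesize. The standalone C program below is a minimal sketch of that batching idea only: struct ofo_skb, struct ofo_queue, the prev-linked walk and the simplified accounting are invented stand-ins for the kernel's sk_buff rb-tree, tcp_drop() and sk_mem_reclaim(), not the actual kernel code.

    /* Illustrative sketch only -- NOT kernel code. Mimics the batched
     * pruning from the patch above with invented types. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ofo_skb {                    /* stand-in for struct sk_buff */
            long truesize;              /* memory charged for this packet */
            struct ofo_skb *prev;       /* stand-in for rb_prev() ordering */
    };

    struct ofo_queue {                  /* stand-in for the socket's ofo state */
            struct ofo_skb *last;       /* like tp->ooo_last_skb */
            long rmem_alloc;            /* like sk->sk_rmem_alloc */
            long rcvbuf;                /* like sk->sk_rcvbuf */
    };

    /* Free packets from the tail in batches of ~12.5 % of rcvbuf and
     * re-check the memory limit only at each batch boundary. */
    static bool prune_ofo_queue(struct ofo_queue *q)
    {
            struct ofo_skb *node = q->last, *prev = NULL;
            long goal = q->rcvbuf >> 3;
            bool pruned = false;

            while (node) {
                    prev = node->prev;
                    goal -= node->truesize;
                    q->rmem_alloc -= node->truesize; /* drop + accounting, simplified */
                    free(node);
                    pruned = true;

                    if (!prev || goal <= 0) {
                            /* Batch boundary: the (cheap, simplified) place
                             * where the limit is re-checked. */
                            if (q->rmem_alloc <= q->rcvbuf)
                                    break;
                            goal = q->rcvbuf >> 3;   /* start the next batch */
                    }
                    node = prev;
            }
            q->last = prev;                          /* new tail of the queue */
            return pruned;
    }

    int main(void)
    {
            /* 32 packets of 1 kB against a 16 kB buffer: pruning frees
             * batches of 2 kB until rmem_alloc drops to the limit. */
            struct ofo_queue q = { .last = NULL, .rmem_alloc = 0, .rcvbuf = 16384 };
            for (int i = 0; i < 32; i++) {
                    struct ofo_skb *skb = malloc(sizeof(*skb));
                    skb->truesize = 1024;
                    skb->prev = q.last;
                    q.last = skb;
                    q.rmem_alloc += skb->truesize;
            }
            prune_ofo_queue(&q);
            printf("rmem_alloc after prune: %ld\n", q.rmem_alloc);
            while (q.last) {                         /* free what remains */
                    struct ofo_skb *rest = q.last->prev;
                    free(q.last);
                    q.last = rest;
            }
            return 0;
    }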