author     Andreas Wellving <andreas.wellving@enea.com>   2018-10-15 09:36:50 +0200
committer  Adrian Dudau <Adrian.Dudau@enea.com>           2018-10-16 17:40:15 +0200
commit     ce810776fddc10b907aaa3555d43df21aec7e49d (patch)
tree       5e97bc5db86a97482cd711204e5cb4706b95eda9
parent     9b0ad321114721e8a23b800587ab3b76b02c8b90 (diff)
download   enea-kernel-cache-ce810776fddc10b907aaa3555d43df21aec7e49d.tar.gz
tcp: CVE-2018-5390
tcp: free batches of packets in tcp_prune_ofo_queue()
References:
https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git/commit/?id=1a4f14bab1868b443f0dd3c55b689a478f82e72e
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
Change-Id: If6328c8ba200ef1a8d47687bf670178bb0011e31
-rw-r--r--   patches/cve/4.9.x.scc                                                                |   3
-rw-r--r--   patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch   | 182
2 files changed, 185 insertions, 0 deletions
diff --git a/patches/cve/4.9.x.scc b/patches/cve/4.9.x.scc
index 6de2c9b..0840cbd 100644
--- a/patches/cve/4.9.x.scc
+++ b/patches/cve/4.9.x.scc
@@ -37,3 +37,6 @@ patch CVE-2018-9516-HID-debug-check-length-before-copy_to_user.patch
 
 #CVEs fixed in 4.9.113:
 patch CVE-2018-13405-Fix-up-non-directory-creation-in-SGID-directories.patch
+
+#CVEs fixed in 4.9.116:
+patch CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch
diff --git a/patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch b/patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch
new file mode 100644
index 0000000..b8f95d0
--- /dev/null
+++ b/patches/cve/CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_queue.patch
@@ -0,0 +1,182 @@
Date: Mon, 15 Oct 2018 06:07:41 +0200
Subject: [PATCH] Merge branch 'tcp-robust-ooo'

Eric Dumazet says:

====================
Juha-Matti Tilli reported that malicious peers could inject tiny
packets in out_of_order_queue, forcing very expensive calls
to tcp_collapse_ofo_queue() and tcp_prune_ofo_queue() for
every incoming packet.

With tcp_rmem[2] default of 6MB, the ooo queue could
contain ~7000 nodes.

This patch series makes sure we cut cpu cycles enough to
render the attack not critical.

We might in the future go further, like disconnecting
or black-holing proven malicious flows.
====================
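(A rough estimate behind the ~7000 figure, assuming a truesize of about 0.9 kB per tiny out-of-order skb: 6 MB / ~0.9 kB ≈ 7000 rbtree nodes.)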

CVE: CVE-2018-5390
Upstream-Status: Backport

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 net/ipv4/tcp_input.c | 60 +++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 50 insertions(+), 10 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2f107e4..4838b5f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4355,6 +4355,23 @@ static bool tcp_try_coalesce(struct sock *sk,
 	return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+				 struct sk_buff *to,
+				 struct sk_buff *from,
+				 bool *fragstolen)
+{
+	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+	/* In case tcp_drop() is called later, update to->gso_segs */
+	if (res) {
+		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+	}
+	return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
 	sk_drops_add(sk, skb);
@@ -4478,7 +4495,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	/* In the typical case, we are adding an skb to the end of the list.
 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
 	 */
-	if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
+	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+				 skb, &fragstolen)) {
 coalesce_done:
 		tcp_grow_window(sk, skb);
 		kfree_skb_partial(skb, fragstolen);
@@ -4506,7 +4524,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 				/* All the bits are present. Drop. */
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb);
+				tcp_drop(sk, skb);
 				skb = NULL;
 				tcp_dsack_set(sk, seq, end_seq);
 				goto add_sack;
@@ -4525,10 +4543,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 						 TCP_SKB_CB(skb1)->end_seq);
 			NET_INC_STATS(sock_net(sk),
 				      LINUX_MIB_TCPOFOMERGE);
-			__kfree_skb(skb1);
+			tcp_drop(sk, skb1);
 			goto merge_right;
 		}
-	} else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
+	} else if (tcp_ooo_try_coalesce(sk, skb1,
+					skb, &fragstolen)) {
 		goto coalesce_done;
 	}
 	p = &parent->rb_right;
@@ -4907,6 +4926,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
 	struct rb_node *p;
 	u32 start, end;
@@ -4925,6 +4945,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
+	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
 		skb = tcp_skb_next(skb, NULL);
@@ -4935,11 +4956,20 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		if (!skb ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-				     head, skb, start, end);
+			/* Do not attempt collapsing tiny skbs */
+			if (range_truesize != head->truesize ||
+			    end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+				tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+					     head, skb, start, end);
+			} else {
+				sum_tiny += range_truesize;
+				if (sum_tiny > sk->sk_rcvbuf >> 3)
+					return;
+			}
 			goto new_range;
 		}
 
+		range_truesize += skb->truesize;
 		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
 			start = TCP_SKB_CB(skb)->seq;
 		if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4954,6 +4984,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *    freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4961,20 +4992,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct rb_node *node, *prev;
+	int goal;
 
 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		return false;
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+	goal = sk->sk_rcvbuf >> 3;
 	node = &tp->ooo_last_skb->rbnode;
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
+		goal -= rb_to_skb(node)->truesize;
 		tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
-		sk_mem_reclaim(sk);
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-		    !tcp_under_memory_pressure(sk))
-			break;
+		if (!prev || goal <= 0) {
+			sk_mem_reclaim(sk);
+			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+			    !tcp_under_memory_pressure(sk))
+				break;
+			goal = sk->sk_rcvbuf >> 3;
+		}
 		node = prev;
 	} while (node);
 	tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
@@ -5009,6 +5046,9 @@ static int tcp_prune_queue(struct sock *sk)
 	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+		return 0;
+
 	tcp_collapse_ofo_queue(sk);
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
-- 

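To illustrate why the goal-based batching in tcp_prune_ofo_queue() matters, below is a toy user-space model of the loop. It is only a sketch, not kernel code: sk_mem_reclaim() is replaced by a counter, the early exit on rcvbuf/memory pressure is ignored (the whole queue is assumed to need freeing, as during the attack), and the rcvbuf and per-skb truesize constants are assumptions chosen to mirror the ~7000-node scenario from the cover letter.

#include <stdio.h>

/* Assumed values, not read from a live socket: 6 MB rcvbuf (the tcp_rmem[2]
 * default cited in the cover letter) and ~0.9 kB truesize per tiny ooo skb.
 */
#define RCVBUF        6291456
#define TINY_TRUESIZE 896
#define NODES         (RCVBUF / TINY_TRUESIZE)	/* roughly 7000 queued skbs */

int main(void)
{
	long reclaim_old = 0, reclaim_new = 0;
	int goal = RCVBUF >> 3;			/* 12.5 % of rcvbuf */
	int i;

	for (i = 0; i < NODES; i++) {
		/* Old loop: one (expensive) reclaim pass per freed skb. */
		reclaim_old++;

		/* New loop: reclaim only once 'goal' bytes of truesize have
		 * been freed, or when the queue runs empty (the last
		 * iteration here stands in for the '!prev' case).
		 */
		goal -= TINY_TRUESIZE;
		if (goal <= 0 || i == NODES - 1) {
			reclaim_new++;
			goal = RCVBUF >> 3;
		}
	}

	printf("freed %d skbs: %ld reclaim passes before, %ld after\n",
	       NODES, reclaim_old, reclaim_new);
	return 0;
}

With these assumed numbers the model frees ~7000 skbs using roughly one reclaim pass per 786 kB of freed truesize (on the order of ten passes) instead of one pass per skb, which is the cycle-cutting effect the series aims for.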