Diffstat (limited to 'patches/cve')
-rw-r--r--  patches/cve/4.9.x.scc                                                       |   3
-rw-r--r--  patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch  | 175
2 files changed, 178 insertions, 0 deletions
diff --git a/patches/cve/4.9.x.scc b/patches/cve/4.9.x.scc
index 7aec14a..c3eca4d 100644
--- a/patches/cve/4.9.x.scc
+++ b/patches/cve/4.9.x.scc
@@ -30,3 +30,6 @@ SRC_URI += "file://CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_qu
 
 #CVEs fixed in 4.9.121:
 SRC_URI += "file://CVE-2018-9363-Bluetooth-hidp-buffer-overflow-in-hidp_process_repor.patch"
+
+#CVEs fixed in 4.9.128:
+SRC_URI += "file://CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch"
diff --git a/patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch b/patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch
new file mode 100644
index 0000000..6d6c2ca
--- /dev/null
+++ b/patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch
@@ -0,0 +1,175 @@
From 84580567f1f856d2c7a610273315852e345bc3ac Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Wed, 12 Sep 2018 23:57:48 -1000
Subject: [PATCH] mm: get rid of vmacache_flush_all() entirely

commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 upstream.

Jann Horn points out that the vmacache_flush_all() function is not only
potentially expensive, it's buggy too. It also happens to be entirely
unnecessary, because the sequence number overflow case can be avoided by
simply making the sequence number be 64-bit. That doesn't even grow the
data structures in question, because the other adjacent fields are
already 64-bit.

So simplify the whole thing by just making the sequence number overflow
case go away entirely, which gets rid of all the complications and makes
the code faster too. Win-win.

[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics
  also just goes away entirely with this ]

CVE: CVE-2018-17182
Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=84580567f1f856d2c7a610273315852e345bc3ac]

Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 include/linux/mm_types.h      |  2 +-
 include/linux/sched.h         |  2 +-
 include/linux/vm_event_item.h |  1 -
 include/linux/vmacache.h      |  5 -----
 mm/debug.c                    |  4 ++--
 mm/vmacache.c                 | 38 -----------------------------------
 6 files changed, 4 insertions(+), 48 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e8471c2ca83a..8d6decd50220 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -396,7 +396,7 @@ struct kioctx_table;
 struct mm_struct {
         struct vm_area_struct *mmap;            /* list of VMAs */
         struct rb_root mm_rb;
-        u32 vmacache_seqnum;                    /* per-thread vmacache */
+        u64 vmacache_seqnum;                    /* per-thread vmacache */
 #ifdef CONFIG_MMU
         unsigned long (*get_unmapped_area) (struct file *filp,
                                 unsigned long addr, unsigned long len,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1cc5723a7821..f4a551a5482c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1559,7 +1559,7 @@ struct task_struct {
 
         struct mm_struct *mm, *active_mm;
         /* per-thread vma caching */
-        u32 vmacache_seqnum;
+        u64 vmacache_seqnum;
         struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
         struct task_rss_stat rss_stat;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 2edb150f1a4d..544cd50fbbd0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -97,7 +97,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
                 VMACACHE_FIND_CALLS,
                 VMACACHE_FIND_HITS,
-                VMACACHE_FULL_FLUSHES,
 #endif
                 NR_VM_EVENT_ITEMS
 };
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd43949..4f58ff2dacd6 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
         memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                             unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
         mm->vmacache_seqnum++;
-
-        /* deal with overflows */
-        if (unlikely(mm->vmacache_seqnum == 0))
-                vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
diff --git a/mm/debug.c b/mm/debug.c
index 9feb699c5d25..bebe48aece6d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-        pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+        pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
                 "get_unmapped_area %p\n"
 #endif
@@ -125,7 +125,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
                 "def_flags: %#lx(%pGv)\n",
 
-        mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+        mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
                 mm->get_unmapped_area,
 #endif
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 035fdeb35b43..c9ca3dd46b97 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -5,44 +5,6 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-        struct task_struct *g, *p;
-
-        count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-        /*
-         * Single threaded tasks need not iterate the entire
-         * list of process. We can avoid the flushing as well
-         * since the mm's seqnum was increased and don't have
-         * to worry about other threads' seqnum. Current's
-         * flush will occur upon the next lookup.
-         */
-        if (atomic_read(&mm->mm_users) == 1)
-                return;
-
-        rcu_read_lock();
-        for_each_process_thread(g, p) {
-                /*
-                 * Only flush the vmacache pointers as the
-                 * mm seqnum is already set and curr's will
-                 * be set upon invalidation when the next
-                 * lookup is done.
-                 */
-                if (mm == p->mm)
-                        vmacache_flush(p);
-        }
-        rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma(). The vmacache is task-local and this

