author     Christopher Clark <christopher.w.clark@gmail.com>   2018-01-08 23:12:44 -0800
committer  Bruce Ashfield <bruce.ashfield@windriver.com>       2018-01-12 10:37:46 -0500
commit     3f5221471424c3da63821c60ad720d793844e89e (patch)
tree       cffd5309d84c096daf8714af460922adf4011160 /recipes-extended/xen/files/xsa246-4.9.patch
parent     d1969606e3540d3771a5ba4626d4e5ea42bd683a (diff)
download   meta-virtualization-3f5221471424c3da63821c60ad720d793844e89e.tar.gz
xen: upgrade 4.9.x recipe to 4.9.1 and apply XSA/CVE fix patches
Upgrade the Xen 4.9.x series recipe to the latest release, 4.9.1,
and apply patches (see the recipe sketch below) for:
XSA-245 / CVE-2017-17046
XSA-246 / CVE-2017-17044
XSA-247 / CVE-2017-17045
XSA-248 / CVE-2017-17566
XSA-249 / CVE-2017-17563
XSA-250 / CVE-2017-17564
XSA-251 / CVE-2017-17565
Signed-off-by: Christopher Clark <christopher.clark6@baesystems.com>
Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
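
The recipe change itself is not shown in this view (the diffstat below is limited to the new xsa246-4.9.patch file), but in a Yocto layer such as meta-virtualization "apply patches" normally means listing the patch files in the recipe's SRC_URI so that BitBake applies them during do_patch. The snippet below is a minimal sketch of that idea, assuming a recipe named xen_4.9.bb and the conventional files/ search path; it is illustrative, not the literal recipe diff from this commit.

# Sketch only: assumed recipe recipes-extended/xen/xen_4.9.bb with patch
# files under recipes-extended/xen/files/ (part of the default BitBake
# file search path for this recipe).

# Move the 4.9.x recipe to the 4.9.1 release tarball.
PV = "4.9.1"

SRC_URI = " \
    https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
    file://xsa246-4.9.patch \
    "
# The remaining XSA-245..XSA-251 patch files added by this commit would
# appear as further file:// entries here, applied in order on top of the
# unpacked 4.9.1 source; the SRC_URI checksums also need refreshing for
# the new tarball.
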
Diffstat (limited to 'recipes-extended/xen/files/xsa246-4.9.patch')
-rw-r--r--   recipes-extended/xen/files/xsa246-4.9.patch   74
1 file changed, 74 insertions, 0 deletions
diff --git a/recipes-extended/xen/files/xsa246-4.9.patch b/recipes-extended/xen/files/xsa246-4.9.patch
new file mode 100644
index 00000000..6370a106
--- /dev/null
+++ b/recipes-extended/xen/files/xsa246-4.9.patch
@@ -0,0 +1,74 @@
From: Julien Grall <julien.grall@linaro.org>
Subject: x86/pod: prevent infinite loop when shattering large pages

When populating pages, the PoD may need to split large ones using
p2m_set_entry and request the caller to retry (see ept_get_entry for
instance).

p2m_set_entry may fail to shatter if it is not possible to allocate
memory for the new page table. However, the error is not propagated,
so the callers end up retrying the PoD operation indefinitely.

Prevent the infinite loop by returning false when it is not possible to
shatter the large mapping.

This is XSA-246.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-                      p2m_populate_on_demand, p2m->default_access);
-        return 0;
+        return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+                             p2m_populate_on_demand, p2m->default_access);
     }

     /* Only reclaim if we're in actual need of more cache. */
@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai

     gfn_aligned = (gfn >> order) << order;

-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-                  p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                       p2m->default_access) )
+    {
+        p2m_pod_cache_add(p2m, p, order);
+        goto out_fail;
+    }

     for( i = 0; i < (1UL << order); i++ )
     {
@@ -1150,13 +1153,18 @@ remap_and_retry:
     BUG_ON(order != PAGE_ORDER_2M);
     pod_unlock(p2m);

-    /* Remap this 2-meg region in singleton chunks */
-    /* NOTE: In a p2m fine-grained lock scenario this might
-     * need promoting the gfn lock from gfn->2M superpage */
+    /*
+     * Remap this 2-meg region in singleton chunks. See the comment on the
+     * 1G page splitting path above for why a single call suffices.
+     *
+     * NOTE: In a p2m fine-grained lock scenario this might
+     * need promoting the gfn lock from gfn->2M superpage.
+     */
     gfn_aligned = (gfn>>order)<<order;
-    for(i=0; i<(1<<order); i++)
-        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+                       p2m_populate_on_demand, p2m->default_access) )
+        return -1;
+
     if ( tb_init_done )
     {
         struct {