From 3f5221471424c3da63821c60ad720d793844e89e Mon Sep 17 00:00:00 2001
From: Christopher Clark
Date: Mon, 8 Jan 2018 23:12:44 -0800
Subject: xen: upgrade 4.9.x recipe to 4.9.1 and apply XSA/CVE fix patches

Upgrade the Xen 4.9.x series recipe to the latest release, 4.9.1, and apply
patches for:

XSA-245 / CVE-2017-17046
XSA-246 / CVE-2017-17044
XSA-247 / CVE-2017-17045
XSA-248 / CVE-2017-17566
XSA-249 / CVE-2017-17563
XSA-250 / CVE-2017-17564
XSA-251 / CVE-2017-17565

Signed-off-by: Christopher Clark
Signed-off-by: Bruce Ashfield
---
 recipes-extended/xen/files/xsa246-4.9.patch | 74 +++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+)
 create mode 100644 recipes-extended/xen/files/xsa246-4.9.patch

diff --git a/recipes-extended/xen/files/xsa246-4.9.patch b/recipes-extended/xen/files/xsa246-4.9.patch
new file mode 100644
index 00000000..6370a106
--- /dev/null
+++ b/recipes-extended/xen/files/xsa246-4.9.patch
@@ -0,0 +1,74 @@
+From: Julien Grall
+Subject: x86/pod: prevent infinite loop when shattering large pages
+
+When populating pages, the PoD may need to split large ones using
+p2m_set_entry and request the caller to retry (see ept_get_entry for
+instance).
+
+p2m_set_entry may fail to shatter if it is not possible to allocate
+memory for the new page table. However, the error is not propagated,
+resulting in the callers retrying the PoD indefinitely.
+
+Prevent the infinite loop by returning false when it is not possible to
+shatter the large mapping.
+
+This is XSA-246.
+
+Signed-off-by: Julien Grall
+Signed-off-by: Jan Beulich
+Reviewed-by: George Dunlap
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
+          * NOTE: In a fine-grained p2m locking scenario this operation
+          * may need to promote its locking from gfn->1g superpage
+          */
+-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+-                      p2m_populate_on_demand, p2m->default_access);
+-        return 0;
++        return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
++                             p2m_populate_on_demand, p2m->default_access);
+     }
+ 
+     /* Only reclaim if we're in actual need of more cache. */
+@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
+ 
+     gfn_aligned = (gfn >> order) << order;
+ 
+-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+-                  p2m->default_access);
++    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
++                       p2m->default_access) )
++    {
++        p2m_pod_cache_add(p2m, p, order);
++        goto out_fail;
++    }
+ 
+     for( i = 0; i < (1UL << order); i++ )
+     {
+@@ -1150,13 +1153,18 @@ remap_and_retry:
+     BUG_ON(order != PAGE_ORDER_2M);
+     pod_unlock(p2m);
+ 
+-    /* Remap this 2-meg region in singleton chunks */
+-    /* NOTE: In a p2m fine-grained lock scenario this might
+-     * need promoting the gfn lock from gfn->2M superpage */
++    /*
++     * Remap this 2-meg region in singleton chunks. See the comment on the
++     * 1G page splitting path above for why a single call suffices.
++     *
++     * NOTE: In a p2m fine-grained lock scenario this might
++     * need promoting the gfn lock from gfn->2M superpage.
++     */
+     gfn_aligned = (gfn>>order)<<order;
+-    for(i=0; i<(1UL << order); i++)
+-        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
+-                      p2m_populate_on_demand, p2m->default_access);
++    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
++                       p2m_populate_on_demand, p2m->default_access) )
++        return -1;
++
+     if ( tb_init_done )
+     {
+         struct {
-- 
cgit v1.2.3-54-g00ecf
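
Editor's note: for readers less familiar with this class of bug, the standalone
sketch below (plain C, not Xen code; every identifier in it is invented for
illustration) contrasts the two shapes the patch switches between: a retry loop
whose page-split step has its error silently dropped, and the corrected shape
where the error is propagated so the loop can terminate.

/*
 * Illustration only: models the XSA-246 failure mode with invented names.
 * Build with: cc -o pod-demo pod-demo.c
 */
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for a split/shatter step such as p2m_set_entry(); here it models
 * a page-table allocation failure by always returning an error. */
static int split_mapping(int *attempts)
{
    ++*attempts;
    return -1;
}

/* Buggy shape: the error is discarded and the caller is told to retry. */
static bool populate_buggy(int *attempts)
{
    split_mapping(attempts);        /* return value dropped */
    return true;                    /* "please retry" forever */
}

/* Fixed shape (what the patch does): propagate the failure upward. */
static bool populate_fixed(int *attempts)
{
    if ( split_mapping(attempts) )
        return false;               /* stop retrying on error */
    return true;
}

int main(void)
{
    int buggy = 0, fixed = 0;

    /* Cap the buggy loop so this demo terminates; without the cap it spins. */
    while ( populate_buggy(&buggy) && buggy < 1000 )
        ;
    while ( populate_fixed(&fixed) )
        ;

    printf("buggy pattern: %d attempts (stopped only by the demo cap)\n", buggy);
    printf("fixed pattern: %d attempt(s)\n", fixed);
    return 0;
}

The actual fix applies the second shape at each p2m_set_entry() call site in
p2m_pod_demand_populate(): the 1G path returns the call's result directly, the
main populate path unwinds via out_fail (returning the page to the PoD cache
first), and the 2M remap path returns -1 on failure.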