author    Roger Pau Monné <roger.pau@citrix.com>  2020-09-11 14:15:26 +0200
committer Jan Beulich <jbeulich@suse.com>         2020-09-11 14:15:26 +0200
commit    e5a1b6f0d2070c7a03f0f2cff5126a5fea94cc4d (patch)
tree      e821e81e670061c0f8eac5e58a10a455f28a1b15
parent    c9476c4ad72e8a1842d713c74843034c7ec6eb51 (diff)
x86/mm: do not mark IO regions as Xen heap
arch_init_memory will treat all the gaps on the physical memory map between
RAM regions as MMIO and use share_xen_page_with_guest in order to assign
them to dom_io. This has the side effect of setting the Xen heap flag on
such pages, and thus is_special_page would then return true, which is an
issue in epte_get_entry_emt because such pages will be forced to use
write-back cache attributes.

Fix this by introducing a new helper to assign the MMIO regions to dom_io
without setting the Xen heap flag on the pages, so that is_special_page
will return false and the pages won't be forced to use write-back cache
attributes.

Fixes: 81fd0d3ca4b2cd ('x86/hvm: simplify 'mmio_direct' check in epte_get_entry_emt()')
Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
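To illustrate the chain the message describes, here is a hedged sketch with
made-up helper names; it is not the actual Xen implementation of
is_special_page() or epte_get_entry_emt(), both of which contain further
checks:

/*
 * Illustrative sketch only (hypothetical helpers, simplified logic):
 * the "special page" test keys off the Xen heap flag that
 * share_xen_page_with_guest() sets, and the EPT memory-type logic then
 * forces write-back instead of treating the page as direct MMIO.
 */
static bool is_special_page_sketch(const struct page_info *pg)
{
    return pg->count_info & PGC_xen_heap;
}

static unsigned int emt_for_page_sketch(const struct page_info *pg)
{
    if ( is_special_page_sketch(pg) )
        return MTRR_TYPE_WRBACK;     /* forced WB: wrong for a real MMIO hole */

    return MTRR_TYPE_UNCACHABLE;     /* what an e820 gap assigned to dom_io wants */
}

With the new assign_io_page() helper below, the dom_io MMIO ranges no longer
carry PGC_xen_heap, so the first branch of the sketch is not taken for them.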
-rw-r--r--  xen/arch/x86/mm.c  23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 56bf7add2b..42a6dc9ba4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -271,6 +271,23 @@ static l4_pgentry_t __read_mostly split_l4e;
 #define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
 #endif
 
+/*
+ * Originally cloned from share_xen_page_with_guest(), just to avoid setting
+ * PGC_xen_heap on non-heap (typically) MMIO pages. Other pieces got dropped
+ * simply because they're not needed in this context.
+ */
+static void __init assign_io_page(struct page_info *page)
+{
+    set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), INVALID_M2P_ENTRY);
+
+    /* The incremented type count pins as writable. */
+    page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
+
+    page_set_owner(page, dom_io);
+
+    page->count_info |= PGC_allocated | 1;
+}
+
 void __init arch_init_memory(void)
 {
     unsigned long i, pfn, rstart_pfn, rend_pfn, iostart_pfn, ioend_pfn;
@@ -291,7 +308,7 @@ void __init arch_init_memory(void)
      */
     BUG_ON(pvh_boot && trampoline_phys != 0x1000);
     for ( i = 0; i < 0x100; i++ )
-        share_xen_page_with_guest(mfn_to_page(_mfn(i)), dom_io, SHARE_rw);
+        assign_io_page(mfn_to_page(_mfn(i)));
 
     /* Any areas not specified as RAM by the e820 map are considered I/O. */
     for ( i = 0, pfn = 0; pfn < max_page; i++ )
@@ -332,7 +349,7 @@ void __init arch_init_memory(void)
             if ( !mfn_valid(_mfn(pfn)) )
                 continue;
 
-            share_xen_page_with_guest(mfn_to_page(_mfn(pfn)), dom_io, SHARE_rw);
+            assign_io_page(mfn_to_page(_mfn(pfn)));
         }
 
         /* Skip the RAM region. */
@@ -477,6 +494,8 @@ unsigned long domain_get_maximum_gpfn(struct domain *d)
 void share_xen_page_with_guest(struct page_info *page, struct domain *d,
                                enum XENSHARE_flags flags)
 {
+    ASSERT(d != dom_io); /* Should use assign_io_page(). */
+
     if ( page_get_owner(page) == d )
         return;
 
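For comparison, a rough sketch of the share_xen_page_with_guest() steps the
new helper was cloned from (simplified: locking, read-only sharing and other
details of the real function are left out, and this is not verbatim Xen
source). The functional difference for dom_io is the PGC_xen_heap bit, which
assign_io_page() deliberately leaves clear:

/* Simplified sketch -- not verbatim Xen source. */
static void share_xen_page_with_guest_sketch(struct page_info *page,
                                             struct domain *d)
{
    set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), INVALID_M2P_ENTRY);

    /* The incremented type count pins as writable. */
    page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;

    page_set_owner(page, d);

    /*
     * Unlike assign_io_page(): PGC_xen_heap is set here, which is what
     * made is_special_page() return true for the dom_io MMIO ranges.
     */
    page->count_info |= PGC_xen_heap | PGC_allocated | 1;
}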