xen/32on64-shadow.patch

Preliminary (and likely incomplete and/or wrong) adjustments to the shadow
code for 32-bit PV guests on 64-bit Xen.

Includes unstable c/s 13296: [HVM] Fix shadow memory tracking
Fixes a missing free from cset 13275, and a missing prealloc.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
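
For reference, on 64-bit Xen a 32-bit PV guest's monitor table becomes a
three-page chain hanging off the monitor l4. A condensed sketch of the
construction (commentary only, not part of the patch; it reuses the helpers
the hunks below call and omits the HVM-only paths and the shadow_flags
bookkeeping):

    static mfn_t sketch_32on64_monitor_table(struct vcpu *v)
    {
        struct domain *d = v->domain;
        mfn_t m4mfn, m3mfn, m2mfn;
        l4_pgentry_t *l4e;
        l3_pgentry_t *l3e;

        /* Prealloc up front so the shadow_alloc() calls below cannot
         * fail halfway through building the chain. */
        shadow_prealloc(d, SHADOW_MAX_ORDER);

        m4mfn = shadow_alloc(d, SH_type_monitor_table, 0); /* monitor l4  */
        m3mfn = shadow_alloc(d, SH_type_monitor_table, 0); /* monitor l3  */
        m2mfn = shadow_alloc(d, SH_type_monitor_table, 0); /* monitor l2h */

        sh_install_xen_entries_in_l4(v, m4mfn, m4mfn);

        /* l4 slot 0 -> l3: gives us a linear map of the PAE shadows. */
        l4e = sh_map_domain_page(m4mfn);
        l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
        sh_unmap_domain_page(l4e);

        /* l3 slot 3 -> l2h: carries the compat-mode Xen mappings. */
        l3e = sh_map_domain_page(m3mfn);
        l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
        sh_unmap_domain_page(l3e);
        sh_install_xen_entries_in_l2h(v, m2mfn);

        return m4mfn;
    }

sh_destroy_monitor_table() has to unwind the same chain, freeing the l2h
(via l3e[3]) before the l3 (via l4e[0]) before the l4 itself; that pairing
is what the sh_destroy_monitor_table hunk below implements.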
Index: 2007-01-31/xen/arch/x86/mm/shadow/common.c
===================================================================
--- 2007-01-31.orig/xen/arch/x86/mm/shadow/common.c 2007-01-31 09:31:33.000000000 +0100
+++ 2007-01-31/xen/arch/x86/mm/shadow/common.c 2007-01-31 09:36:44.000000000 +0100
@@ -2427,7 +2427,7 @@ void sh_update_paging_modes(struct vcpu
         ///
 #if CONFIG_PAGING_LEVELS == 4
         if ( pv_32bit_guest(v) )
-            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,3);
+            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
         else
             v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
 #elif CONFIG_PAGING_LEVELS == 3
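
(Commentary, not part of the patch: SHADOW_INTERNAL_NAME(sh_paging_mode,s,g)
names the per-mode function table compiled for s-level shadows over g-level
guest pagetables. Its assumed shape, per shadow/private.h of this era, is a
token-pasting macro along the lines of

    /* yields e.g. sh_paging_mode__shadow_3_guest_3 */
    #define SHADOW_INTERNAL_NAME(name, shadow_levels, guest_levels) \
        name ## __shadow_ ## shadow_levels ## _guest_ ## guest_levels

so the hunk above switches 32-bit PV guests from 4-level shadows over their
3-level compat pagetables to the plain 3-on-3 mode.)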
Index: 2007-01-31/xen/arch/x86/mm/shadow/multi.c
===================================================================
--- 2007-01-31.orig/xen/arch/x86/mm/shadow/multi.c 2007-01-31 09:31:33.000000000 +0100
+++ 2007-01-31/xen/arch/x86/mm/shadow/multi.c 2007-01-31 09:36:54.000000000 +0100
@@ -1423,7 +1423,7 @@ void sh_install_xen_entries_in_l4(struct
 }
 #endif
 
-#if CONFIG_PAGING_LEVELS == 3 && GUEST_PAGING_LEVELS == 3
+#if (CONFIG_PAGING_LEVELS == 3 || defined(CONFIG_COMPAT)) && GUEST_PAGING_LEVELS == 3
 // For 3-on-3 PV guests, we need to make sure the xen mappings are in
 // place, which means that we need to populate the l2h entry in the l3
 // table.
@@ -1433,12 +1433,20 @@ void sh_install_xen_entries_in_l2h(struc
 {
     struct domain *d = v->domain;
     shadow_l2e_t *sl2e;
+#if CONFIG_PAGING_LEVELS == 3
     int i;
+#else
+
+    if ( !pv_32bit_guest(v) )
+        return;
+#endif
 
     sl2e = sh_map_domain_page(sl2hmfn);
     ASSERT(sl2e != NULL);
     ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));
 
+#if CONFIG_PAGING_LEVELS == 3
+
     /* Copy the common Xen mappings from the idle domain */
     memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
            &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
@@ -1479,6 +1487,15 @@ void sh_install_xen_entries_in_l2h(struc
         }
         sh_unmap_domain_page(p2m);
     }
+
+#else
+
+    /* Copy the common Xen mappings from the idle domain */
+    memcpy(&sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
+           &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+           COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
+
+#endif
 
     sh_unmap_domain_page(sl2e);
 }
@@ -1639,12 +1656,15 @@ make_fl1_shadow(struct vcpu *v, gfn_t gf
 mfn_t
 sh_make_monitor_table(struct vcpu *v)
 {
+    struct domain *d = v->domain;
 
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
 
+    /* Guarantee we can get the memory we need */
+    shadow_prealloc(d, SHADOW_MAX_ORDER);
+
 #if CONFIG_PAGING_LEVELS == 4
     {
-        struct domain *d = v->domain;
         mfn_t m4mfn;
         m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
         sh_install_xen_entries_in_l4(v, m4mfn, m4mfn);
@@ -1661,6 +1681,19 @@ sh_make_monitor_table(struct vcpu *v)
         l4e = sh_map_domain_page(m4mfn);
         l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
         sh_unmap_domain_page(l4e);
+        if ( pv_32bit_guest(v) )
+        {
+            // Install a monitor l2 table in slot 3 of the l3 table.
+            // This is used for all Xen entries.
+            mfn_t m2mfn;
+            l3_pgentry_t *l3e;
+            m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
+            mfn_to_page(m2mfn)->shadow_flags = 2;
+            l3e = sh_map_domain_page(m3mfn);
+            l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
+            sh_install_xen_entries_in_l2h(v, m2mfn);
+            sh_unmap_domain_page(l3e);
+        }
     }
 #endif /* SHADOW_PAGING_LEVELS < 4 */
     return m4mfn;
@@ -1669,7 +1702,6 @@ sh_make_monitor_table(struct vcpu *v)
 
 #elif CONFIG_PAGING_LEVELS == 3
     {
-        struct domain *d = v->domain;
         mfn_t m3mfn, m2mfn;
         l3_pgentry_t *l3e;
         l2_pgentry_t *l2e;
@@ -1703,7 +1735,6 @@ sh_make_monitor_table(struct vcpu *v)
 
 #elif CONFIG_PAGING_LEVELS == 2
     {
-        struct domain *d = v->domain;
         mfn_t m2mfn;
         m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
         sh_install_xen_entries_in_l2(v, m2mfn, m2mfn);
@@ -2066,9 +2097,19 @@ void sh_destroy_monitor_table(struct vcp
 #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
     /* Need to destroy the l3 monitor page in slot 0 too */
     {
+        mfn_t m3mfn;
         l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
         ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
-        shadow_free(d, _mfn(l4e_get_pfn(l4e[0])));
+        m3mfn = _mfn(l4e_get_pfn(l4e[0]));
+        if ( pv_32bit_guest(v) )
+        {
+            /* Need to destroy the l2 monitor page in slot 3 too */
+            l3_pgentry_t *l3e = sh_map_domain_page(m3mfn);
+            ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
+            shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
+            sh_unmap_domain_page(l3e);
+        }
+        shadow_free(d, m3mfn);
         sh_unmap_domain_page(l4e);
     }
 #elif CONFIG_PAGING_LEVELS == 3
@@ -3048,12 +3089,15 @@ sh_update_linear_entries(struct vcpu *v)
 
 #elif (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS == 3)
 
-    /* This case only exists in HVM. To give ourselves a linear map of the
-     * shadows, we need to extend a PAE shadow to 4 levels. We do this by
-     * having a monitor l3 in slot 0 of the monitor l4 table, and
-     * copying the PAE l3 entries into it. Then, by having the monitor l4e
-     * for shadow pagetables also point to the monitor l4, we can use it
-     * to access the shadows. */
+    /* PV: XXX
+     *
+     * HVM: To give ourselves a linear map of the shadows, we need to
+     * extend a PAE shadow to 4 levels. We do this by having a monitor
+     * l3 in slot 0 of the monitor l4 table, and copying the PAE l3
+     * entries into it. Then, by having the monitor l4e for shadow
+     * pagetables also point to the monitor l4, we can use it to access
+     * the shadows.
+     */
 
     if ( shadow_mode_external(d) )
     {
@@ -3096,6 +3140,8 @@ sh_update_linear_entries(struct vcpu *v)
         if ( v != current )
             sh_unmap_domain_page(ml3e);
     }
+    else
+        domain_crash(d); /* XXX */
 
 #elif CONFIG_PAGING_LEVELS == 3
 
Index: 2007-01-31/xen/include/asm-x86/shadow.h
===================================================================
--- 2007-01-31.orig/xen/include/asm-x86/shadow.h 2007-01-31 09:31:33.000000000 +0100
+++ 2007-01-31/xen/include/asm-x86/shadow.h 2007-01-31 09:36:44.000000000 +0100
@@ -70,9 +70,9 @@
 
 // How do we tell that we have a 32-bit PV guest in a 64-bit Xen?
 #ifdef __x86_64__
-#define pv_32bit_guest(_v) 0 // not yet supported
+#define pv_32bit_guest(_v) (!is_hvm_vcpu(_v) && IS_COMPAT((_v)->domain))
 #else
-#define pv_32bit_guest(_v) !is_hvm_vcpu(v)
+#define pv_32bit_guest(_v) (!is_hvm_vcpu(_v))
 #endif
 
 /* The shadow lock.
@@ -418,7 +418,7 @@ static inline void update_cr3(struct vcp
     }
 
 #if CONFIG_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !IS_COMPAT(v->domain) )
         cr3_mfn = pagetable_get_pfn(v->arch.guest_table_user);
     else
 #endif
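
Closing note on the shadow.h hunks (commentary, not part of the patch):
pv_32bit_guest() now has to separate three vcpu kinds, and update_cr3() must
not consult guest_table_user for compat guests, since the kernel/user
pagetable split exists only for 64-bit PV guests. Roughly:

    /* Illustrative decision table, assuming the definitions above:
     *
     *   vcpu kind              pv_32bit_guest()   user/kernel split?
     *   64-bit PV              false              yes (TF_kernel_mode)
     *   32-on-64 PV (compat)   true               no
     *   HVM                    false              n/a
     */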