# HG changeset patch
# User Emmanuel Ackaouy <ack@xensource.com>
# Node ID 9c2e6f8f3aa7a4e2a1f3af3204789568edf975cd
# Parent  0b882c911b885a51308eee3ec80bc4a5a230d7ce
[XEN] 32on64 shadowing / live migration support for PV PAE compat guests

PAE compat guests on 64-bit hypervisors are shadowed using 4-on-4, with
special handling for the top-level L4 page and the L2E M2P mappings.

Signed-off-by: Emmanuel Ackaouy <ack@xensource.com>
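
For illustration, a minimal standalone sketch of the per-domain L4 slot
policy this patch introduces (not Xen code: the authoritative definition
is the is_guest_l4_slot() hunk in xen/include/asm-x86/x86_64/page.h
below; the slot bounds here are hypothetical stand-ins for
ROOT_PAGETABLE_FIRST_XEN_SLOT/ROOT_PAGETABLE_LAST_XEN_SLOT):

    #include <stdio.h>

    /* Hypothetical stand-ins for Xen's reserved L4 slot range. */
    #define FIRST_XEN_SLOT 256
    #define LAST_XEN_SLOT  271

    /* A compat (32on64) PAE guest only ever controls L4 slot 0: its PAE
     * top level is shadowed 4-on-4 under that single entry.  A native
     * 64-bit guest controls every slot outside the reserved Xen range. */
    static int is_guest_l4_slot(int is_compat, int slot)
    {
        return is_compat ? (slot == 0)
                         : (slot < FIRST_XEN_SLOT || slot > LAST_XEN_SLOT);
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               is_guest_l4_slot(1, 0),    /* 1: compat guest owns slot 0 */
               is_guest_l4_slot(1, 1),    /* 0: everything else is Xen's */
               is_guest_l4_slot(0, 260),  /* 0: inside the reserved range */
               is_guest_l4_slot(0, 100)); /* 1: ordinary guest slot */
        return 0;
    }
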
Index: 2007-02-20/xen/arch/x86/mm.c
===================================================================
--- 2007-02-20.orig/xen/arch/x86/mm.c	2007-02-20 11:01:49.000000000 +0100
+++ 2007-02-20/xen/arch/x86/mm.c	2007-02-20 11:01:50.000000000 +0100
@@ -1095,7 +1095,7 @@ static int alloc_l4_table(struct page_in
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
     {
-        if ( is_guest_l4_slot(i) &&
+        if ( is_guest_l4_slot(d, i) &&
              unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
             goto fail;
 
@@ -1123,7 +1123,7 @@ static int alloc_l4_table(struct page_in
  fail:
     MEM_LOG("Failure in alloc_l4_table: entry %d", i);
     while ( i-- > 0 )
-        if ( is_guest_l4_slot(i) )
+        if ( is_guest_l4_slot(d, i) )
             put_page_from_l4e(pl4e[i], pfn);
 
     return 0;
@@ -1198,12 +1198,13 @@ static void free_l3_table(struct page_in
 
 static void free_l4_table(struct page_info *page)
 {
+    struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
     l4_pgentry_t *pl4e = page_to_virt(page);
     int i;
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
-        if ( is_guest_l4_slot(i) )
+        if ( is_guest_l4_slot(d, i) )
             put_page_from_l4e(pl4e[i], pfn);
 }
 
@@ -1463,13 +1464,14 @@ static int mod_l3_entry(l3_pgentry_t *pl
 #if CONFIG_PAGING_LEVELS >= 4
 
 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
-static int mod_l4_entry(l4_pgentry_t *pl4e,
+static int mod_l4_entry(struct domain *d,
+                        l4_pgentry_t *pl4e,
                         l4_pgentry_t nl4e,
                         unsigned long pfn)
 {
     l4_pgentry_t ol4e;
 
-    if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
+    if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
     {
         MEM_LOG("Illegal L4 update attempt in Xen-private area %p", pl4e);
         return 0;
@@ -1771,8 +1773,10 @@ int new_guest_cr3(unsigned long mfn)
                 put_page(mfn_to_page(old_base_mfn));
         }
         else
-            okay = mod_l4_entry(__va(pagetable_get_paddr(v->arch.guest_table)),
-                                l4e, 0);
+            okay = mod_l4_entry(d,
+                                __va(pagetable_get_paddr(v->arch.guest_table)),
+                                l4e,
+                                pagetable_get_pfn(v->arch.guest_table));
         if ( unlikely(!okay) )
         {
             MEM_LOG("Error while installing new compat baseptr %lx", mfn);
@@ -2360,7 +2364,7 @@ int do_mmu_update(
                 if ( !IS_COMPAT(FOREIGNDOM) )
                 {
                     l4_pgentry_t l4e = l4e_from_intpte(req.val);
-                    okay = mod_l4_entry(va, l4e, mfn);
+                    okay = mod_l4_entry(d, va, l4e, mfn);
                 }
                 break;
 #endif
Index: 2007-02-20/xen/arch/x86/mm/shadow/common.c
===================================================================
--- 2007-02-20.orig/xen/arch/x86/mm/shadow/common.c	2007-02-20 11:01:43.000000000 +0100
+++ 2007-02-20/xen/arch/x86/mm/shadow/common.c	2007-02-20 11:02:03.000000000 +0100
@@ -472,7 +472,11 @@ void shadow_demote(struct vcpu *v, mfn_t
 {
     struct page_info *page = mfn_to_page(gmfn);
 
-    ASSERT(test_bit(_PGC_page_table, &page->count_info));
+#ifdef CONFIG_COMPAT
+    if ( !IS_COMPAT(v->domain) || type != SH_type_l4_64_shadow )
+#endif
+        ASSERT(test_bit(_PGC_page_table, &page->count_info));
+
     ASSERT(test_bit(type, &page->shadow_flags));
 
     clear_bit(type, &page->shadow_flags);
@@ -555,6 +559,9 @@ __shadow_validate_guest_entry(struct vcp
     if ( page->shadow_flags & SHF_L2_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4, 4)
             (v, gmfn, entry, size);
+    if ( page->shadow_flags & SHF_L2H_64 )
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4, 4)
+            (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L3_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4, 4)
             (v, gmfn, entry, size);
@@ -563,7 +570,7 @@ __shadow_validate_guest_entry(struct vcp
             (v, gmfn, entry, size);
 #else /* 32-bit/PAE hypervisor does not support 64-bit guests */
     ASSERT((page->shadow_flags
-            & (SHF_L4_64|SHF_L3_64|SHF_L2_64|SHF_L1_64)) == 0);
+            & (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
 #endif
 
     return result;
@@ -674,7 +681,7 @@ static inline u32
 shadow_order(unsigned int shadow_type)
 {
 #if CONFIG_PAGING_LEVELS > 2
-    static const u32 type_to_order[16] = {
+    static const u32 type_to_order[SH_type_unused] = {
         0, /* SH_type_none           */
         1, /* SH_type_l1_32_shadow   */
         1, /* SH_type_fl1_32_shadow  */
@@ -686,12 +693,13 @@ shadow_order(unsigned int shadow_type)
         0, /* SH_type_l1_64_shadow   */
         0, /* SH_type_fl1_64_shadow  */
         0, /* SH_type_l2_64_shadow   */
+        0, /* SH_type_l2h_64_shadow  */
         0, /* SH_type_l3_64_shadow   */
         0, /* SH_type_l4_64_shadow   */
         2, /* SH_type_p2m_table      */
         0  /* SH_type_monitor_table  */
         };
-    ASSERT(shadow_type < 16);
+    ASSERT(shadow_type < SH_type_unused);
     return type_to_order[shadow_type];
 #else  /* 32-bit Xen only ever shadows 32-bit guests on 32-bit shadows. */
     return 0;
@@ -1849,6 +1857,9 @@ void sh_destroy_shadow(struct vcpu *v, m
            t == SH_type_fl1_pae_shadow ||
            t == SH_type_fl1_64_shadow  ||
            t == SH_type_monitor_table  ||
+#ifdef CONFIG_COMPAT
+           (IS_COMPAT(v->domain) && t == SH_type_l4_64_shadow) ||
+#endif
            (page_get_owner(mfn_to_page(_mfn(sp->backpointer)))
             == v->domain));
 
@@ -1890,6 +1901,8 @@ void sh_destroy_shadow(struct vcpu *v, m
     case SH_type_fl1_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
         break;
+    case SH_type_l2h_64_shadow:
+        ASSERT( IS_COMPAT(v->domain) );
     case SH_type_l2_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
         break;
@@ -1918,7 +1931,7 @@ int shadow_remove_write_access(struct vc
                                unsigned long fault_addr)
 {
     /* Dispatch table for getting per-type functions */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
 #if CONFIG_PAGING_LEVELS == 2
         SHADOW_INTERNAL_NAME(sh_remove_write_access,2,2), /* l1_32   */
@@ -1945,6 +1958,7 @@ int shadow_remove_write_access(struct vc
         NULL, /* fl1_64  */
 #endif
         NULL, /* l2_64   */
+        NULL, /* l2h_64  */
         NULL, /* l3_64   */
         NULL, /* l4_64   */
         NULL, /* p2m     */
@@ -2107,7 +2121,7 @@ int shadow_remove_all_mappings(struct vc
     int expected_count;
 
     /* Dispatch table for getting per-type functions */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
 #if CONFIG_PAGING_LEVELS == 2
         SHADOW_INTERNAL_NAME(sh_remove_all_mappings,2,2), /* l1_32   */
@@ -2134,6 +2148,7 @@ int shadow_remove_all_mappings(struct vc
         NULL, /* fl1_64  */
 #endif
         NULL, /* l2_64   */
+        NULL, /* l2h_64  */
         NULL, /* l3_64   */
         NULL, /* l4_64   */
         NULL, /* p2m     */
@@ -2233,6 +2248,7 @@ static int sh_remove_shadow_via_pointer(
 #if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l1_64_shadow:
     case SH_type_l2_64_shadow:
+    case SH_type_l2h_64_shadow:
    case SH_type_l3_64_shadow:
    case SH_type_l4_64_shadow:
        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
@@ -2267,7 +2283,7 @@ void sh_remove_shadows(struct vcpu *v, m
 
     /* Dispatch table for getting per-type functions: each level must
      * be called with the function to remove a lower-level shadow. */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
         NULL, /* l1_32   */
         NULL, /* fl1_32  */
@@ -2289,10 +2305,12 @@ void sh_remove_shadows(struct vcpu *v, m
         NULL, /* fl1_64  */
 #if CONFIG_PAGING_LEVELS >= 4
         SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2_64   */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2h_64  */
         SHADOW_INTERNAL_NAME(sh_remove_l2_shadow,4,4), /* l3_64   */
         SHADOW_INTERNAL_NAME(sh_remove_l3_shadow,4,4), /* l4_64   */
 #else
         NULL, /* l2_64   */
+        NULL, /* l2h_64  */
         NULL, /* l3_64   */
         NULL, /* l4_64   */
 #endif
@@ -2301,7 +2319,7 @@ void sh_remove_shadows(struct vcpu *v, m
     };
 
     /* Another lookup table, for choosing which mask to use */
-    static unsigned int masks[16] = {
+    static unsigned int masks[SH_type_unused] = {
         0, /* none    */
         1 << SH_type_l2_32_shadow, /* l1_32   */
         0, /* fl1_32  */
@@ -2311,9 +2329,11 @@ void sh_remove_shadows(struct vcpu *v, m
         0, /* fl1_pae */
         0, /* l2_pae  */
         0, /* l2h_pae */
-        1 << SH_type_l2_64_shadow, /* l1_64   */
+        ((1 << SH_type_l2h_64_shadow)
+         | (1 << SH_type_l2_64_shadow)), /* l1_64   */
         0, /* fl1_64  */
         1 << SH_type_l3_64_shadow, /* l2_64   */
+        1 << SH_type_l3_64_shadow, /* l2h_64  */
         1 << SH_type_l4_64_shadow, /* l3_64   */
         0, /* l4_64   */
         0, /* p2m     */
@@ -2360,6 +2380,7 @@ void sh_remove_shadows(struct vcpu *v, m
 #if CONFIG_PAGING_LEVELS >= 4
     if ( sh_flags & SHF_L1_64 ) DO_UNSHADOW(SH_type_l1_64_shadow);
     if ( sh_flags & SHF_L2_64 ) DO_UNSHADOW(SH_type_l2_64_shadow);
+    if ( sh_flags & SHF_L2H_64 ) DO_UNSHADOW(SH_type_l2h_64_shadow);
     if ( sh_flags & SHF_L3_64 ) DO_UNSHADOW(SH_type_l3_64_shadow);
     if ( sh_flags & SHF_L4_64 ) DO_UNSHADOW(SH_type_l4_64_shadow);
 #endif
@@ -2426,10 +2447,7 @@ void sh_update_paging_modes(struct vcpu
         /// PV guest
         ///
 #if CONFIG_PAGING_LEVELS == 4
-        if ( pv_32bit_guest(v) )
-            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-        else
-            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
+        v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
 #elif CONFIG_PAGING_LEVELS == 3
         v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 #elif CONFIG_PAGING_LEVELS == 2
@@ -2938,6 +2956,11 @@ static int shadow_log_dirty_enable(struc
         goto out;
     }
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
+    if ( IS_COMPAT(d) )
+        d->arch.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
+#endif
+
     ret = sh_alloc_log_dirty_bitmap(d);
     if ( ret != 0 )
     {
@@ -3290,7 +3313,7 @@ int shadow_domctl(struct domain *d,
 void shadow_audit_tables(struct vcpu *v)
 {
     /* Dispatch table for getting per-type functions */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
 #if CONFIG_PAGING_LEVELS == 2
         SHADOW_INTERNAL_NAME(sh_audit_l1_table,2,2),  /* l1_32   */
@@ -3308,6 +3331,7 @@ void shadow_audit_tables(struct vcpu *v)
         SHADOW_INTERNAL_NAME(sh_audit_l1_table,4,4),  /* l1_64   */
         SHADOW_INTERNAL_NAME(sh_audit_fl1_table,4,4), /* fl1_64  */
         SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2h_64  */
         SHADOW_INTERNAL_NAME(sh_audit_l3_table,4,4),  /* l3_64   */
         SHADOW_INTERNAL_NAME(sh_audit_l4_table,4,4),  /* l4_64   */
 #endif /* CONFIG_PAGING_LEVELS >= 4 */
@@ -3330,7 +3354,7 @@ void shadow_audit_tables(struct vcpu *v)
         case 3: mask = (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE
                         |SHF_L2H_PAE); break;
         case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64
-                        |SHF_L3_64|SHF_L4_64); break;
+                        |SHF_L2H_64|SHF_L3_64|SHF_L4_64); break;
         default: BUG();
         }
     }
Index: 2007-02-20/xen/arch/x86/mm/shadow/multi.c
===================================================================
--- 2007-02-20.orig/xen/arch/x86/mm/shadow/multi.c	2007-02-20 11:01:43.000000000 +0100
+++ 2007-02-20/xen/arch/x86/mm/shadow/multi.c	2007-02-20 11:01:50.000000000 +0100
@@ -162,8 +162,13 @@ set_shadow_status(struct vcpu *v, mfn_t
     else
         mfn_to_shadow_page(smfn)->logdirty = 0;
 
-    res = get_page(mfn_to_page(gmfn), d);
-    ASSERT(res == 1);
+#ifdef CONFIG_COMPAT
+    if ( !IS_COMPAT(d) || shadow_type != SH_type_l4_64_shadow )
+#endif
+    {
+        res = get_page(mfn_to_page(gmfn), d);
+        ASSERT(res == 1);
+    }
 
     shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
 }
@@ -185,7 +190,10 @@ delete_shadow_status(struct vcpu *v, mfn
                    v->domain->domain_id, v->vcpu_id,
                    mfn_x(gmfn), shadow_type, mfn_x(smfn));
     shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
-    put_page(mfn_to_page(gmfn));
+#ifdef CONFIG_COMPAT
+    if ( !IS_COMPAT(v->domain) || shadow_type != SH_type_l4_64_shadow )
+#endif
+        put_page(mfn_to_page(gmfn));
 }
 
 /**************************************************************************/
@@ -764,7 +772,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !IS_COMPAT(d) && !is_hvm_domain(d) )
     {
         sflags |= _PAGE_USER;
     }
@@ -1235,9 +1243,10 @@ do {
 #if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
 
 /* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
 do { \
     int _i, _j, __done = 0; \
+    int _xen = !shadow_mode_external(_dom); \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow); \
     for ( _j = 0; _j < 4 && !__done; _j++ ) \
     { \
@@ -1261,9 +1270,10 @@ do {
 #elif GUEST_PAGING_LEVELS == 2
 
 /* 32-bit on 32-bit: avoid Xen entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
 do { \
     int _i; \
+    int _xen = !shadow_mode_external(_dom); \
     shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow); \
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
@@ -1283,9 +1293,10 @@ do {
 #elif GUEST_PAGING_LEVELS == 3
 
 /* PAE: if it's an l2h, don't touch Xen mappings */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
 do { \
     int _i; \
+    int _xen = !shadow_mode_external(_dom); \
     shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow \
            || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
@@ -1306,21 +1317,29 @@ do {
 
 #else
 
-/* 64-bit l2: touch all entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
-do { \
-    int _i; \
-    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow); \
-    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
-    { \
-        (_sl2e) = _sp + _i; \
-        if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \
-            {_code} \
-        if ( _done ) break; \
-        increment_ptr_to_guest_entry(_gl2p); \
-    } \
-    unmap_shadow_page(_sp); \
+/* 64-bit l2: touch all entries except for PAE compat guests. */
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
+do { \
+    int _i; \
+    int _xen = !shadow_mode_external(_dom); \
+    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
+    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow || \
+           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow); \
+    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
+    { \
+        if ( (!(_xen)) \
+             || !IS_COMPAT(_dom) \
+             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow \
+             || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) ) \
+        { \
+            (_sl2e) = _sp + _i; \
+            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \
+                {_code} \
+            if ( _done ) break; \
+            increment_ptr_to_guest_entry(_gl2p); \
+        } \
+    } \
+    unmap_shadow_page(_sp); \
 } while (0)
 
 #endif /* different kinds of l2 */
@@ -1345,14 +1364,15 @@ do {
 } while (0)
 
 /* 64-bit l4: avoid Xen mappings */
-#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code) \
 do { \
-    int _i; \
     shadow_l4e_t *_sp = map_shadow_page((_sl4mfn)); \
+    int _xen = !shadow_mode_external(_dom); \
+    int _i; \
     ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow); \
     for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ ) \
     { \
-        if ( (!(_xen)) || is_guest_l4_slot(_i) ) \
+        if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) ) \
         { \
             (_sl4e) = _sp + _i; \
             if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT ) \
@@ -1419,17 +1439,25 @@ void sh_install_xen_entries_in_l4(struct
                                 __PAGE_HYPERVISOR);
     }
 
+    if ( IS_COMPAT(v->domain) )
+    {
+        /* install compat arg xlat entry */
+        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
+            shadow_l4e_from_mfn(
+                page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
+                __PAGE_HYPERVISOR);
+    }
+
     sh_unmap_domain_page(sl4e);
 }
 #endif
 
-#if (CONFIG_PAGING_LEVELS == 3 || defined(CONFIG_COMPAT)) && GUEST_PAGING_LEVELS == 3
+#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
 // For 3-on-3 PV guests, we need to make sure the xen mappings are in
 // place, which means that we need to populate the l2h entry in the l3
 // table.
 
-void sh_install_xen_entries_in_l2h(struct vcpu *v,
-                                   mfn_t sl2hmfn)
+static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
 {
     struct domain *d = v->domain;
     shadow_l2e_t *sl2e;
@@ -1491,9 +1519,10 @@ void sh_install_xen_entries_in_l2h(struc
 #else
 
     /* Copy the common Xen mappings from the idle domain */
-    memcpy(&sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
-           &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-           COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
+    memcpy(
+        &sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
+        &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+        COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
 
 #endif
 
@@ -1619,8 +1648,11 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
     case SH_type_l4_shadow:
         sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
 #endif
-#if CONFIG_PAGING_LEVELS == 3 && GUEST_PAGING_LEVELS == 3
+#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
     case SH_type_l2h_shadow:
+#ifdef CONFIG_COMPAT
+        ASSERT( IS_COMPAT(v->domain) );
+#endif
         sh_install_xen_entries_in_l2h(v, smfn); break;
 #endif
 #if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
@@ -1834,12 +1866,21 @@ static shadow_l2e_t * shadow_get_and_cre
     {
         int r;
         shadow_l3e_t new_sl3e;
+        unsigned int t = SH_type_l2_shadow;
+
+#ifdef CONFIG_COMPAT
+        /* Tag compat L2 containing hypervisor (m2p) mappings */
+        if ( IS_COMPAT(v->domain) &&
+             guest_l4_table_offset(gw->va) == 0 &&
+             guest_l3_table_offset(gw->va) == 3 )
+            t = SH_type_l2h_shadow;
+#endif
         /* No l2 shadow installed: find and install it. */
-        *sl2mfn = get_shadow_status(v, gw->l2mfn, SH_type_l2_shadow);
+        *sl2mfn = get_shadow_status(v, gw->l2mfn, t);
         if ( !mfn_valid(*sl2mfn) )
         {
            /* No l2 shadow of this page exists at all: make one. */
-            *sl2mfn = sh_make_shadow(v, gw->l2mfn, SH_type_l2_shadow);
+            *sl2mfn = sh_make_shadow(v, gw->l2mfn, t);
         }
         /* Install the new sl2 table in the sl3e */
         l3e_propagate_from_guest(v, gw->l3e, gw->l3mfn,
@@ -1960,7 +2001,6 @@ void sh_destroy_l4_shadow(struct vcpu *v
     shadow_l4e_t *sl4e;
     u32 t = mfn_to_shadow_page(smfn)->type;
     mfn_t gmfn, sl4mfn;
-    int xen_mappings;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
@@ -1971,9 +2011,8 @@ void sh_destroy_l4_shadow(struct vcpu *v
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
-    xen_mappings = (!shadow_mode_external(v->domain));
     sl4mfn = smfn;
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
         if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
         {
             sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
@@ -2021,12 +2060,15 @@ void sh_destroy_l2_shadow(struct vcpu *v
     shadow_l2e_t *sl2e;
     u32 t = mfn_to_shadow_page(smfn)->type;
     mfn_t gmfn, sl2mfn;
-    int xen_mappings;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
-    ASSERT(t == SH_type_l2_shadow
-           || t == SH_type_l2h_pae_shadow);
+
+#if GUEST_PAGING_LEVELS >= 3
+    ASSERT(t == SH_type_l2_shadow || t == SH_type_l2h_shadow);
+#else
+    ASSERT(t == SH_type_l2_shadow);
+#endif
 
     /* Record that the guest page isn't shadowed any more (in this type) */
     gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
@@ -2035,11 +2077,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
 
     /* Decrement refcounts of all the old entries */
     sl2mfn = smfn;
-    xen_mappings = (!shadow_mode_external(v->domain) &&
-                    ((GUEST_PAGING_LEVELS == 2) ||
-                     ((GUEST_PAGING_LEVELS == 3) &&
-                      (t == SH_type_l2h_pae_shadow))));
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
             sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
@@ -2140,8 +2178,7 @@ void sh_destroy_monitor_table(struct vcp
 void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn)
 {
     shadow_l2e_t *sl2e;
-    int xen_mappings = !shadow_mode_external(v->domain);
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
     });
 }
@@ -2152,8 +2189,7 @@ void sh_unhook_pae_mappings(struct vcpu
 /* Walk a PAE l2 shadow, unhooking entries from all the subshadows */
 {
     shadow_l2e_t *sl2e;
-    int xen_mappings = !shadow_mode_external(v->domain);
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
     });
 }
@@ -2163,8 +2199,7 @@ void sh_unhook_pae_mappings(struct vcpu
 void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn)
 {
     shadow_l4e_t *sl4e;
-    int xen_mappings = !shadow_mode_external(v->domain);
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
         (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
     });
 }
@@ -2210,7 +2245,7 @@ static int validate_gl4e(struct vcpu *v,
     {
         int shadow_index = (((unsigned long)sl4p & ~PAGE_MASK) /
                             sizeof(shadow_l4e_t));
-        int reserved_xen_slot = !is_guest_l4_slot(shadow_index);
+        int reserved_xen_slot = !is_guest_l4_slot(v->domain, shadow_index);
 
         if ( unlikely(reserved_xen_slot) )
         {
@@ -2473,7 +2508,7 @@ int
 sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn,
                           void *new_gl2p, u32 size)
 {
-#if GUEST_PAGING_LEVELS == 3
+#if GUEST_PAGING_LEVELS >= 3
     return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
                                SH_type_l2h_shadow,
                                shadow_l2_index,
@@ -3350,7 +3385,12 @@ sh_set_toplevel_shadow(struct vcpu *v,
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_EARLY_UNSHADOW
     /* Once again OK to unhook entries from this table if we see fork/exit */
-    ASSERT(sh_mfn_is_a_page_table(gmfn));
+#if CONFIG_PAGING_LEVELS == 4
+    if ( IS_COMPAT(d) )
+        ASSERT(!sh_mfn_is_a_page_table(gmfn));
+    else
+#endif
+        ASSERT(sh_mfn_is_a_page_table(gmfn));
     mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
 #endif
 
@@ -3746,7 +3786,7 @@ void sh_clear_shadow_entry(struct vcpu *
     case SH_type_l1_shadow:
         (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
     case SH_type_l2_shadow:
-#if GUEST_PAGING_LEVELS == 3
+#if GUEST_PAGING_LEVELS >= 3
     case SH_type_l2h_shadow:
 #endif
         (void) shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
@@ -3766,11 +3806,8 @@ int sh_remove_l1_shadow(struct vcpu *v,
     shadow_l2e_t *sl2e;
     int done = 0;
     int flags;
-#if GUEST_PAGING_LEVELS != 4
-    int xen_mappings = !shadow_mode_external(v->domain);
-#endif
 
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, xen_mappings,
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain,
     {
         flags = shadow_l2e_get_flags(*sl2e);
         if ( (flags & _PAGE_PRESENT)
@@ -3813,9 +3850,9 @@ int sh_remove_l3_shadow(struct vcpu *v,
 {
     shadow_l4e_t *sl4e;
     int done = 0;
-    int flags, xen_mappings = !shadow_mode_external(v->domain);
+    int flags;
 
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, xen_mappings,
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, v->domain,
     {
         flags = shadow_l4e_get_flags(*sl4e);
         if ( (flags & _PAGE_PRESENT)
@@ -4153,14 +4190,11 @@ int sh_audit_l2_table(struct vcpu *v, mf
     gfn_t gfn;
     char *s;
     int done = 0;
-#if GUEST_PAGING_LEVELS != 4
-    int xen_mappings = !shadow_mode_external(v->domain);
-#endif
 
     /* Follow the backpointer */
     gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
     gl2e = gp = sh_map_domain_page(gl2mfn);
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, v->domain, {
 
         s = sh_audit_flags(v, 2, guest_l2e_get_flags(*gl2e),
                            shadow_l2e_get_flags(*sl2e));
@@ -4212,10 +4246,11 @@ int sh_audit_l3_table(struct vcpu *v, mf
         gfn = guest_l3e_get_gfn(*gl3e);
         mfn = shadow_l3e_get_mfn(*sl3e);
         gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl3mfn),
-                                 (GUEST_PAGING_LEVELS == 3
+                                 ((GUEST_PAGING_LEVELS == 3 ||
+                                   IS_COMPAT(v->domain))
                                   && !shadow_mode_external(v->domain)
                                   && (guest_index(gl3e) % 4) == 3)
-                                 ? SH_type_l2h_pae_shadow
+                                 ? SH_type_l2h_shadow
                                  : SH_type_l2_shadow);
         if ( mfn_x(gmfn) != mfn_x(mfn) )
             AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn
@@ -4235,12 +4270,11 @@ int sh_audit_l4_table(struct vcpu *v, mf
     gfn_t gfn;
     char *s;
     int done = 0;
-    int xen_mappings = !shadow_mode_external(v->domain);
 
     /* Follow the backpointer */
     gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
    gl4e = gp = sh_map_domain_page(gl4mfn);
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, xen_mappings,
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, v->domain,
     {
         s = sh_audit_flags(v, 4, guest_l4e_get_flags(*gl4e),
                            shadow_l4e_get_flags(*sl4e));
Index: 2007-02-20/xen/arch/x86/mm/shadow/private.h
===================================================================
--- 2007-02-20.orig/xen/arch/x86/mm/shadow/private.h	2007-02-20 11:01:43.000000000 +0100
+++ 2007-02-20/xen/arch/x86/mm/shadow/private.h	2007-02-20 11:01:50.000000000 +0100
@@ -190,12 +190,13 @@ static inline void shadow_check_page_str
 #define SH_type_l1_64_shadow   (8U) /* shadowing a 64-bit L1 page */
 #define SH_type_fl1_64_shadow  (9U) /* L1 shadow for 64-bit 2M superpg */
 #define SH_type_l2_64_shadow  (10U) /* shadowing a 64-bit L2 page */
-#define SH_type_l3_64_shadow  (11U) /* shadowing a 64-bit L3 page */
-#define SH_type_l4_64_shadow  (12U) /* shadowing a 64-bit L4 page */
-#define SH_type_max_shadow    (12U)
-#define SH_type_p2m_table     (13U) /* in use as the p2m table */
-#define SH_type_monitor_table (14U) /* in use as a monitor table */
-#define SH_type_unused        (15U)
+#define SH_type_l2h_64_shadow (11U) /* shadowing a compat PAE L2 high page */
+#define SH_type_l3_64_shadow  (12U) /* shadowing a 64-bit L3 page */
+#define SH_type_l4_64_shadow  (13U) /* shadowing a 64-bit L4 page */
+#define SH_type_max_shadow    (13U)
+#define SH_type_p2m_table     (14U) /* in use as the p2m table */
+#define SH_type_monitor_table (15U) /* in use as a monitor table */
+#define SH_type_unused        (16U)
 
 /*
  * What counts as a pinnable shadow?
@@ -246,6 +247,7 @@ static inline int sh_type_is_pinnable(st
 #define SHF_L1_64   (1u << SH_type_l1_64_shadow)
 #define SHF_FL1_64  (1u << SH_type_fl1_64_shadow)
 #define SHF_L2_64   (1u << SH_type_l2_64_shadow)
+#define SHF_L2H_64  (1u << SH_type_l2h_64_shadow)
 #define SHF_L3_64   (1u << SH_type_l3_64_shadow)
 #define SHF_L4_64   (1u << SH_type_l4_64_shadow)
 
@@ -284,7 +286,6 @@ void shadow_unhook_mappings(struct vcpu
 
 /* Install the xen mappings in various flavours of shadow */
 void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
-void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn);
 void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
 
 
Index: 2007-02-20/xen/arch/x86/mm/shadow/types.h
===================================================================
--- 2007-02-20.orig/xen/arch/x86/mm/shadow/types.h	2007-02-20 11:01:43.000000000 +0100
+++ 2007-02-20/xen/arch/x86/mm/shadow/types.h	2007-02-20 11:01:50.000000000 +0100
@@ -389,6 +389,7 @@ static inline guest_l4e_t guest_l4e_from
 #define SH_type_l1_shadow  SH_type_l1_64_shadow
 #define SH_type_fl1_shadow SH_type_fl1_64_shadow
 #define SH_type_l2_shadow  SH_type_l2_64_shadow
+#define SH_type_l2h_shadow SH_type_l2h_64_shadow
 #define SH_type_l3_shadow  SH_type_l3_64_shadow
 #define SH_type_l4_shadow  SH_type_l4_64_shadow
 #endif
Index: 2007-02-20/xen/include/asm-x86/x86_64/page.h
===================================================================
--- 2007-02-20.orig/xen/include/asm-x86/x86_64/page.h	2007-02-20 11:01:43.000000000 +0100
+++ 2007-02-20/xen/include/asm-x86/x86_64/page.h	2007-02-20 11:01:50.000000000 +0100
@@ -59,9 +59,11 @@ typedef l4_pgentry_t root_pgentry_t;
       !((_t) & PGT_pae_xen_l2) || \
       ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
 #define is_guest_l3_slot(_s) (1)
-#define is_guest_l4_slot(_s) \
-    (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
-     ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))
+#define is_guest_l4_slot(_d, _s) \
+    ( IS_COMPAT(_d) \
+      ? ((_s) == 0) \
+      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
+         ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
 
 #define root_get_pfn              l4e_get_pfn
 #define root_get_flags            l4e_get_flags