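Add 2MB super-page support for HVM guests backed by EPT.

The domain builder keeps 4KB allocations for the VGA hole and the first
2MB of guest memory, then tries to populate the rest with 2MB extents,
falling back to 4KB pages where a large allocation fails.  To support
this, guest_physmap_add_page()/guest_physmap_remove_page() and the p2m
set_entry() hook gain a page-order argument on all architectures, and
the EPT walker learns to install, look up, and split 2MB super-page
entries.
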
Index: xen-3.2.1-testing/tools/libxc/xc_hvm_build.c
===================================================================
--- xen-3.2.1-testing.orig/tools/libxc/xc_hvm_build.c
+++ xen-3.2.1-testing/tools/libxc/xc_hvm_build.c
@@ -208,15 +208,39 @@ static int setup_guest(int xc_handle,
     for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
         page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
 
-    /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
+    /* Allocate memory for the HVM guest:
+     * 1) skip the VGA hole 0xA0000-0xC0000;
+     * 2) allocate 4K pages for the first 2MB of guest memory;
+     * 3) try 2MB pages for the remaining memory, else fall back to 4K pages.
+     */
     rc = xc_domain_memory_populate_physmap(
         xc_handle, dom, 0xa0, 0, 0, &page_array[0x00]);
     if ( rc == 0 )
         rc = xc_domain_memory_populate_physmap(
-            xc_handle, dom, nr_pages - 0xc0, 0, 0, &page_array[0xc0]);
+            xc_handle, dom, 0x200 - 0xc0, 0, 0, &page_array[0xc0]);
     if ( rc != 0 )
     {
-        PERROR("Could not allocate memory for HVM guest.\n");
+        PERROR("Cannot allocate memory for HVM guest 1.\n");
+        goto error_out;
+    }
+
+    /* Allocate 2M pages */
+    for ( i = 0x200; (i + 0x1ff) < nr_pages; i += 0x200 )
+    {
+        rc = xc_domain_memory_populate_physmap(
+            xc_handle, dom, 1, 9, 0, &page_array[i]);
+        if ( rc != 0 )
+        {
+            PERROR("Cannot allocate more 2M pages for HVM guest.\n");
+            break;
+        }
+    }
+    if ( i < nr_pages )
+        rc = xc_domain_memory_populate_physmap(
+            xc_handle, dom, nr_pages - i, 0, 0, &page_array[i]);
+    if ( rc != 0 )
+    {
+        PERROR("Cannot allocate memory for HVM guest 2.\n");
         goto error_out;
     }
 
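A note on the constants in the hunk above: guest pfns 0x00-0x9f (0xa0
pages) cover the RAM below the VGA hole, pfns 0xa0-0xbf are the skipped
hole, and allocation resumes at pfn 0xc0 up to pfn 0x1ff, so everything
below 2MB (0x200 pages x 4KiB = 2MiB) is populated with 4KB pages.  One
2MB extent is extent_order 9, i.e. 1 << 9 = 512 pages.  A stand-alone
sketch of the same policy (illustrative only, not part of the patch; it
assumes the xc_domain_memory_populate_physmap() signature used above):

    #include <stdint.h>
    #include <xenctrl.h>  /* xc_domain_memory_populate_physmap(), xen_pfn_t */

    /* Populate nr_pages guest frames, preferring 2MB extents
     * (extent_order 9 == 512 pages) and falling back to 4KB extents
     * once a large allocation fails. */
    static int populate_prefer_2mb(int xc_handle, uint32_t dom,
                                   unsigned long nr_pages,
                                   xen_pfn_t *page_array)
    {
        unsigned long i;
        int rc = 0;

        for ( i = 0; (i + 0x1ff) < nr_pages; i += 0x200 )
        {
            rc = xc_domain_memory_populate_physmap(
                xc_handle, dom, 1 /* nr_extents */, 9 /* extent_order */,
                0 /* mem_flags */, &page_array[i]);
            if ( rc != 0 )
                break;  /* no contiguous 2MB chunk left: fall back below */
        }

        if ( i < nr_pages )  /* tail, or everything past the fallback point */
            rc = xc_domain_memory_populate_physmap(
                xc_handle, dom, nr_pages - i, 0, 0, &page_array[i]);

        return rc;
    }
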
Index: xen-3.2.1-testing/xen/arch/ia64/xen/mm.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/ia64/xen/mm.c
+++ xen-3.2.1-testing/xen/arch/ia64/xen/mm.c
@@ -2416,7 +2416,7 @@ steal_page(struct domain *d, struct page
 
 int
 guest_physmap_add_page(struct domain *d, unsigned long gpfn,
-                       unsigned long mfn)
+                       unsigned long mfn, int order)
 {
     BUG_ON(!mfn_valid(mfn));
     BUG_ON(mfn_to_page(mfn)->count_info != (PGC_allocated | 1));
@@ -2433,7 +2433,7 @@ guest_physmap_add_page(struct domain *d,
 
 void
 guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
-                          unsigned long mfn)
+                          unsigned long mfn, int order)
 {
     BUG_ON(mfn == 0);//XXX
     zap_domain_page_one(d, gpfn << PAGE_SHIFT, 0, mfn);
@@ -2836,7 +2836,7 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
     if (prev_mfn && mfn_valid(prev_mfn)) {
         if (is_xen_heap_mfn(prev_mfn))
             /* Xen heap frames are simply unhooked from this phys slot. */
-            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn);
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
         else
             /* Normal domain memory is freed, to avoid leaking memory. */
             guest_remove_page(d, xatp.gpfn);
@@ -2845,10 +2845,10 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
     /* Unmap from old location, if any. */
     gpfn = get_gpfn_from_mfn(mfn);
     if (gpfn != INVALID_M2P_ENTRY)
-        guest_physmap_remove_page(d, gpfn, mfn);
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
     /* Map at new location. */
-    guest_physmap_add_page(d, xatp.gpfn, mfn);
+    guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
 out:
     UNLOCK_BIGLOCK(d);
Index: xen-3.2.1-testing/xen/arch/powerpc/mm.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/powerpc/mm.c
+++ xen-3.2.1-testing/xen/arch/powerpc/mm.c
@@ -591,7 +591,7 @@ void guest_physmap_add_page(
 }
 
 void guest_physmap_remove_page(
-    struct domain *d, unsigned long gpfn, unsigned long mfn)
+    struct domain *d, unsigned long gpfn, unsigned long mfn, int order)
 {
     if (page_get_owner(mfn_to_page(mfn)) != d) {
         printk("Won't unmap foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
Index: xen-3.2.1-testing/xen/arch/x86/mm.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/mm.c
+++ xen-3.2.1-testing/xen/arch/x86/mm.c
@@ -3286,7 +3286,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
         {
             if ( is_xen_heap_mfn(prev_mfn) )
                 /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn);
+                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
             else
                 /* Normal domain memory is freed, to avoid leaking memory. */
                 guest_remove_page(d, xatp.gpfn);
@@ -3295,10 +3295,10 @@ long arch_memory_op(int op, XEN_GUEST_HA
         /* Unmap from old location, if any. */
         gpfn = get_gpfn_from_mfn(mfn);
         if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn);
+            guest_physmap_remove_page(d, gpfn, mfn, 0);
 
         /* Map at new location. */
-        guest_physmap_add_page(d, xatp.gpfn, mfn);
+        guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
         UNLOCK_BIGLOCK(d);
 
Index: xen-3.2.1-testing/xen/arch/x86/mm/p2m-ept.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/mm/p2m-ept.c
+++ xen-3.2.1-testing/xen/arch/x86/mm/p2m-ept.c
@@ -20,6 +20,7 @@
 #include <xen/domain_page.h>
 #include <xen/sched.h>
 #include <asm/current.h>
+#include <asm/paging.h>
 #include <asm/types.h>
 #include <asm/domain.h>
 #include <asm/hvm/vmx/vmx.h>
@@ -32,7 +33,6 @@ static int ept_next_level(struct domain
     u32 index;
 
     index = *gfn_remainder >> shift;
-    *gfn_remainder &= (1UL << shift) - 1;
 
     ept_entry = (*table) + index;
 
@@ -59,31 +59,52 @@ static int ept_next_level(struct domain
         /* last step */
         ept_entry->r = ept_entry->w = ept_entry->x = 1;
     }
-
-    next = map_domain_page(ept_entry->mfn);
-    unmap_domain_page(*table);
-    *table = next;
-
-    return 1;
+    if ( !ept_entry->sp_avail )
+    {
+        *gfn_remainder &= (1UL << shift) - 1;
+        next = map_domain_page(ept_entry->mfn);
+        unmap_domain_page(*table);
+        *table = next;
+        return 1;
+    }
+    else
+        return 2;
 }
 
+#define GUEST_TABLE_SUPER_PAGE 2
 static int
-ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
+ept_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, int order, p2m_type_t p2mt)
 {
     ept_entry_t *table =
         map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-    unsigned long gfn_remainder = gfn;
+    unsigned long gfn_remainder = gfn, offset = 0;
     ept_entry_t *ept_entry;
     u32 index;
-    int i, rv = 0;
+    int i, rv = 0, ret = 0;
+    int walk_level = order / EPT_TABLE_ORDER;
+
+    ASSERT(table != NULL);
 
     /* should check if gfn obeys GAW here */
 
-    for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
-        if ( !ept_next_level(d, 0, &table, &gfn_remainder, i * EPT_TABLE_ORDER) )
+    if ( order != 0 )
+        if ( (gfn & ((1UL << order) - 1)) )
+            return 1;
+
+    for ( i = EPT_DEFAULT_GAW; i > walk_level; i-- )
+    {
+        ret = ept_next_level(d, 0, &table,
+                             &gfn_remainder, i * EPT_TABLE_ORDER);
+        if ( !ret )
             goto out;
+        else if ( ret == GUEST_TABLE_SUPER_PAGE )
+            break;
+    }
+
+    index = gfn_remainder >> ( i ? (i * EPT_TABLE_ORDER) : order);
+    walk_level = ( i ? (i * EPT_TABLE_ORDER) : order) / EPT_TABLE_ORDER;
+    offset = gfn_remainder & ((1 << (i * EPT_TABLE_ORDER)) - 1);
 
-    index = gfn_remainder;
     ept_entry = table + index;
 
     if ( mfn_valid(mfn_x(mfn)) )
@@ -93,9 +114,20 @@ ept_set_entry(struct domain *d, unsigned
             d->arch.p2m.max_mapped_pfn = gfn;
 
         ept_entry->emt = EPT_DEFAULT_MT;
-        ept_entry->sp_avail = 0;
+        ept_entry->sp_avail = walk_level ? 1 : 0;
+
+        if ( ret == 2 )
+        {
+            ept_entry->mfn = mfn_x(mfn) - offset;
+            if ( ept_entry->avail1 == p2m_ram_logdirty &&
+                 p2mt == p2m_ram_rw )
+                for ( i = 0; i < 512; i++ )
+                    paging_mark_dirty(d, mfn_x(mfn) - offset + i);
+        }
+        else
+            ept_entry->mfn = mfn_x(mfn);
+
         ept_entry->avail1 = p2mt;
-        ept_entry->mfn = mfn_x(mfn);
         ept_entry->rsvd = 0;
         ept_entry->avail2 = 0;
         /* last step */
@@ -122,7 +154,7 @@ static mfn_t ept_get_entry(struct domain
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     u32 index;
-    int i;
+    int i, ret = 0;
    mfn_t mfn = _mfn(INVALID_MFN);
 
     *t = p2m_mmio_dm;
@@ -134,18 +166,29 @@ static mfn_t ept_get_entry(struct domain
     /* should check if gfn obeys GAW here */
 
     for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
-        if ( !ept_next_level(d, 1, &table, &gfn_remainder, i * EPT_TABLE_ORDER) )
+    {
+        ret = ept_next_level(d, 1, &table, &gfn_remainder, i * EPT_TABLE_ORDER);
+        if ( ret == 0 )
             goto out;
+        else if ( ret == 2 )
+            break;
+    }
 
-    index = gfn_remainder;
+    index = gfn_remainder >> (i * EPT_TABLE_ORDER);
     ept_entry = table + index;
 
-    if ( (ept_entry->epte & 0x7) == 0x7 )
+    if ( ept_entry->avail1 != p2m_invalid )
     {
-        if ( ept_entry->avail1 != p2m_invalid )
-        {
-            *t = ept_entry->avail1;
-            mfn = _mfn(ept_entry->mfn);
+        *t = ept_entry->avail1;
+        mfn = _mfn(ept_entry->mfn);
+        if ( i )
+        {
+            /* We may hit a super page here; split it into 4K frames
+             * to emulate a per-page p2m table lookup.
+             */
+            unsigned long split_mfn =
+                mfn_x(mfn) + (gfn_remainder & ((1 << (i * EPT_TABLE_ORDER)) - 1));
+            mfn = _mfn(split_mfn);
         }
     }
 
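A note on the ept_next_level() convention introduced above, roughly: 0
means the next-level table is not present and could not be provided, 1
means the walk descended one level into a normal table, and 2
(GUEST_TABLE_SUPER_PAGE) means the current entry maps a 2MB super page,
so the walk stops early and gfn_remainder is left unshifted.  The caller
then derives the entry index and the 4KB offset inside the mapping; a
small sketch of the arithmetic (illustrative only, assuming
EPT_TABLE_ORDER is 9):

    /* Walk stopped at level i (i == 1 for a 2MB super page). */
    static inline unsigned int ept_index(unsigned long gfn_remainder, int i)
    {
        return gfn_remainder >> (i * 9);        /* entry within this table */
    }

    static inline unsigned long ept_sp_offset(unsigned long gfn_remainder, int i)
    {
        return gfn_remainder & ((1UL << (i * 9)) - 1); /* 4KB frame within it */
    }

On a super-page hit, ept_set_entry() stores the first frame of the 2MB
region (mfn - offset) in the entry, and ept_get_entry() adds the offset
back, so callers still see one mfn per 4KB gfn.
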
Index: xen-3.2.1-testing/xen/arch/x86/mm/p2m.c
===================================================================
--- xen-3.2.1-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-3.2.1-testing/xen/arch/x86/mm/p2m.c
@@ -203,7 +203,7 @@ p2m_next_level(struct domain *d, mfn_t *
 
 // Returns 0 on error (out of memory)
 static int
-p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
+p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, int order, p2m_type_t p2mt)
 {
     // XXX -- this might be able to be faster iff current->domain == d
     mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
@@ -285,9 +285,9 @@ void p2m_init(struct domain *d)
 }
 
 static inline
-int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
+int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, int order, p2m_type_t p2mt)
 {
-    return d->arch.p2m.set_entry(d, gfn, mfn, p2mt);
+    return d->arch.p2m.set_entry(d, gfn, mfn, order, p2mt);
 }
 
 // Allocate a new p2m table for a domain.
@@ -349,7 +349,7 @@ int p2m_alloc_table(struct domain *d,
     P2M_PRINTK("populating p2m table\n");
 
     /* Initialise physmap tables for slot zero. Other code assumes this. */
-    if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), p2m_invalid) )
+    if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), 0, p2m_invalid) )
         goto error;
 
     /* Copy all existing mappings from the page list and m2p */
@@ -368,7 +368,7 @@ int p2m_alloc_table(struct domain *d,
             (gfn != 0x55555555L)
 #endif
              && gfn != INVALID_M2P_ENTRY
-             && !set_p2m_entry(d, gfn, mfn, p2m_ram_rw) )
+             && !set_p2m_entry(d, gfn, mfn, 0, p2m_ram_rw) )
             goto error;
     }
 
@@ -682,30 +682,31 @@ static void audit_p2m(struct domain *d)
 
 
 static void
-p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn, int order)
 {
     if ( !paging_mode_translate(d) )
         return;
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
 
-    set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid);
+    set_p2m_entry(d, gfn, _mfn(INVALID_MFN), order, p2m_invalid);
     set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
 }
 
 void
 guest_physmap_remove_page(struct domain *d, unsigned long gfn,
-                          unsigned long mfn)
+                          unsigned long mfn, int order)
 {
     p2m_lock(d);
     audit_p2m(d);
-    p2m_remove_page(d, gfn, mfn);
+    for ( int i = 0; i < (1 << order); i++ )
+        p2m_remove_page(d, gfn + i, mfn + i, order);
     audit_p2m(d);
     p2m_unlock(d);
 }
 
 int
 guest_physmap_add_entry(struct domain *d, unsigned long gfn,
-                        unsigned long mfn, p2m_type_t t)
+                        unsigned long mfn, int order, p2m_type_t t)
 {
     unsigned long ogfn;
     p2m_type_t ot;
@@ -756,13 +757,13 @@ guest_physmap_add_entry(struct domain *d
             P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                       ogfn , mfn_x(omfn));
             if ( mfn_x(omfn) == mfn )
-                p2m_remove_page(d, ogfn, mfn);
+                p2m_remove_page(d, ogfn, mfn, order);
         }
     }
 
     if ( mfn_valid(_mfn(mfn)) )
     {
-        if ( !set_p2m_entry(d, gfn, _mfn(mfn), t) )
+        if ( !set_p2m_entry(d, gfn, _mfn(mfn), order, t) )
             rc = -EINVAL;
         set_gpfn_from_mfn(mfn, gfn);
     }
@@ -770,7 +771,7 @@ guest_physmap_add_entry(struct domain *d
     {
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
-        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid) )
+        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), order, p2m_invalid) )
             rc = -EINVAL;
     }
 
@@ -893,7 +894,7 @@ p2m_type_t p2m_change_type(struct domain
 
     mfn = gfn_to_mfn(d, gfn, &pt);
     if ( pt == ot )
-        set_p2m_entry(d, gfn, mfn, nt);
+        set_p2m_entry(d, gfn, mfn, 0, nt);
 
     p2m_unlock(d);
 
@@ -917,7 +918,7 @@ set_mmio_p2m_entry(struct domain *d, uns
         set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
     }
 
-    rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
+    rc = set_p2m_entry(d, gfn, mfn, 0, p2m_mmio_direct);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
@@ -941,7 +942,7 @@ clear_mmio_p2m_entry(struct domain *d, u
             "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
         return 0;
     }
-    rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0);
+    rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0, 0);
 
     return rc;
 }
Index: xen-3.2.1-testing/xen/common/grant_table.c
===================================================================
--- xen-3.2.1-testing.orig/xen/common/grant_table.c
+++ xen-3.2.1-testing/xen/common/grant_table.c
@@ -1154,7 +1154,7 @@ gnttab_transfer(
         spin_lock(&e->grant_table->lock);
 
         sha = &shared_entry(e->grant_table, gop.ref);
-        guest_physmap_add_page(e, sha->frame, mfn);
+        guest_physmap_add_page(e, sha->frame, mfn, 0);
         sha->frame = mfn;
         wmb();
         sha->flags |= GTF_transfer_completed;
Index: xen-3.2.1-testing/xen/common/memory.c
===================================================================
--- xen-3.2.1-testing.orig/xen/common/memory.c
+++ xen-3.2.1-testing/xen/common/memory.c
@@ -113,8 +113,11 @@ static void populate_physmap(struct memo
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i << a->extent_order, 1)) )
+        {
+            printk("copy_from_guest failed.\n");
             goto out;
+        }
 
         page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
         if ( unlikely(page == NULL) )
@@ -129,11 +132,7 @@ static void populate_physmap(struct memo
         mfn = page_to_mfn(page);
 
         if ( unlikely(paging_mode_translate(d)) )
-        {
-            for ( j = 0; j < (1 << a->extent_order); j++ )
-                if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
-                    goto out;
-        }
+            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
         else
         {
             for ( j = 0; j < (1 << a->extent_order); j++ )
@@ -175,7 +174,7 @@ int guest_remove_page(struct domain *d,
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
 
-    guest_physmap_remove_page(d, gmfn, mfn);
+    guest_physmap_remove_page(d, gmfn, mfn, 0);
 
     put_page(page);
 
@@ -416,7 +415,7 @@ static long memory_exchange(XEN_GUEST_HA
             if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 BUG();
             mfn = page_to_mfn(page);
-            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn);
+            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
             put_page(page);
         }
 
@@ -438,8 +437,7 @@ static long memory_exchange(XEN_GUEST_HA
             if ( unlikely(paging_mode_translate(d)) )
             {
                 /* Ignore failure here. There's nothing we can do. */
-                for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
-                    (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
+                (void)guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
             }
             else
             {
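Two details in the populate_physmap() change above: the gpfn for extent
i is now read from extent_list at offset i << a->extent_order, matching
the libxc side, which still passes one array slot per 4KB page (in the
xc_hvm_build.c hunk, 2MB extents start every 0x200 entries of
page_array; e.g. with extent_order 9, extent 3's gpfn sits at
extent_list[3 << 9] = extent_list[1536]).  And the per-page loop over
guest_physmap_add_page() collapses into a single call that passes the
whole extent_order down to the p2m code.
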
Index: xen-3.2.1-testing/xen/include/asm-ia64/grant_table.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-ia64/grant_table.h
+++ xen-3.2.1-testing/xen/include/asm-ia64/grant_table.h
@@ -13,7 +13,7 @@ int create_grant_host_mapping(unsigned l
 int replace_grant_host_mapping(unsigned long gpaddr, unsigned long mfn, unsigned long new_gpaddr, unsigned int flags);
 
 // for grant transfer
-int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
+int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn, int order);
 
 /* XXX
  * somewhere appropriate
Index: xen-3.2.1-testing/xen/include/asm-ia64/shadow.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-ia64/shadow.h
+++ xen-3.2.1-testing/xen/include/asm-ia64/shadow.h
@@ -40,8 +40,10 @@
  * Utilities to change relationship of gpfn->mfn for designated domain,
  * which is required by gnttab transfer, balloon, device model and etc.
  */
-int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
-void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
+int guest_physmap_add_page(struct domain *d, unsigned long gpfn,
+                           unsigned long mfn, int order);
+void guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
+                               unsigned long mfn, int order);
 
 static inline int
 shadow_mode_enabled(struct domain *d)
Index: xen-3.2.1-testing/xen/include/asm-powerpc/mm.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-powerpc/mm.h
+++ xen-3.2.1-testing/xen/include/asm-powerpc/mm.h
@@ -278,9 +278,9 @@ extern int steal_page(struct domain *d,
 extern int guest_physmap_max_mem_pages(struct domain *d, unsigned long new_max);
 
 extern void guest_physmap_add_page(
-    struct domain *d, unsigned long gpfn, unsigned long mfn);
+    struct domain *d, unsigned long gpfn, unsigned long mfn, int order);
 
 extern void guest_physmap_remove_page(
-    struct domain *d, unsigned long gpfn, unsigned long mfn);
+    struct domain *d, unsigned long gpfn, unsigned long mfn, int order);
 
 #endif
Index: xen-3.2.1-testing/xen/include/asm-x86/domain.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-x86/domain.h
+++ xen-3.2.1-testing/xen/include/asm-x86/domain.h
@@ -166,7 +166,7 @@ struct p2m_domain {
     void               (*free_page   )(struct domain *d,
                                        struct page_info *pg);
     int                (*set_entry   )(struct domain *d, unsigned long gfn,
-                                       mfn_t mfn, p2m_type_t p2mt);
+                                       mfn_t mfn, int order, p2m_type_t p2mt);
     mfn_t              (*get_entry   )(struct domain *d, unsigned long gfn,
                                        p2m_type_t *p2mt);
     mfn_t              (*get_entry_fast)(unsigned long gfn, p2m_type_t *p2mt);
Index: xen-3.2.1-testing/xen/include/asm-x86/p2m.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/asm-x86/p2m.h
+++ xen-3.2.1-testing/xen/include/asm-x86/p2m.h
@@ -191,21 +191,32 @@ void p2m_teardown(struct domain *d);
 
 /* Add a page to a domain's p2m table */
 int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
-                            unsigned long mfn, p2m_type_t t);
+                            unsigned long mfn, int order, p2m_type_t t);
 
 /* Untyped version for RAM only, for compatibility
  *
  * Return 0 for success
  */
 static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
-                                         unsigned long mfn)
+                                         unsigned long mfn, int order)
 {
-    return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
+    int ret;
+
+    for ( int i = 0; i < (1 << order); i++ )
+    {
+        ret = guest_physmap_add_entry(d, gfn + i, mfn + i, order, p2m_ram_rw);
+        if ( ret != 0 )
+            break;
+    }
+
+    /* TODO: fix the exit path on failure */
+
+    return ret;
 }
 
 /* Remove a page from a domain's p2m table */
 void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
-                               unsigned long mfn);
+                               unsigned long mfn, int order);
 
 /* Change types across all p2m entries in a domain */
 void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
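A note on the loop above: guest_physmap_add_entry() is still invoked
once per 4KB frame so that the m2p is updated for every mfn
(set_gpfn_from_mfn() works on single frames), while the EPT backend only
installs the 2MB mapping on the first, order-aligned call; as added in
the p2m-ept.c hunk, ept_set_entry() returns 1 (success) immediately for
gfns not aligned to the requested order, so the remaining 511 iterations
of an order-9 add are effectively no-ops on the EPT side.
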
Index: xen-3.2.1-testing/xen/include/xen/paging.h
===================================================================
--- xen-3.2.1-testing.orig/xen/include/xen/paging.h
+++ xen-3.2.1-testing/xen/include/xen/paging.h
@@ -18,8 +18,8 @@
 #else
 
 #define paging_mode_translate(d) (0)
-#define guest_physmap_add_page(d, p, m) (0)
-#define guest_physmap_remove_page(d, p, m) ((void)0)
+#define guest_physmap_add_page(d, p, m, order) (0)
+#define guest_physmap_remove_page(d, p, m, order) ((void)0)
 
 #endif