Subject: xenpaging: update machine_to_phys_mapping during page-in and page-out

The machine_to_phys_mapping array needs updating during page-out.
If a page is gone, a call to get_gpfn_from_mfn will still return the old
gfn for an already paged-out page. This happens when the entire guest
RAM is paged out before xen_vga_populate_vram() runs. Then
XENMEM_populate_physmap is called with gfn 0xff000. A new page is
allocated with alloc_domheap_pages. This new page does not have a gfn
yet. However, in guest_physmap_add_entry() the passed mfn still maps to
an old gfn. This old gfn is paged out and has no mfn anymore. As a
result, the ASSERT() triggers because p2m_is_ram() is true for
p2m_ram_paging* types.
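
For illustration, the check that fires sits in the second loop of
guest_physmap_add_entry() (xen/arch/x86/mm/p2m.c); the sketch below is
condensed for this description and not a verbatim quote of the 4.0.1
code:

    /* Second loop of guest_physmap_add_entry(), condensed:
     * look for an existing m->p mapping of the mfn being added. */
    for ( i = 0; i < (1UL << page_order); i++ )
    {
        /* A stale M2P entry makes this return the old, paged-out gfn. */
        ogfn = mfn_to_gfn(d, _mfn(mfn + i));
        if ( (ogfn != INVALID_M2P_ENTRY) && (ogfn != gfn + i) )
        {
            omfn = gfn_to_mfn_query(d, ogfn, &ot);
            if ( p2m_is_ram(ot) )
            {
                /* ot is a p2m_ram_paging* type, so p2m_is_ram() is
                 * true, but the paged-out gfn has no mfn anymore,
                 * so this ASSERT fires. */
                ASSERT(mfn_valid(omfn));
                if ( mfn_x(omfn) == (mfn + i) )
                    p2m_remove_page(d, ogfn, mfn + i, 0);
            }
        }
    }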

If the machine_to_phys_mapping array is updated properly, both loops in
guest_physmap_add_entry() turn into no-ops for the new page, and the
mfn/gfn mapping will be done at the end of the function.
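
Correspondingly, the first loop (again condensed, not verbatim) only
acts when the target gfn already has a RAM mapping, which gfn 0xff000
does not:

    /* First loop of guest_physmap_add_entry(), condensed:
     * drop the m->p entry of whatever currently lives at the target gfn. */
    for ( i = 0; i < (1UL << page_order); i++ )
    {
        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
        if ( p2m_is_ram(ot) )
        {
            /* gfn 0xff000 has no p2m entry yet, so this branch is
             * not taken. */
            ASSERT(mfn_valid(omfn));
            set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
        }
    }

The second loop (sketched above) is skipped as well, because
mfn_to_gfn() now returns INVALID_M2P_ENTRY for the freshly allocated
page.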

The same thing needs to happen during a page-in.
If XENMEM_add_to_physmap is used with XENMAPSPACE_gmfn,
get_gpfn_from_mfn() will return an apparently valid gfn. As a result,
guest_physmap_remove_page() is called. The ASSERT in p2m_remove_page()
triggers because the passed mfn does not match the old mfn for the
passed gfn.
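
The relevant step is the "unmap from old location" part of the
XENMEM_add_to_physmap handler in xen/arch/x86/mm.c; condensed here for
illustration, not a verbatim quote of the 4.0.1 code:

    /* XENMAPSPACE_gmfn: unmap the mfn from its old location before
     * mapping it at the new one. */
    gpfn = get_gpfn_from_mfn(mfn);
    if ( gpfn != INVALID_M2P_ENTRY )
        /* A stale M2P entry makes gpfn look valid, so this is called
         * and p2m_remove_page() asserts that the p2m entry for gpfn
         * still points back at this mfn, which it no longer does. */
        guest_physmap_remove_page(d, gpfn, mfn, 0);

    /* Map at new location. */
    guest_physmap_add_page(d, xatp.gpfn, mfn, 0);

With the M2P entry invalidated at evict time and restored at page-in
time, gpfn is either INVALID_M2P_ENTRY or a gfn that really maps this
mfn, and the ASSERT no longer triggers.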

Signed-off-by: Olaf Hering <olaf@aepfle.de>

---
v2:
 call set_gpfn_from_mfn only if mfn is valid

 xen/arch/x86/mm/p2m.c |   14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

--- xen-4.0.1-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/p2m.c
@@ -2524,6 +2524,7 @@ int p2m_mem_paging_evict(struct domain *
     /* Remove mapping from p2m table */
     p2m_lock(d->arch.p2m);
     set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+    set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
     p2m_unlock(d->arch.p2m);
 
     /* Put the page back so it gets freed */
@@ -2598,9 +2599,16 @@ void p2m_mem_paging_resume(struct domain
 
     /* Fix p2m entry */
     mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
-    p2m_lock(d->arch.p2m);
-    set_p2m_entry(d, rsp.gfn, mfn, 0, p2m_ram_rw);
-    p2m_unlock(d->arch.p2m);
+    if (mfn_valid(mfn))
+    {
+        p2m_lock(d->arch.p2m);
+        set_p2m_entry(d, rsp.gfn, mfn, 0, p2m_ram_rw);
+        set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+        p2m_unlock(d->arch.p2m);
+    } else {
+        gdprintk(XENLOG_ERR, "invalid mfn %lx for gfn %lx p2mt %x flags %lx\n",
+                 mfn_x(mfn), rsp.gfn, p2mt, (unsigned long)rsp.flags);
+    }
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )