changeset:   23953:eda18b27de6e
user:        Olaf Hering
date:        Thu Oct 13 12:21:10 2011 +0100
files:       tools/xenpaging/xenpaging.c xen/arch/x86/mm.c xen/arch/x86/mm/p2m.c xen/include/public/mem_event.h
description:
xenpaging: handle evict failures

Eviction of a nominated gfn must fail if some other process mapped the
page without checking the p2mt of that gfn first. Add a check to cancel
eviction if the page usage count is not 1.

Handle the possible eviction failure in the page-in paths. After
nominate and before evict, something may check the p2mt and call
populate. Handle this case and let the gfn enter the page-in path. The
gfn may still be connected to a mfn, so there is no need to allocate a
new page in prep.

Adjust do_mmu_update to return -ENOENT only if the gfn has entered the
page-in path and is not yet connected to a mfn. Otherwise
linux_privcmd_map_foreign_bulk() may loop forever.

Add MEM_EVENT_FLAG_EVICT_FAIL to inform the pager that a page-in
request was sent for a possibly not-evicted page. xenpaging currently
does not need that flag because a failure to evict a gfn will be
caught.

Signed-off-by: Olaf Hering
Acked-by: Tim Deegan
Committed-by: Tim Deegan

---
 tools/xenpaging/xenpaging.c    |   10 ++++---
 xen/arch/x86/mm.c              |    8 ++---
 xen/arch/x86/mm/p2m.c          |   55 +++++++++++++++++++++++++++++------------
 xen/include/public/mem_event.h |    1
 4 files changed, 50 insertions(+), 24 deletions(-)

Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c
@@ -734,10 +734,12 @@ int main(int argc, char *argv[])
             }
             else
             {
-                DPRINTF("page already populated (domain = %d; vcpu = %d;"
-                        " gfn = %"PRIx64"; paused = %d)\n",
-                        paging->mem_event.domain_id, req.vcpu_id,
-                        req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
+                DPRINTF("page %s populated (domain = %d; vcpu = %d;"
+                        " gfn = %"PRIx64"; paused = %d; evict_fail = %d)\n",
+                        req.flags & MEM_EVENT_FLAG_EVICT_FAIL ? "not" : "already",
+                        paging->mem_event.domain_id, req.vcpu_id, req.gfn,
+                        !!(req.flags & MEM_EVENT_FLAG_VCPU_PAUSED) ,
+                        !!(req.flags & MEM_EVENT_FLAG_EVICT_FAIL) );

                 /* Tell Xen to resume the vcpu */
                 /* XXX: Maybe just check if the vcpu was paused? */
Index: xen-4.1.2-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.2-testing/xen/arch/x86/mm.c
@@ -3502,7 +3502,7 @@ int do_mmu_update(
                    rc = -ENOENT;
                    break;
                }
-                else if ( p2m_ram_paging_in_start == l1e_p2mt )
+                else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) )
                {
                    rc = -ENOENT;
                    break;
@@ -3543,7 +3543,7 @@ int do_mmu_update(
                    rc = -ENOENT;
                    break;
                }
-                else if ( p2m_ram_paging_in_start == l2e_p2mt )
+                else if ( p2m_ram_paging_in_start == l2e_p2mt && !mfn_valid(mfn) )
                {
                    rc = -ENOENT;
                    break;
@@ -3572,7 +3572,7 @@ int do_mmu_update(
                    rc = -ENOENT;
                    break;
                }
-                else if ( p2m_ram_paging_in_start == l3e_p2mt )
+                else if ( p2m_ram_paging_in_start == l3e_p2mt && !mfn_valid(mfn) )
                {
                    rc = -ENOENT;
                    break;
@@ -3602,7 +3602,7 @@ int do_mmu_update(
                    rc = -ENOENT;
                    break;
                }
-                else if ( p2m_ram_paging_in_start == l4e_p2mt )
+                else if ( p2m_ram_paging_in_start == l4e_p2mt && !mfn_valid(mfn) )
                {
                    rc = -ENOENT;
                    break;
Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c
@@ -2899,15 +2899,24 @@ int p2m_mem_paging_evict(struct p2m_doma
     if ( unlikely(!mfn_valid(mfn)) )
         goto out;

-    if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) ||
-         (p2mt == p2m_ram_paging_in_start) )
+    /* Allow only nominated pages */
+    if ( p2mt != p2m_ram_paging_out )
         goto out;

+    ret = -EBUSY;
     /* Get the page so it doesn't get modified under Xen's feet */
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, d)) )
         goto out;

+    /* Check page count and type once more */
+    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
+         (2 | PGC_allocated) )
+        goto out_put;
+
+    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
+        goto out_put;
+
     /* Decrement guest domain's ref count of the page */
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
@@ -2919,14 +2928,15 @@ int p2m_mem_paging_evict(struct p2m_doma
     /* Clear content before returning the page to Xen */
     scrub_one_page(page);

-    /* Put the page back so it gets freed */
-    put_page(page);
-
     /* Track number of paged gfns */
     atomic_inc(&p2m->domain->paged_pages);

     ret = 0;

+ out_put:
+    /* Put the page back so it gets freed */
+    put_page(page);
+
  out:
     p2m_unlock(p2m);
     return ret;
@@ -2957,6 +2967,7 @@ void p2m_mem_paging_populate(struct p2m_
     mem_event_request_t req;
     p2m_type_t p2mt;
     p2m_access_t a;
+    mfn_t mfn;
     struct domain *d = p2m->domain;

     /* Check that there's space on the ring for this request */
@@ -2968,20 +2979,26 @@ void p2m_mem_paging_populate(struct p2m_

     /* Fix p2m mapping */
     p2m_lock(p2m);
-    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query);
-    if ( p2mt == p2m_ram_paged )
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query);
+    /* Allow only nominated or evicted pages to enter page-in path */
+    if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
     {
-        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, a);
+        /* Evict will fail now, tag this request for pager */
+        if ( p2mt == p2m_ram_paging_out )
+            req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
+
+        set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
     }
     p2m_unlock(p2m);

-    /* Pause domain */
-    if ( v->domain->domain_id == d->domain_id )
+    /* Pause domain if request came from guest and gfn has paging type */
+    if ( p2m_is_paging(p2mt) && v->domain->domain_id == d->domain_id )
     {
         vcpu_pause_nosync(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
+    /* No need to inform pager if the gfn is not in the page-out path */
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
@@ -3002,19 +3019,25 @@ int p2m_mem_paging_prep(struct p2m_domai
     struct page_info *page;
     p2m_type_t p2mt;
     p2m_access_t a;
+    mfn_t mfn;
     int ret = -ENOMEM;

     p2m_lock(p2m);

-    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query);

-    /* Get a free page */
-    page = alloc_domheap_page(p2m->domain, 0);
-    if ( unlikely(page == NULL) )
-        goto out;
+    /* Allocate a page if the gfn does not have one yet */
+    if ( !mfn_valid(mfn) )
+    {
+        /* Get a free page */
+        page = alloc_domheap_page(p2m->domain, 0);
+        if ( unlikely(page == NULL) )
+            goto out;
+        mfn = page_to_mfn(page);
+    }

     /* Fix p2m mapping */
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);

     atomic_dec(&p2m->domain->paged_pages);
Index: xen-4.1.2-testing/xen/include/public/mem_event.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/public/mem_event.h
+++ xen-4.1.2-testing/xen/include/public/mem_event.h
@@ -38,6 +38,7 @@
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
 #define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
+#define MEM_EVENT_FLAG_EVICT_FAIL   (1 << 2)

 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
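
The -EBUSY path added to p2m_mem_paging_evict() relies on the expected reference
count of an otherwise untouched, nominated page. The fragment below restates that
predicate as a standalone helper purely for illustration; it assumes it would sit
next to p2m_mem_paging_evict() in xen/arch/x86/mm/p2m.c where struct page_info and
the PGC_*/PGT_* definitions are in scope, and the helper name is hypothetical.

/*
 * Illustrative helper (not part of the patch) restating the checks added
 * to p2m_mem_paging_evict() above.  It would be called right after
 * get_page(page, d) succeeds.  At that point a nominated page that nobody
 * else touched holds exactly two references: the allocation reference
 * tracked by PGC_allocated and the reference just taken by get_page().
 * Any additional reference means some other party mapped the gfn after
 * nomination, so eviction has to be cancelled with -EBUSY.
 */
static int page_evictable_after_nominate(const struct page_info *page)
{
    /* Expect a count of exactly 2 with PGC_allocated still set. */
    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
         (2 | PGC_allocated) )
        return 0;

    /* The page must not carry a type, e.g. PGT_writable_page. */
    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
        return 0;

    return 1;
}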
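
On the pager side, MEM_EVENT_FLAG_EVICT_FAIL marks a page-in request for a gfn
whose eviction was cancelled: the gfn is still backed by its original mfn, so
there is nothing to restore from the pagefile. The xenpaging hunk above only
logs the flag; the sketch below shows how some other pager might branch on it.
It is an assumption-laden illustration: it presumes the Xen public headers are
installed as <xen/mem_event.h>, and drop_nomination(), write_back_page() and
send_response() are hypothetical stand-ins for a pager's own bookkeeping, its
page-in path (xc_mem_paging_prep() plus a pagefile read) and its mem_event ring
response.

#include <inttypes.h>
#include <stdio.h>

#include <xen/mem_event.h>   /* mem_event_request_t, MEM_EVENT_FLAG_* */

/* Hypothetical bookkeeping: forget that this gfn was ever nominated. */
static void drop_nomination(uint64_t gfn)
{
    printf("dropping nomination for gfn %" PRIx64 "\n", gfn);
}

/* Hypothetical page-in: a real pager would call xc_mem_paging_prep()
 * and copy the saved contents back from its pagefile. */
static void write_back_page(uint64_t gfn)
{
    printf("paging gfn %" PRIx64 " back in\n", gfn);
}

/* Hypothetical response: a real pager puts rsp on the mem_event ring so
 * Xen completes the page-in and unpauses the vcpu if it was paused. */
static void send_response(const mem_event_response_t *rsp)
{
    (void)rsp;
}

static void handle_populate_request(const mem_event_request_t *req)
{
    mem_event_response_t rsp = {
        .gfn     = req->gfn,
        .vcpu_id = req->vcpu_id,
        .flags   = req->flags,  /* carries MEM_EVENT_FLAG_VCPU_PAUSED back */
    };

    if ( req->flags & MEM_EVENT_FLAG_EVICT_FAIL )
        /* Nominate raced with populate: the gfn never left RAM. */
        drop_nomination(req->gfn);
    else
        /* Normal page-in request for an evicted gfn. */
        write_back_page(req->gfn);

    send_response(&rsp);
}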