xen/19103-x86_64-fold-page-lock.patch

# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233072141 0
# Node ID bcf77bfd1161d1e2693d6762bcd436ad98ec0779
# Parent dbf53b739af0434adff50172fc071f718b57b450
x86: Fold page_info lock into type_info.
References: bnc#470949
Fix some racy-looking code at the same time.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
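
For reference, a minimal userspace sketch (not part of the patch) of the
technique the diff below introduces: the lock bit and the type-use count are
folded into one word, so taking the lock and taking a type reference happen
in a single cmpxchg. The bit positions here are hypothetical stand-ins for
Xen's real PGT_* masks in xen/include/asm-x86/mm.h, and GCC's
__sync_val_compare_and_swap stands in for Xen's cmpxchg().

/* Illustrative sketch only; bit layout is simplified and hypothetical. */
#include <stdio.h>

#define LOCKED     (1UL << 63)          /* stand-in for PGT_locked      */
#define VALIDATED  (1UL << 62)          /* stand-in for PGT_validated   */
#define COUNT_MASK ((1UL << 62) - 1)    /* stand-in for PGT_count_mask  */

static unsigned long type_info = VALIDATED | 1;  /* one existing type ref */

static int page_lock(void)
{
    unsigned long x, nx;

    do {
        while ( (x = type_info) & LOCKED )
            ;                            /* spin; Xen uses cpu_relax() */
        nx = x + (1 | LOCKED);           /* take a type ref and the lock */
        if ( !(x & VALIDATED) ||         /* page type not validated      */
             !(x & COUNT_MASK) ||        /* count would underflow        */
             !(nx & COUNT_MASK) )        /* count would overflow         */
            return 0;
    } while ( __sync_val_compare_and_swap(&type_info, x, nx) != x );

    return 1;
}

static void page_unlock(void)
{
    unsigned long x, nx, y = type_info;

    do {
        x = y;
        nx = x - (1 | LOCKED);           /* drop the type ref and the lock */
    } while ( (y = __sync_val_compare_and_swap(&type_info, x, nx)) != x );
}

int main(void)
{
    if ( page_lock() )
    {
        printf("locked:   type_info=%#lx\n", type_info);
        page_unlock();
    }
    printf("unlocked: type_info=%#lx\n", type_info);
    return 0;
}

Because the acquire also bumps the type count, a holder of the lock pins the
page's current type, which is why the patch can drop the separate
get_page_type()/put_page_type() pairs around the locked regions.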
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -202,11 +202,6 @@ void __init init_frametable(void)
}
memset(frame_table, 0, nr_pages << PAGE_SHIFT);
-
-#if defined(__x86_64__)
- for ( i = 0; i < max_page; i ++ )
- spin_lock_init(&frame_table[i].lock);
-#endif
}
void __init arch_init_memory(void)
@@ -1499,24 +1494,31 @@ static int free_l4_table(struct page_inf
#define free_l4_table(page, preemptible) (-EINVAL)
#endif
-static void page_lock(struct page_info *page)
+static int page_lock(struct page_info *page)
{
-#if defined(__i386__)
- while ( unlikely(test_and_set_bit(_PGC_locked, &page->count_info)) )
- while ( test_bit(_PGC_locked, &page->count_info) )
+ unsigned long x, nx;
+
+ do {
+ while ( (x = page->u.inuse.type_info) & PGT_locked )
cpu_relax();
-#else
- spin_lock(&page->lock);
-#endif
+ nx = x + (1 | PGT_locked);
+ if ( !(x & PGT_validated) ||
+ !(x & PGT_count_mask) ||
+ !(nx & PGT_count_mask) )
+ return 0;
+ } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
+
+ return 1;
}
static void page_unlock(struct page_info *page)
{
-#if defined(__i386__)
- clear_bit(_PGC_locked, &page->count_info);
-#else
- spin_unlock(&page->lock);
-#endif
+ unsigned long x, nx, y = page->u.inuse.type_info;
+
+ do {
+ x = y;
+ nx = x - (1 | PGT_locked);
+ } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
}
/* How to write an entry to the guest pagetables.
@@ -1579,19 +1581,15 @@ static int mod_l1_entry(l1_pgentry_t *pl
struct vcpu *curr = current;
struct domain *d = curr->domain;
unsigned long mfn;
- struct page_info *l1pg = mfn_to_page(gl1mfn);
p2m_type_t p2mt;
int rc = 1;
- page_lock(l1pg);
-
if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
- return page_unlock(l1pg), 0;
+ return 0;
if ( unlikely(paging_mode_refcounts(d)) )
{
rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, preserve_ad);
- page_unlock(l1pg);
return rc;
}
@@ -1600,13 +1598,12 @@ static int mod_l1_entry(l1_pgentry_t *pl
/* Translate foreign guest addresses. */
mfn = mfn_x(gfn_to_mfn(FOREIGNDOM, l1e_get_pfn(nl1e), &p2mt));
if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
- return page_unlock(l1pg), 0;
+ return 0;
ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(d)) )
{
- page_unlock(l1pg);
MEM_LOG("Bad L1 flags %x",
l1e_get_flags(nl1e) & l1_disallow_mask(d));
return 0;
@@ -1618,12 +1615,11 @@ static int mod_l1_entry(l1_pgentry_t *pl
adjust_guest_l1e(nl1e, d);
rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
preserve_ad);
- page_unlock(l1pg);
return rc;
}
if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
- return page_unlock(l1pg), 0;
+ return 0;
adjust_guest_l1e(nl1e, d);
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
@@ -1636,11 +1632,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
preserve_ad)) )
{
- page_unlock(l1pg);
return 0;
}
- page_unlock(l1pg);
put_page_from_l1e(ol1e, d);
return rc;
}
@@ -1650,13 +1644,13 @@ static int mod_l1_entry(l1_pgentry_t *pl
static int mod_l2_entry(l2_pgentry_t *pl2e,
l2_pgentry_t nl2e,
unsigned long pfn,
- unsigned long type,
int preserve_ad)
{
l2_pgentry_t ol2e;
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct page_info *l2pg = mfn_to_page(pfn);
+ unsigned long type = l2pg->u.inuse.type_info;
int rc = 1;
if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
@@ -1665,16 +1659,13 @@ static int mod_l2_entry(l2_pgentry_t *pl
return 0;
}
- page_lock(l2pg);
-
if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
- return page_unlock(l2pg), 0;
+ return 0;
if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
{
if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
{
- page_unlock(l2pg);
MEM_LOG("Bad L2 flags %x",
l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
return 0;
@@ -1685,12 +1676,11 @@ static int mod_l2_entry(l2_pgentry_t *pl
{
adjust_guest_l2e(nl2e, d);
rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr, preserve_ad);
- page_unlock(l2pg);
return rc;
}
if ( unlikely(get_page_from_l2e(nl2e, pfn, d) < 0) )
- return page_unlock(l2pg), 0;
+ return 0;
adjust_guest_l2e(nl2e, d);
if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
@@ -1703,11 +1693,9 @@ static int mod_l2_entry(l2_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
preserve_ad)) )
{
- page_unlock(l2pg);
return 0;
}
- page_unlock(l2pg);
put_page_from_l2e(ol2e, pfn);
return rc;
}
@@ -1722,7 +1710,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
l3_pgentry_t ol3e;
struct vcpu *curr = current;
struct domain *d = curr->domain;
- struct page_info *l3pg = mfn_to_page(pfn);
int rc = 0;
if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
@@ -1738,16 +1725,13 @@ static int mod_l3_entry(l3_pgentry_t *pl
if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
return -EINVAL;
- page_lock(l3pg);
-
if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
- return page_unlock(l3pg), -EFAULT;
+ return -EFAULT;
if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
{
if ( unlikely(l3e_get_flags(nl3e) & l3_disallow_mask(d)) )
{
- page_unlock(l3pg);
MEM_LOG("Bad L3 flags %x",
l3e_get_flags(nl3e) & l3_disallow_mask(d));
return -EINVAL;
@@ -1758,13 +1742,12 @@ static int mod_l3_entry(l3_pgentry_t *pl
{
adjust_guest_l3e(nl3e, d);
rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr, preserve_ad);
- page_unlock(l3pg);
return rc ? 0 : -EFAULT;
}
rc = get_page_from_l3e(nl3e, pfn, d, 0, preemptible);
if ( unlikely(rc < 0) )
- return page_unlock(l3pg), rc;
+ return rc;
rc = 0;
adjust_guest_l3e(nl3e, d);
@@ -1778,7 +1761,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr,
preserve_ad)) )
{
- page_unlock(l3pg);
return -EFAULT;
}
@@ -1790,7 +1772,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
}
- page_unlock(l3pg);
put_page_from_l3e(ol3e, pfn, 0, 0);
return rc;
}
@@ -1807,7 +1788,6 @@ static int mod_l4_entry(l4_pgentry_t *pl
struct vcpu *curr = current;
struct domain *d = curr->domain;
l4_pgentry_t ol4e;
- struct page_info *l4pg = mfn_to_page(pfn);
int rc = 0;
if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
@@ -1816,16 +1796,13 @@ static int mod_l4_entry(l4_pgentry_t *pl
return -EINVAL;
}
- page_lock(l4pg);
-
if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
- return page_unlock(l4pg), -EFAULT;
+ return -EFAULT;
if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
{
if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
{
- page_unlock(l4pg);
MEM_LOG("Bad L4 flags %x",
l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
return -EINVAL;
@@ -1836,13 +1813,12 @@ static int mod_l4_entry(l4_pgentry_t *pl
{
adjust_guest_l4e(nl4e, d);
rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr, preserve_ad);
- page_unlock(l4pg);
return rc ? 0 : -EFAULT;
}
rc = get_page_from_l4e(nl4e, pfn, d, 0, preemptible);
if ( unlikely(rc < 0) )
- return page_unlock(l4pg), rc;
+ return rc;
rc = 0;
adjust_guest_l4e(nl4e, d);
@@ -1856,11 +1832,9 @@ static int mod_l4_entry(l4_pgentry_t *pl
else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr,
preserve_ad)) )
{
- page_unlock(l4pg);
return -EFAULT;
}
- page_unlock(l4pg);
put_page_from_l4e(ol4e, pfn, 0, 0);
return rc;
}
@@ -2918,7 +2892,6 @@ int do_mmu_update(
unsigned int cmd, done = 0;
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long type_info;
struct domain_mmap_cache mapcache;
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
@@ -2990,24 +2963,9 @@ int do_mmu_update(
(unsigned long)(req.ptr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
+ if ( page_lock(page) )
{
- case PGT_l1_page_table:
- case PGT_l2_page_table:
- case PGT_l3_page_table:
- case PGT_l4_page_table:
- {
- if ( paging_mode_refcounts(d) )
- {
- MEM_LOG("mmu update on auto-refcounted domain!");
- break;
- }
-
- if ( unlikely(!get_page_type(
- page, type_info & (PGT_type_mask|PGT_pae_xen_l2))) )
- goto not_a_pt;
-
- switch ( type_info & PGT_type_mask )
+ switch ( page->u.inuse.type_info & PGT_type_mask )
{
case PGT_l1_page_table:
{
@@ -3019,7 +2977,7 @@ int do_mmu_update(
case PGT_l2_page_table:
{
l2_pgentry_t l2e = l2e_from_intpte(req.val);
- okay = mod_l2_entry(va, l2e, mfn, type_info,
+ okay = mod_l2_entry(va, l2e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD);
}
break;
@@ -3041,31 +2999,23 @@ int do_mmu_update(
}
break;
#endif
+ case PGT_writable_page:
+ perfc_incr(writable_mmu_updates);
+ okay = paging_write_guest_entry(v, va, req.val, _mfn(mfn));
+ break;
}
-
- put_page_type(page);
+ page_unlock(page);
if ( rc == -EINTR )
rc = -EAGAIN;
}
- break;
-
- default:
- not_a_pt:
+ else if ( get_page_type(page, PGT_writable_page) )
{
- if ( unlikely(!get_page_type(page, PGT_writable_page)) )
- break;
-
perfc_incr(writable_mmu_updates);
-
okay = paging_write_guest_entry(v, va, req.val, _mfn(mfn));
-
put_page_type(page);
}
- break;
- }
unmap_domain_page_with_cache(va, &mapcache);
-
put_page(page);
break;
@@ -3144,7 +3094,6 @@ static int create_grant_pte_mapping(
void *va;
unsigned long gmfn, mfn;
struct page_info *page;
- unsigned long type;
l1_pgentry_t ol1e;
struct domain *d = v->domain;
@@ -3165,21 +3114,23 @@ static int create_grant_pte_mapping(
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- type = page->u.inuse.type_info & PGT_type_mask;
- if ( (type != PGT_l1_page_table) || !get_page_type(page, type) )
+ if ( !page_lock(page) )
{
- MEM_LOG("Grant map attempted to update a non-L1 page");
rc = GNTST_general_error;
goto failed;
}
- page_lock(page);
+ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
ol1e = *(l1_pgentry_t *)va;
if ( !UPDATE_ENTRY(l1, (l1_pgentry_t *)va, ol1e, nl1e, mfn, v, 0) )
{
page_unlock(page);
- put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
@@ -3189,8 +3140,6 @@ static int create_grant_pte_mapping(
if ( !paging_mode_refcounts(d) )
put_page_from_l1e(ol1e, d);
- put_page_type(page);
-
failed:
unmap_domain_page(va);
put_page(page);
@@ -3205,7 +3154,6 @@ static int destroy_grant_pte_mapping(
void *va;
unsigned long gmfn, mfn;
struct page_info *page;
- unsigned long type;
l1_pgentry_t ol1e;
gmfn = addr >> PAGE_SHIFT;
@@ -3221,15 +3169,18 @@ static int destroy_grant_pte_mapping(
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- type = page->u.inuse.type_info & PGT_type_mask;
- if ( (type != PGT_l1_page_table) || !get_page_type(page, type) )
+ if ( !page_lock(page) )
{
- MEM_LOG("Grant map attempted to update a non-L1 page");
rc = GNTST_general_error;
goto failed;
}
- page_lock(page);
+ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
ol1e = *(l1_pgentry_t *)va;
@@ -3239,7 +3190,6 @@ static int destroy_grant_pte_mapping(
page_unlock(page);
MEM_LOG("PTE entry %lx for address %"PRIx64" doesn't match frame %lx",
(unsigned long)l1e_get_intpte(ol1e), addr, frame);
- put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
@@ -3253,13 +3203,11 @@ static int destroy_grant_pte_mapping(
{
page_unlock(page);
MEM_LOG("Cannot delete PTE entry at %p", va);
- put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
page_unlock(page);
- put_page_type(page);
failed:
unmap_domain_page(va);
@@ -3287,21 +3235,40 @@ static int create_grant_va_mapping(
MEM_LOG("Could not find L1 PTE for address %lx", va);
return GNTST_general_error;
}
+
+ if ( !get_page_from_pagenr(gl1mfn, current->domain) )
+ {
+ guest_unmap_l1e(v, pl1e);
+ return GNTST_general_error;
+ }
+
l1pg = mfn_to_page(gl1mfn);
- page_lock(l1pg);
+ if ( !page_lock(l1pg) )
+ {
+ put_page(l1pg);
+ guest_unmap_l1e(v, pl1e);
+ return GNTST_general_error;
+ }
+
+ if ( (l1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(l1pg);
+ put_page(l1pg);
+ guest_unmap_l1e(v, pl1e);
+ return GNTST_general_error;
+ }
+
ol1e = *pl1e;
okay = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0);
+
page_unlock(l1pg);
+ put_page(l1pg);
guest_unmap_l1e(v, pl1e);
- pl1e = NULL;
- if ( !okay )
- return GNTST_general_error;
-
- if ( !paging_mode_refcounts(d) )
+ if ( okay && !paging_mode_refcounts(d) )
put_page_from_l1e(ol1e, d);
- return GNTST_okay;
+ return okay ? GNTST_okay : GNTST_general_error;
}
static int replace_grant_va_mapping(
@@ -3319,31 +3286,48 @@ static int replace_grant_va_mapping(
return GNTST_general_error;
}
+ if ( !get_page_from_pagenr(gl1mfn, current->domain) )
+ {
+ rc = GNTST_general_error;
+ goto out;
+ }
+
l1pg = mfn_to_page(gl1mfn);
- page_lock(l1pg);
+ if ( !page_lock(l1pg) )
+ {
+ rc = GNTST_general_error;
+ put_page(l1pg);
+ goto out;
+ }
+
+ if ( (l1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ rc = GNTST_general_error;
+ goto unlock_and_out;
+ }
+
ol1e = *pl1e;
/* Check that the virtual address supplied is actually mapped to frame. */
if ( unlikely(l1e_get_pfn(ol1e) != frame) )
{
- page_unlock(l1pg);
MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
l1e_get_pfn(ol1e), addr, frame);
rc = GNTST_general_error;
- goto out;
+ goto unlock_and_out;
}
/* Delete pagetable entry. */
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0)) )
{
- page_unlock(l1pg);
MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
rc = GNTST_general_error;
- goto out;
+ goto unlock_and_out;
}
+ unlock_and_out:
page_unlock(l1pg);
-
+ put_page(l1pg);
out:
guest_unmap_l1e(v, pl1e);
return rc;
@@ -3405,20 +3389,42 @@ int replace_grant_host_mapping(
return GNTST_general_error;
}
+ if ( !get_page_from_pagenr(gl1mfn, current->domain) )
+ {
+ guest_unmap_l1e(curr, pl1e);
+ return GNTST_general_error;
+ }
+
l1pg = mfn_to_page(gl1mfn);
- page_lock(l1pg);
+ if ( !page_lock(l1pg) )
+ {
+ put_page(l1pg);
+ guest_unmap_l1e(curr, pl1e);
+ return GNTST_general_error;
+ }
+
+ if ( (l1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(l1pg);
+ put_page(l1pg);
+ guest_unmap_l1e(curr, pl1e);
+ return GNTST_general_error;
+ }
+
ol1e = *pl1e;
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(),
gl1mfn, curr, 0)) )
{
page_unlock(l1pg);
+ put_page(l1pg);
MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
guest_unmap_l1e(curr, pl1e);
return GNTST_general_error;
}
page_unlock(l1pg);
+ put_page(l1pg);
guest_unmap_l1e(curr, pl1e);
rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
@@ -3480,28 +3486,45 @@ int do_update_va_mapping(unsigned long v
l1_pgentry_t val = l1e_from_intpte(val64);
struct vcpu *v = current;
struct domain *d = v->domain;
+ struct page_info *gl1pg;
l1_pgentry_t *pl1e;
unsigned long vmask, bmap_ptr, gl1mfn;
cpumask_t pmask;
- int rc = 0;
+ int rc;
perfc_incr(calls_to_update_va);
- if ( unlikely(!access_ok(va, 1) && !paging_mode_external(d)) )
- return -EINVAL;
-
rc = xsm_update_va_mapping(d, val);
if ( rc )
return rc;
+ rc = -EINVAL;
pl1e = guest_map_l1e(v, va, &gl1mfn);
+ if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) )
+ goto out;
- if ( unlikely(!pl1e || !mod_l1_entry(pl1e, val, gl1mfn, 0)) )
- rc = -EINVAL;
+ gl1pg = mfn_to_page(gl1mfn);
+ if ( !page_lock(gl1pg) )
+ {
+ put_page(gl1pg);
+ goto out;
+ }
+
+ if ( (gl1pg->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(gl1pg);
+ put_page(gl1pg);
+ goto out;
+ }
+
+ rc = mod_l1_entry(pl1e, val, gl1mfn, 0) ? 0 : -EINVAL;
+ page_unlock(gl1pg);
+ put_page(gl1pg);
+
+ out:
if ( pl1e )
guest_unmap_l1e(v, pl1e);
- pl1e = NULL;
process_deferred_ops();
@@ -4122,15 +4145,25 @@ int ptwr_do_page_fault(struct vcpu *v, u
/* Attempt to read the PTE that maps the VA being accessed. */
guest_get_eff_l1e(v, addr, &pte);
- page = l1e_get_page(pte);
/* We are looking only for read-only mappings of p.t. pages. */
if ( ((l1e_get_flags(pte) & (_PAGE_PRESENT|_PAGE_RW)) != _PAGE_PRESENT) ||
- !mfn_valid(l1e_get_pfn(pte)) ||
- ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
- ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
- (page_get_owner(page) != d) )
+ !get_page_from_pagenr(l1e_get_pfn(pte), d) )
+ goto bail;
+
+ page = l1e_get_page(pte);
+ if ( !page_lock(page) )
+ {
+ put_page(page);
+ goto bail;
+ }
+
+ if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
+ {
+ page_unlock(page);
+ put_page(page);
goto bail;
+ }
ptwr_ctxt.ctxt.regs = regs;
ptwr_ctxt.ctxt.force_writeback = 0;
@@ -4139,9 +4172,11 @@ int ptwr_do_page_fault(struct vcpu *v, u
ptwr_ctxt.cr2 = addr;
ptwr_ctxt.pte = pte;
- page_lock(page);
rc = x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops);
+
page_unlock(page);
+ put_page(page);
+
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1685,9 +1685,6 @@ shadow_free_p2m_page(struct domain *d, s
/* Free should not decrement domain's total allocation, since
* these pages were allocated without an owner. */
page_set_owner(pg, NULL);
-#if defined(__x86_64__)
- spin_lock_init(&pg->lock);
-#endif
free_domheap_pages(pg, 0);
d->arch.paging.shadow.p2m_pages--;
perfc_decr(shadow_alloc_count);
@@ -1801,16 +1798,7 @@ static unsigned int sh_set_allocation(st
* may get overwritten, so need to clear it here.
*/
for ( j = 0; j < 1U << order; j++ )
- {
page_set_owner(&((struct page_info *)sp)[j], NULL);
-#if defined(__x86_64__)
- /*
- * Re-instate lock field which we overwrite with shadow_page_info.
- * This was safe, since the lock is only used on guest pages.
- */
- spin_lock_init(&((struct page_info *)sp)[j].lock);
-#endif
- }
d->arch.paging.shadow.free_pages -= 1 << order;
d->arch.paging.shadow.total_pages -= 1 << order;
free_domheap_pages((struct page_info *)sp, order);
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -46,10 +46,6 @@ struct page_info
} u;
-#if defined(__x86_64__)
- spinlock_t lock;
-#endif
-
union {
/*
* Timestamp from 'TLB clock', used to avoid extra safety flushes.
@@ -127,27 +123,25 @@ struct page_info
/* Has this page been *partially* validated for use as its current type? */
#define _PGT_partial PG_shift(7)
#define PGT_partial PG_mask(1, 7)
+ /* Page is locked? */
+#define _PGT_locked PG_shift(8)
+#define PGT_locked PG_mask(1, 8)
/* Count of uses of this frame as its current type. */
-#define PGT_count_width PG_shift(7)
+#define PGT_count_width PG_shift(8)
#define PGT_count_mask ((1UL<<PGT_count_width)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
#define PGC_allocated PG_mask(1, 1)
-#if defined(__i386__)
- /* Page is locked? */
-# define _PGC_locked PG_shift(2)
-# define PGC_locked PG_mask(1, 2)
-#endif
/* Set when is using a page as a page table */
-#define _PGC_page_table PG_shift(3)
-#define PGC_page_table PG_mask(1, 3)
+#define _PGC_page_table PG_shift(2)
+#define PGC_page_table PG_mask(1, 2)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
-#define PGC_cacheattr_base PG_shift(6)
-#define PGC_cacheattr_mask PG_mask(7, 6)
+#define PGC_cacheattr_base PG_shift(5)
+#define PGC_cacheattr_mask PG_mask(7, 5)
/* Count of references to this frame. */
-#define PGC_count_width PG_shift(6)
+#define PGC_count_width PG_shift(5)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -336,7 +336,7 @@ void paging_dump_vcpu_info(struct vcpu *
* Access to the guest pagetables */
/* Get a mapping of a PV guest's l1e for this virtual address. */
-static inline void *
+static inline l1_pgentry_t *
guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
{
l2_pgentry_t l2e;
@@ -354,15 +354,14 @@ guest_map_l1e(struct vcpu *v, unsigned l
!= _PAGE_PRESENT )
return NULL;
*gl1mfn = l2e_get_pfn(l2e);
- return &__linear_l1_table[l1_linear_offset(addr)];
+ return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(addr);
}
/* Pull down the mapping we got from guest_map_l1e() */
static inline void
guest_unmap_l1e(struct vcpu *v, void *p)
{
- if ( unlikely(paging_mode_translate(v->domain)) )
- unmap_domain_page(p);
+ unmap_domain_page(p);
}
/* Read the guest's l1e that maps this address. */