xen/x86-dom-cleanup.patch

158 lines
4.6 KiB
Diff

Equivalent of xen-unstable changesets 18720, 18731, and 18735 (backport).
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1687,6 +1687,8 @@ static int relinquish_memory(
{
if ( free_page_type(page, x, 0) != 0 )
BUG();
+ if ( x & PGT_partial )
+ page->u.inuse.type_info--;
break;
}
}
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1343,7 +1343,7 @@ static void free_l1_table(struct page_in
static int free_l2_table(struct page_info *page, int preemptible)
{
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_COMPAT) || defined(DOMAIN_DESTRUCT_AVOID_RECURSION)
struct domain *d = page_get_owner(page);
#endif
unsigned long pfn = page_to_mfn(page);
@@ -1351,6 +1351,11 @@ static int free_l2_table(struct page_inf
unsigned int i = page->nr_validated_ptes - 1;
int err = 0;
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+ if ( d->arch.relmem == RELMEM_l3 )
+ return 0;
+#endif
+
pl2e = map_domain_page(pfn);
ASSERT(page->nr_validated_ptes);
@@ -1381,7 +1386,7 @@ static int free_l3_table(struct page_inf
int rc = 0;
#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
- if ( d->arch.relmem == RELMEM_l3 )
+ if ( d->arch.relmem == RELMEM_l4 )
return 0;
#endif
@@ -1424,11 +1429,6 @@ static int free_l4_table(struct page_inf
unsigned int i = page->nr_validated_ptes - !page->partial_pte;
int rc = 0;
-#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
- if ( d->arch.relmem == RELMEM_l4 )
- return 0;
-#endif
-
do {
if ( is_guest_l4_slot(d, i) )
rc = put_page_from_l4e(pl4e[i], pfn, preemptible);
@@ -1940,7 +1940,6 @@ int free_page_type(struct page_info *pag
{
struct domain *owner = page_get_owner(page);
unsigned long gmfn;
- int rc;
if ( likely(owner != NULL) )
{
@@ -1973,34 +1972,39 @@ int free_page_type(struct page_info *pag
page->nr_validated_ptes = 1U << PAGETABLE_ORDER;
page->partial_pte = 0;
}
+
switch ( type & PGT_type_mask )
{
case PGT_l1_page_table:
free_l1_table(page);
- rc = 0;
- break;
+ return 0;
case PGT_l2_page_table:
- rc = free_l2_table(page, preemptible);
- break;
+ return free_l2_table(page, preemptible);
case PGT_l3_page_table:
#if CONFIG_PAGING_LEVELS == 3
if ( !(type & PGT_partial) )
page->nr_validated_ptes = L3_PAGETABLE_ENTRIES;
#endif
- rc = free_l3_table(page, preemptible);
- break;
+ return free_l3_table(page, preemptible);
case PGT_l4_page_table:
- rc = free_l4_table(page, preemptible);
- break;
- default:
- MEM_LOG("type %lx pfn %lx\n", type, page_to_mfn(page));
- rc = -EINVAL;
- BUG();
+ return free_l4_table(page, preemptible);
}
+ MEM_LOG("type %lx pfn %lx\n", type, page_to_mfn(page));
+ BUG();
+ return -EINVAL;
+}
+
+
+static int __put_page_type_final(struct page_info *page, unsigned long type,
+ int preemptible)
+{
+ int rc = free_page_type(page, type, preemptible);
+
/* No need for atomic update of type_info here: noone else updates it. */
- if ( rc == 0 )
+ switch ( rc )
{
+ case 0:
/*
* Record TLB information for flush later. We do not stamp page tables
* when running in shadow mode:
@@ -2013,9 +2017,8 @@ int free_page_type(struct page_info *pag
page->tlbflush_timestamp = tlbflush_current_time();
wmb();
page->u.inuse.type_info--;
- }
- else if ( rc == -EINTR )
- {
+ break;
+ case -EINTR:
ASSERT(!(page->u.inuse.type_info &
(PGT_count_mask|PGT_validated|PGT_partial)));
if ( !(shadow_mode_enabled(page_get_owner(page)) &&
@@ -2023,12 +2026,13 @@ int free_page_type(struct page_info *pag
page->tlbflush_timestamp = tlbflush_current_time();
wmb();
page->u.inuse.type_info |= PGT_validated;
- }
- else
- {
- BUG_ON(rc != -EAGAIN);
+ break;
+ case -EAGAIN:
wmb();
page->u.inuse.type_info |= PGT_partial;
+ break;
+ default:
+ BUG();
}
return rc;
@@ -2062,7 +2066,7 @@ static int __put_page_type(struct page_i
x, nx)) != x) )
continue;
/* We cleared the 'valid bit' so we do the clean up. */
- return free_page_type(page, x, preemptible);
+ return __put_page_type_final(page, x, preemptible);
}
/*