2007-02-19 23:34:05 +01:00
|
|
|
Index: 2007-01-31/xen/arch/x86/domain.c
|
|
|
|
===================================================================
|
|
|
|
--- 2007-01-31.orig/xen/arch/x86/domain.c 2007-01-31 09:39:18.000000000 +0100
|
|
|
|
+++ 2007-01-31/xen/arch/x86/domain.c 2007-02-15 15:10:02.000000000 +0100
|
|
|
|
@@ -1337,7 +1337,8 @@ int hypercall_xlat_continuation(unsigned
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
-static void relinquish_memory(struct domain *d, struct list_head *list)
|
|
|
|
+static void relinquish_memory(struct domain *d, struct list_head *list,
|
|
|
|
+ unsigned long type)
|
|
|
|
{
|
|
|
|
struct list_head *ent;
|
|
|
|
struct page_info *page;
|
|
|
|
@@ -1366,23 +1367,24 @@ static void relinquish_memory(struct dom
|
|
|
|
put_page(page);
|
|
|
|
|
|
|
|
/*
|
|
|
|
- * Forcibly invalidate base page tables at this point to break circular
|
|
|
|
- * 'linear page table' references. This is okay because MMU structures
|
|
|
|
- * are not shared across domains and this domain is now dead. Thus base
|
|
|
|
- * tables are not in use so a non-zero count means circular reference.
|
|
|
|
+ * Forcibly invalidate top-most, still valid page tables at this point
|
|
|
|
+ * to break circular 'linear page table' references. This is okay
|
|
|
|
+ * because MMU structures are not shared across domains and this domain
|
|
|
|
+ * is now dead. Thus top-most valid tables are not in use so a non-zero
|
|
|
|
+ * count means circular reference.
|
|
|
|
*/
|
|
|
|
y = page->u.inuse.type_info;
|
|
|
|
for ( ; ; )
|
|
|
|
{
|
|
|
|
x = y;
|
|
|
|
if ( likely((x & (PGT_type_mask|PGT_validated)) !=
|
|
|
|
- (PGT_base_page_table|PGT_validated)) )
|
|
|
|
+ (type|PGT_validated)) )
|
|
|
|
break;
|
|
|
|
|
|
|
|
y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
|
|
|
|
if ( likely(y == x) )
|
|
|
|
{
|
|
|
|
- free_page_type(page, PGT_base_page_table);
|
|
|
|
+ free_page_type(page, type);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
@@ -1464,8 +1466,16 @@ void domain_relinquish_resources(struct
|
|
|
|
destroy_gdt(v);
|
|
|
|
|
|
|
|
/* Relinquish every page of memory. */
|
|
|
|
- relinquish_memory(d, &d->xenpage_list);
|
|
|
|
- relinquish_memory(d, &d->page_list);
|
|
|
|
+#if CONFIG_PAGING_LEVELS >= 4
|
|
|
|
+ relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
|
|
|
|
+ relinquish_memory(d, &d->page_list, PGT_l4_page_table);
|
|
|
|
+#endif
|
|
|
|
+#if CONFIG_PAGING_LEVELS >= 3
|
|
|
|
+ relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
|
|
|
|
+ relinquish_memory(d, &d->page_list, PGT_l3_page_table);
|
|
|
|
+#endif
|
|
|
|
+ relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
|
|
|
|
+ relinquish_memory(d, &d->page_list, PGT_l2_page_table);
|
|
|
|
|
|
|
|
/* Free page used by xen oprofile buffer */
|
|
|
|
free_xenoprof_pages(d);
|
|
|
|
|
Index: 2007-01-31/xen/arch/x86/mm.c
|
|
|
|
===================================================================
|
|
|
|
|
--- 2007-01-31.orig/xen/arch/x86/mm.c 2007-02-15 15:02:01.000000000 +0100
|
|
|
|
|
+++ 2007-01-31/xen/arch/x86/mm.c 2007-02-02 16:34:03.000000000 +0100
|
|
|
|
@@ -509,7 +509,7 @@ get_linear_pagetable(
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
- * Make sure that the mapped frame is an already-validated L2 table.
|
|
|
|
+ * Make sure that the mapped frame is an already-validated root table.
|
|
|
|
* If so, atomically increment the count (checking for overflow).
|
|
|
|
*/
|
|
|
|
page = mfn_to_page(pfn);
|
|
|
|
@@ -531,6 +531,51 @@ get_linear_pagetable(
|
|
|
|
}
|
|
|
|
#endif /* !CONFIG_X86_PAE */
|
|
|
|
|
|
|
|
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
|
|
|
|
+static int
|
|
|
|
+get_l2_linear_pagetable(
|
|
|
|
+ l2_pgentry_t l2e, unsigned long l2e_pfn, struct domain *d)
|
|
|
|
+{
|
|
|
|
+ unsigned long pfn;
|
|
|
|
+
|
|
|
|
+ if ( (l2e_get_flags(l2e) & _PAGE_RW) )
|
|
|
|
+ {
|
|
|
|
+ MEM_LOG("Attempt to create linear p.t. with write perms");
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if ( (pfn = l2e_get_pfn(l2e)) != l2e_pfn )
|
|
|
|
+ {
|
|
|
|
+ unsigned long x, y;
|
|
|
|
+ struct page_info *page;
|
|
|
|
+
|
|
|
|
+ /* Make sure the mapped frame belongs to the correct domain. */
|
|
|
|
+ if ( unlikely(!get_page_from_pagenr(pfn, d)) )
|
|
|
|
+ return 0;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * Make sure that the mapped frame is an already-validated L2 table.
|
|
|
|
+ * If so, atomically increment the count (checking for overflow).
|
|
|
|
+ */
|
|
|
|
+ page = mfn_to_page(pfn);
|
|
|
|
+ y = page->u.inuse.type_info;
|
|
|
|
+ do {
|
|
|
|
+ x = y;
|
|
|
|
+ if ( unlikely((x & PGT_count_mask) == PGT_count_mask) ||
|
|
|
|
+ unlikely((x & (PGT_type_mask|PGT_validated)) !=
|
|
|
|
+ (PGT_l2_page_table|PGT_validated)) )
|
|
|
|
+ {
|
|
|
|
+ put_page(page);
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x );
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return 1;
|
|
|
|
+}
|
|
|
|
+#endif /* CONFIG_X86_PAE || CONFIG_X86_64 */
|
|
|
|
+
|
|
|
|
int
|
|
|
|
get_page_from_l1e(
|
|
|
|
l1_pgentry_t l1e, struct domain *d)
|
|
|
|
@@ -607,10 +652,16 @@ get_page_from_l2e(
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = get_page_and_type_from_pagenr(l2e_get_pfn(l2e), PGT_l1_page_table, d);
|
|
|
|
-#if CONFIG_PAGING_LEVELS == 2
|
|
|
|
if ( unlikely(!rc) )
|
|
|
|
+ {
|
|
|
|
+#if CONFIG_PAGING_LEVELS == 2
|
|
|
|
rc = get_linear_pagetable(l2e, pfn, d);
|
|
|
|
+#else
|
|
|
|
+ if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) )
|
|
|
|
+ rc = get_l2_linear_pagetable(l2e, pfn, d);
|
|
|
|
#endif
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|