xen/55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch

# Commit 0174da5b79752e2d5d6ca0faed89536e8f3d91c7
# Date 2015-08-06 10:04:43 +0100
# Author Anshul Makkar <anshul.makkar@citrix.com>
# Committer Ian Campbell <ian.campbell@citrix.com>
x86/mm: Make {hap, shadow}_teardown() preemptible

A domain with sufficient shadow allocation can cause a watchdog timeout
during domain destruction. Expand the existing -ERESTART logic in
paging_teardown() to allow {hap/sh}_set_allocation() to become
restartable during the DOMCTL_destroydomain hypercall.

Signed-off-by: Anshul Makkar <anshul.makkar@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>
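
For readers coming to this patch cold, the pattern being extended works as
follows: the page-freeing helpers ({hap,sh}_set_allocation()) accept an
optional "preempted" out-parameter, which they set when the current hypercall
has run long enough and should yield; the teardown paths now forward that
flag, and paging_teardown() turns it into -ERESTART so that the
DOMCTL_destroydomain hypercall is restarted via Xen's usual continuation
mechanism and the remaining work is done in bounded chunks. The stand-alone
sketch below is illustrative only; it is not Xen code and is not part of the
patch. budget_exhausted(), set_allocation_to_zero() and the retry loop in
main() are hypothetical stand-ins for the preemption check,
{hap,sh}_set_allocation() and the restarted hypercall.

/* Minimal user-space sketch of the preemption pattern (illustrative only). */
#include <stdio.h>

#define ERESTART 85  /* stand-in value for the example */

struct fake_domain {
    unsigned int total_pages;
};

/* Pretend "preemption check": allow at most 1000 pages per invocation. */
static int budget_exhausted(unsigned int done)
{
    return done >= 1000;
}

/* Analogue of {hap,sh}_set_allocation(d, 0, preempted): free pages until
 * none are left, or stop early and flag that we were preempted. */
static void set_allocation_to_zero(struct fake_domain *d, int *preempted)
{
    unsigned int done = 0;

    while ( d->total_pages != 0 )
    {
        d->total_pages--;  /* "free" one page */
        done++;

        if ( preempted && budget_exhausted(done) )
        {
            *preempted = 1;
            return;
        }
    }
}

/* Analogue of paging_teardown(): report -ERESTART if interrupted. */
static int teardown(struct fake_domain *d)
{
    int preempted = 0;

    set_allocation_to_zero(d, &preempted);

    if ( preempted )
        return -ERESTART;

    return 0;
}

int main(void)
{
    struct fake_domain d = { .total_pages = 3500 };
    int rc, calls = 0;

    /* Analogue of the hypercall being restarted until it completes. */
    do {
        rc = teardown(&d);
        calls++;
    } while ( rc == -ERESTART );

    printf("teardown completed after %d call(s), pages left = %u\n",
           calls, d.total_pages);

    return 0;
}
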
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -503,7 +503,7 @@ void hap_final_teardown(struct domain *d
     }
 
     if ( d->arch.paging.hap.total_pages != 0 )
-        hap_teardown(d);
+        hap_teardown(d, NULL);
 
     p2m_teardown(p2m_get_hostp2m(d));
     /* Free any memory that the p2m teardown released */
@@ -513,7 +513,7 @@ void hap_final_teardown(struct domain *d
     paging_unlock(d);
 }
 
-void hap_teardown(struct domain *d)
+void hap_teardown(struct domain *d, int *preempted)
 {
     struct vcpu *v;
     mfn_t mfn;
@@ -541,18 +541,11 @@ void hap_teardown(struct domain *d)
 
     if ( d->arch.paging.hap.total_pages != 0 )
     {
-        HAP_PRINTK("teardown of domain %u starts."
-                   " pages total = %u, free = %u, p2m=%u\n",
-                   d->domain_id,
-                   d->arch.paging.hap.total_pages,
-                   d->arch.paging.hap.free_pages,
-                   d->arch.paging.hap.p2m_pages);
-        hap_set_allocation(d, 0, NULL);
-        HAP_PRINTK("teardown done."
-                   " pages total = %u, free = %u, p2m=%u\n",
-                   d->arch.paging.hap.total_pages,
-                   d->arch.paging.hap.free_pages,
-                   d->arch.paging.hap.p2m_pages);
+        hap_set_allocation(d, 0, preempted);
+
+        if ( preempted && *preempted )
+            goto out;
+
         ASSERT(d->arch.paging.hap.total_pages == 0);
     }
 
@@ -561,6 +554,7 @@ void hap_teardown(struct domain *d)
     xfree(d->arch.hvm_domain.dirty_vram);
     d->arch.hvm_domain.dirty_vram = NULL;
 
+out:
     paging_unlock(d);
 }
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -779,12 +779,15 @@ long paging_domctl_continuation(XEN_GUES
 /* Call when destroying a domain */
 int paging_teardown(struct domain *d)
 {
-    int rc;
+    int rc, preempted = 0;
 
     if ( hap_enabled(d) )
-        hap_teardown(d);
+        hap_teardown(d, &preempted);
     else
-        shadow_teardown(d);
+        shadow_teardown(d, &preempted);
+
+    if ( preempted )
+        return -ERESTART;
 
     /* clean up log dirty resources. */
     rc = paging_free_log_dirty_bitmap(d, 0);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3030,7 +3030,7 @@ int shadow_enable(struct domain *d, u32
     return rv;
 }
 
-void shadow_teardown(struct domain *d)
+void shadow_teardown(struct domain *d, int *preempted)
 /* Destroy the shadow pagetables of this domain and free its shadow memory.
  * Should only be called for dying domains. */
 {
@@ -3091,23 +3091,16 @@ void shadow_teardown(struct domain *d)
 
     if ( d->arch.paging.shadow.total_pages != 0 )
     {
-        SHADOW_PRINTK("teardown of domain %u starts."
-                      " Shadow pages total = %u, free = %u, p2m=%u\n",
-                      d->domain_id,
-                      d->arch.paging.shadow.total_pages,
-                      d->arch.paging.shadow.free_pages,
-                      d->arch.paging.shadow.p2m_pages);
         /* Destroy all the shadows and release memory to domheap */
-        sh_set_allocation(d, 0, NULL);
+        sh_set_allocation(d, 0, preempted);
+
+        if ( preempted && *preempted )
+            goto out;
+
         /* Release the hash table back to xenheap */
         if (d->arch.paging.shadow.hash_table)
             shadow_hash_teardown(d);
-        /* Should not have any more memory held */
-        SHADOW_PRINTK("teardown done."
-                      " Shadow pages total = %u, free = %u, p2m=%u\n",
-                      d->arch.paging.shadow.total_pages,
-                      d->arch.paging.shadow.free_pages,
-                      d->arch.paging.shadow.p2m_pages);
+
         ASSERT(d->arch.paging.shadow.total_pages == 0);
     }
 
@@ -3138,6 +3131,7 @@ void shadow_teardown(struct domain *d)
         d->arch.hvm_domain.dirty_vram = NULL;
     }
 
+out:
     paging_unlock(d);
 
     /* Must be called outside the lock */
@@ -3159,7 +3153,7 @@ void shadow_final_teardown(struct domain
      * It is possible for a domain that never got domain_kill()ed
      * to get here with its shadow allocation intact. */
     if ( d->arch.paging.shadow.total_pages != 0 )
-        shadow_teardown(d);
+        shadow_teardown(d, NULL);
 
     /* It is now safe to pull down the p2m map. */
     p2m_teardown(p2m_get_hostp2m(d));
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -54,7 +54,7 @@ int hap_domctl(struct domain *d, xen_d
                XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 int hap_enable(struct domain *d, u32 mode);
 void hap_final_teardown(struct domain *d);
-void hap_teardown(struct domain *d);
+void hap_teardown(struct domain *d, int *preempted);
 void hap_vcpu_init(struct vcpu *v);
 int hap_track_dirty_vram(struct domain *d,
                          unsigned long begin_pfn,
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -72,7 +72,7 @@ int shadow_domctl(struct domain *d,
                   XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 
 /* Call when destroying a domain */
-void shadow_teardown(struct domain *d);
+void shadow_teardown(struct domain *d, int *preempted);
 
 /* Call once all of the references to the domain have gone away */
 void shadow_final_teardown(struct domain *d);