5347b524-evtchn-eliminate-64k-ports-limitation.patch
53aac342-x86-HVM-consolidate-and-sanitize-CR4-guest-reserved-bit-determination.patch
53b16cd4-VT-d-ATS-correct-and-clean-up-dev_invalidate_iotlb.patch
53b56de1-properly-reference-count-DOMCTL_-un-pausedomain-hypercalls.patch
53cfdcc7-avoid-crash-when-doing-shutdown-with-active-cpupools.patch
53cfddaf-x86-mem_event-validate-the-response-vcpu_id-before-acting-on-it.patch
53cfdde4-x86-mem_event-prevent-underflow-of-vcpu-pause-counts.patch
- bnc#886801 - xl vncviewer: The first domu can be accessed by any id
  53c9151b-Fix-xl-vncviewer-accesses-port-0-by-any-invalid-domid.patch
- Upstream pygrub bug fix
  5370e03b-pygrub-fix-error-handling-if-no-valid-partitions-are-found.patch
- Fix pygrub to handle old 32-bit VMs
  pygrub-boot-legacy-sles.patch (Mike Latimer)
- Remove the xen-vmresync utility. It is an old PlateSpin Orchestrate
  utility that should never have been included in the Xen package.
  Updated xen.spec
- Rework the xen-destroy utility included in xen-utils
  (bnc#885292 and bnc#886063)
  Updated xen-utils-0.1.tar.bz2
- bnc#886063 - Xen monitor fails (xl list --long output different from
  xm list --long output)
- bnc#885292 - VirtualDomain: pid_status does not know how to check
  status on SLE12

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=322
# Commit 868d9b99b39c53dc1f6ae9bfd7b148c206fd7240
# Date 2014-07-23 18:08:04 +0200
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/mem_event: prevent underflow of vcpu pause counts

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Tested-by: Aravindh Puthiyaparambil <aravindp@cisco.com>
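[Editor's note] The patch replaces raw vcpu_pause_nosync()/vcpu_unpause() pairs with wrappers that track, per vcpu, how many pauses mem_event itself has taken out. The interesting part is the unpause side: rather than decrementing unconditionally, mem_event_vcpu_unpause() retries a cmpxchg loop that refuses to take the counter below zero, so spurious unpause responses from the toolstack cannot underflow the vcpu pause count. Below is a minimal standalone sketch of the same saturate-at-zero decrement, using C11 atomics in place of Xen's atomic_t and cmpxchg(); the names pause_ref_get/pause_ref_put are illustrative, not Xen API:

    /* Illustration only, not Xen code: saturate-at-zero reference counting
     * in the style of mem_event_vcpu_pause()/mem_event_vcpu_unpause(). */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pause_count;

    /* Mirrors mem_event_vcpu_pause(): taking a reference is a plain
     * atomic increment. */
    static void pause_ref_get(void)
    {
        atomic_fetch_add(&pause_count, 1);
    }

    /* Mirrors mem_event_vcpu_unpause(): decrement, but never below zero.
     * Returns 1 if a reference was released, 0 if the count was already
     * zero (the underflow case the patch guards against). */
    static int pause_ref_put(void)
    {
        int old = atomic_load(&pause_count);

        do
        {
            if ( old == 0 )
                return 0;           /* too many unpause attempts */
        } while ( !atomic_compare_exchange_weak(&pause_count, &old, old - 1) );

        return 1;
    }

    int main(void)
    {
        pause_ref_get();
        printf("put: %d\n", pause_ref_put());  /* 1: count 1 -> 0 */
        printf("put: %d\n", pause_ref_put());  /* 0: refused, stays 0 */
        return 0;
    }

On failure, atomic_compare_exchange_weak() refreshes `old` with the current value and the loop re-checks for zero before retrying, exactly the shape of the cmpxchg loop in the patch.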
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4762,7 +4762,7 @@ static int hvm_memory_event_traps(long p
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }

     req.gfn = value;
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -655,6 +655,38 @@ int mem_event_domctl(struct domain *d, x
     return rc;
 }

+void mem_event_vcpu_pause(struct vcpu *v)
+{
+    ASSERT(v == current);
+
+    atomic_inc(&v->mem_event_pause_count);
+    vcpu_pause_nosync(v);
+}
+
+void mem_event_vcpu_unpause(struct vcpu *v)
+{
+    int old, new, prev = v->mem_event_pause_count.counter;
+
+    /* All unpause requests as a result of toolstack responses. Prevent
+     * underflow of the vcpu pause count. */
+    do
+    {
+        old = prev;
+        new = old - 1;
+
+        if ( new < 0 )
+        {
+            printk(XENLOG_G_WARNING
+                   "d%d:v%d mem_event: Too many unpause attempts\n",
+                   v->domain->domain_id, v->vcpu_id);
+            return;
+        }
+
+        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
+    } while ( prev != old );
+
+    vcpu_unpause(v);
+}

 /*
  * Local variables:
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -568,7 +568,7 @@ int mem_sharing_notify_enomem(struct dom
     if ( v->domain == d )
     {
         req.flags = MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }

     req.p2mt = p2m_ram_shared;
@@ -609,7 +609,7 @@ int mem_sharing_sharing_resume(struct do

         /* Unpause domain/vcpu */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }

     return 0;
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1094,7 +1094,7 @@ void p2m_mem_paging_populate(struct doma
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
     {
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
     /* No need to inform pager if the gfn is not in the page-out path */
@@ -1257,7 +1257,7 @@ void p2m_mem_paging_resume(struct domain
         }
         /* Unpause domain */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }
 }

@@ -1352,7 +1352,7 @@ bool_t p2m_mem_access_check(paddr_t gpa,

     /* Pause the current VCPU */
     if ( p2ma != p2m_access_n2rwx )
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);

     /* VCPU may be paused, return whether we promoted automatically */
     return (p2ma == p2m_access_n2rwx);
@@ -1378,7 +1378,7 @@ void p2m_mem_access_resume(struct domain

         /* Unpause domain */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }
 }
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -66,6 +66,9 @@ int do_mem_event_op(int op, uint32_t dom
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE_PARAM(void) u_domctl);

+void mem_event_vcpu_pause(struct vcpu *v);
+void mem_event_vcpu_unpause(struct vcpu *v);
+
 #endif /* __MEM_EVENT_H__ */

--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -189,6 +189,9 @@ struct vcpu
     unsigned long    pause_flags;
     atomic_t         pause_count;

+    /* VCPU paused for mem_event replies. */
+    atomic_t         mem_event_pause_count;
+
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;