xen/5587d779-evtchn-defer-freeing-struct-evtchn-s-until-evtchn_destroy_final.patch
Charles Arnold 763b78040d
- bnc#935634 - VUL-0: CVE-2015-3259: xen: XSA-137: xl command line
  config handling stack overflow
  CVE-2015-3259-xsa137.patch
- Upstream patches from Jan
  558bfaa0-x86-traps-avoid-using-current-too-early.patch
  5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
  559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
  559bdde5-pull-in-latest-linux-earlycpio.patch
- Upstream patches from Jan pending review
  552d0fd2-x86-hvm-don-t-include-asm-spinlock-h.patch
  552d0fe8-x86-mtrr-include-asm-atomic.h.patch
  552d293b-x86-vMSI-X-honor-all-mask-requests.patch
  552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
  554c7aee-x86-provide-arch_fetch_and_add.patch
  554c7b00-arm-provide-arch_fetch_and_add.patch
  55534b0a-x86-provide-add_sized.patch
  55534b25-arm-provide-add_sized.patch
  5555a4f8-use-ticket-locks-for-spin-locks.patch
  5555a5b9-x86-arm-remove-asm-spinlock-h.patch
  5555a8ec-introduce-non-contiguous-allocation.patch
  55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
  557eb55f-gnttab-per-active-entry-locking.patch
  557eb5b6-gnttab-introduce-maptrack-lock.patch
  557eb620-gnttab-make-the-grant-table-lock-a-read-write-lock.patch
  557ffab8-evtchn-factor-out-freeing-an-event-channel.patch
  5582bf43-evtchn-simplify-port_is_valid.patch
  5582bf81-evtchn-remove-the-locking-when-unmasking-an-event-channel.patch
  5583d9c5-x86-MSI-X-cleanup.patch
  5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
  5583da64-gnttab-use-per-VCPU-maptrack-free-lists.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=369
2015-07-10 15:21:29 +00:00

# Commit a753f0e53ff973a8a066e86c1cb3d6dd5c68d59f
# Date 2015-06-22 11:38:01 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
evtchn: defer freeing struct evtchn's until evtchn_destroy_final()

notify_via_xen_event_channel() and free_xen_event_channel() had to
check if the domain was dying because they may be called while the
domain is being destroyed and the struct evtchn's are being freed.

By deferring the freeing of the struct evtchn's until all references
to the domain are dropped, these functions can rely on the channel
state being present and valid.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
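
The pattern the patch adopts is a two-phase teardown: evtchn_destroy()
(run while the domain is dying) closes every channel but leaves the
bucket memory allocated, and evtchn_destroy_final() (run once the last
reference to the domain is dropped) actually frees it. Below is a
minimal standalone C sketch of that pattern; all names (struct entry,
owner_destroy*, entry_state) are illustrative stand-ins, not Xen's API.

    /* Two-phase teardown sketch; names are illustrative, not Xen's. */
    #include <assert.h>
    #include <stdlib.h>

    struct entry { int state; };        /* stand-in for struct evtchn */

    struct owner {
        struct entry **slot;            /* stand-in for the buckets */
        unsigned int nr;
    };

    /* Phase 1 -- owner is dying: close entries but keep them allocated,
     * so lookups racing with destruction still find valid memory. */
    void owner_destroy(struct owner *o)
    {
        for ( unsigned int i = 0; i < o->nr; i++ )
            o->slot[i]->state = 0;      /* "closed", not freed */
    }

    /* Phase 2 -- last reference gone: nothing can look entries up any
     * more, so now the memory itself may be released. */
    void owner_destroy_final(struct owner *o)
    {
        for ( unsigned int i = 0; i < o->nr; i++ )
            free(o->slot[i]);
        free(o->slot);
        o->slot = NULL;
        o->nr = 0;
    }

    /* Callers between the two phases no longer need an is-dying check:
     * entry memory is guaranteed to exist until owner_destroy_final(). */
    int entry_state(const struct owner *o, unsigned int idx)
    {
        assert(idx < o->nr);            /* mirrors the BUG_ON/ASSERT use */
        return o->slot[idx]->state;
    }

This is why the is_dying checks, and the locking that protected them,
can simply be dropped from free_xen_event_channel() and
notify_via_xen_event_channel() in the hunks below.
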
--- sle12sp1.orig/xen/common/event_channel.c	2015-07-08 13:57:44.000000000 +0200
+++ sle12sp1/xen/common/event_channel.c	2015-07-08 14:00:53.000000000 +0200
@@ -1177,22 +1177,8 @@ int alloc_unbound_xen_event_channel(
 void free_xen_event_channel(
     struct vcpu *local_vcpu, int port)
 {
-    struct evtchn *chn;
     struct domain *d = local_vcpu->domain;
-
-    spin_lock(&d->event_lock);
-
-    if ( unlikely(d->is_dying) )
-    {
-        spin_unlock(&d->event_lock);
-        return;
-    }
-
     BUG_ON(!port_is_valid(d, port));
-    chn = evtchn_from_port(d, port);
-    BUG_ON(!consumer_is_xen(chn));
-
-    spin_unlock(&d->event_lock);
 
     evtchn_close(d, port, 0);
 }
@@ -1206,18 +1192,12 @@ void notify_via_xen_event_channel(struct
 
     spin_lock(&ld->event_lock);
 
-    if ( unlikely(ld->is_dying) )
-    {
-        spin_unlock(&ld->event_lock);
-        return;
-    }
-
     ASSERT(port_is_valid(ld, lport));
     lchn = evtchn_from_port(ld, lport);
-    ASSERT(consumer_is_xen(lchn));
 
     if ( likely(lchn->state == ECS_INTERDOMAIN) )
     {
+        ASSERT(consumer_is_xen(lchn));
         rd = lchn->u.interdomain.remote_dom;
         rport = lchn->u.interdomain.remote_port;
         rchn = evtchn_from_port(rd, rport);
@@ -1285,7 +1265,7 @@ int evtchn_init(struct domain *d)
 
 void evtchn_destroy(struct domain *d)
 {
-    unsigned int i, j;
+    unsigned int i;
 
     /* After this barrier no new event-channel allocations can occur. */
     BUG_ON(!d->is_dying);
@@ -1295,8 +1275,17 @@ void evtchn_destroy(struct domain *d)
     for ( i = 0; port_is_valid(d, i); i++ )
         evtchn_close(d, i, 0);
 
+    clear_global_virq_handlers(d);
+
+    evtchn_fifo_destroy(d);
+}
+
+
+void evtchn_destroy_final(struct domain *d)
+{
+    unsigned int i, j;
+
     /* Free all event-channel buckets. */
-    spin_lock(&d->event_lock);
     for ( i = 0; i < NR_EVTCHN_GROUPS; i++ )
     {
         if ( !d->evtchn_group[i] )
@@ -1304,20 +1293,9 @@ void evtchn_destroy(struct domain *d)
         for ( j = 0; j < BUCKETS_PER_GROUP; j++ )
             free_evtchn_bucket(d, d->evtchn_group[i][j]);
         xfree(d->evtchn_group[i]);
-        d->evtchn_group[i] = NULL;
     }
     free_evtchn_bucket(d, d->evtchn);
-    d->evtchn = NULL;
-    spin_unlock(&d->event_lock);
 
-    clear_global_virq_handlers(d);
-
-    evtchn_fifo_destroy(d);
-}
-
-
-void evtchn_destroy_final(struct domain *d)
-{
 #if MAX_VIRT_CPUS > BITS_PER_LONG
     xfree(d->poll_mask);
     d->poll_mask = NULL;