xen/5583da8c-gnttab-steal-maptrack-entries-from-other-VCPUs.patch
Charles Arnold 8724a18868
- bnc#935634 - VUL-0: CVE-2015-3259: xen: XSA-137: xl command line
  config handling stack overflow
  55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch

- bsc#907514 - Bus fatal error & sles12 sudden reboot has been
  observed
- bsc#910258 - SLES12 Xen host crashes with FATAL NMI after
  shutdown of guest with VT-d NIC
- bsc#918984 - Bus fatal error & sles11-SP4 sudden reboot has been
  observed
- bsc#923967 - Partner-L3: Bus fatal error & sles11-SP3 sudden
  reboot has been observed
  552d293b-x86-vMSI-X-honor-all-mask-requests.patch
  552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
  5576f143-x86-adjust-PV-I-O-emulation-functions-types.patch
  55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
  5583d9c5-x86-MSI-X-cleanup.patch
  5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
  55b0a218-x86-PCI-CFG-write-intercept.patch
  55b0a255-x86-MSI-X-maskall.patch
  55b0a283-x86-MSI-X-teardown.patch
  55b0a2ab-x86-MSI-X-enable.patch
  55b0a2db-x86-MSI-track-guest-masking.patch
- Upstream patches from Jan 
  552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch
  559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
  559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
  559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
  55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
  55a77e4f-dmar-device-scope-mem-leak-fix.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=373
2015-08-26 22:28:15 +00:00


# Commit e76ff6c156906b515c2a4300a81c95886ece5d5f
# Date 2015-06-19 11:02:04 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
gnttab: steal maptrack entries from other VCPUs

If a guest is not evenly grant mapping across its VCPUs one of the
VCPUs may run out of free maptrack entries even though other VCPUs
have many free.

If this happens, "steal" free entries from other VCPUs. We want to
steal entries such that:

a) We avoid ping-ponging stolen entries between VCPUs.

b) The number of free entries owned by each VCPU tends (over time) to
   the number it uses.

So when stealing, we select a VCPU at random (reducing (a)) and we
transfer the stolen entries to the thief VCPU (aiming for (b)).

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
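
The key change to __get_maptrack_handle() below is that the per-VCPU
free-list pop becomes lock-free: re-read the list head, peek at its next
link, and advance the head with cmpxchg(), retrying if another VCPU raced
in; one entry is always left on the list as a tail sentinel. Purely as an
illustration (this is not the hypervisor code), a standalone C11 model of
that pattern might look like the following, with C11 atomics standing in
for Xen's read_atomic()/cmpxchg() and the names TAIL, entries[],
free_head, and pop_free_entry() invented for the sketch:

#include <stdatomic.h>
#include <stdio.h>

#define TAIL (~0u)              /* sentinel marking the end of the free list */

static unsigned int entries[8];               /* entries[i] links to the next free entry */
static _Atomic unsigned int free_head = TAIL;

/* Lock-free pop of the free-list head, always leaving one entry as tail. */
static int pop_free_entry(void)
{
    unsigned int head, next;

    do {
        head = atomic_load(&free_head);
        if ( head == TAIL )     /* free list is empty */
            return -1;

        next = entries[head];
        if ( next == TAIL )     /* keep one entry as the tail sentinel */
            return -1;

        /* Retry if another thread moved the head since we read it. */
    } while ( !atomic_compare_exchange_weak(&free_head, &head, next) );

    return (int)head;
}

int main(void)
{
    /* Build a tiny free list: 0 -> 1 -> 2 -> TAIL. */
    entries[0] = 1;
    entries[1] = 2;
    entries[2] = TAIL;
    atomic_store(&free_head, 0);

    printf("%d\n", pop_free_entry());   /* 0 */
    printf("%d\n", pop_free_entry());   /* 1 */
    printf("%d\n", pop_free_entry());   /* -1: entry 2 stays as the sentinel */
    return 0;
}
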
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -283,26 +283,70 @@ __get_maptrack_handle(
     struct grant_table *t,
     struct vcpu *v)
 {
-    unsigned int head, next;
+    unsigned int head, next, prev_head;
 
-    /* No maptrack pages allocated for this VCPU yet? */
-    head = v->maptrack_head;
-    if ( unlikely(head == MAPTRACK_TAIL) )
-        return -1;
-
-    /*
-     * Always keep one entry in the free list to make it easier to add
-     * free entries to the tail.
-     */
-    next = read_atomic(&maptrack_entry(t, head).ref);
-    if ( unlikely(next == MAPTRACK_TAIL) )
-        return -1;
+    do {
+        /* No maptrack pages allocated for this VCPU yet? */
+        head = read_atomic(&v->maptrack_head);
+        if ( unlikely(head == MAPTRACK_TAIL) )
+            return -1;
 
-    v->maptrack_head = next;
+        /*
+         * Always keep one entry in the free list to make it easier to
+         * add free entries to the tail.
+         */
+        next = read_atomic(&maptrack_entry(t, head).ref);
+        if ( unlikely(next == MAPTRACK_TAIL) )
+            return -1;
+
+        prev_head = head;
+        head = cmpxchg(&v->maptrack_head, prev_head, next);
+    } while ( head != prev_head );
 
     return head;
 }
 
+/*
+ * Try to "steal" a free maptrack entry from another VCPU.
+ *
+ * A stolen entry is transferred to the thief, so the number of
+ * entries for each VCPU should tend to the usage pattern.
+ *
+ * To avoid having to atomically count the number of free entries on
+ * each VCPU and to avoid two VCPUs repeatedly stealing entries from
+ * each other, the initial victim VCPU is selected randomly.
+ */
+static int steal_maptrack_handle(struct grant_table *t,
+                                 const struct vcpu *curr)
+{
+    const struct domain *currd = curr->domain;
+    unsigned int first, i;
+
+    /* Find an initial victim. */
+    first = i = get_random() % currd->max_vcpus;
+
+    do {
+        if ( currd->vcpu[i] )
+        {
+            int handle;
+
+            handle = __get_maptrack_handle(t, currd->vcpu[i]);
+            if ( handle != -1 )
+            {
+                maptrack_entry(t, handle).vcpu = curr->vcpu_id;
+                return handle;
+            }
+        }
+
+        i++;
+        if ( i == currd->max_vcpus )
+            i = 0;
+    } while ( i != first );
+
+    /* No free handles on any VCPU. */
+    return -1;
+}
+
 static inline void
 put_maptrack_handle(
     struct grant_table *t, int handle)
@@ -342,10 +386,31 @@ get_maptrack_handle(
 
     spin_lock(&lgt->maptrack_lock);
 
+    /*
+     * If we've run out of frames, try stealing an entry from another
+     * VCPU (in case the guest isn't mapping across its VCPUs evenly).
+     */
     if ( nr_maptrack_frames(lgt) >= max_maptrack_frames )
     {
+        /*
+         * Can drop the lock since no other VCPU can be adding a new
+         * frame once they've run out.
+         */
         spin_unlock(&lgt->maptrack_lock);
-        return -1;
+
+        /*
+         * Uninitialized free list? Steal an extra entry for the tail
+         * sentinel.
+         */
+        if ( curr->maptrack_tail == MAPTRACK_TAIL )
+        {
+            handle = steal_maptrack_handle(lgt, curr);
+            if ( handle == -1 )
+                return -1;
+            curr->maptrack_tail = handle;
+            write_atomic(&curr->maptrack_head, handle);
+        }
+        return steal_maptrack_handle(lgt, curr);
     }
 
     new_mt = alloc_xenheap_page();
@@ -373,7 +438,7 @@ get_maptrack_handle(
     if ( curr->maptrack_tail == MAPTRACK_TAIL )
        curr->maptrack_tail = handle + MAPTRACK_PER_PAGE - 1;
 
-    curr->maptrack_head = handle + 1;
+    write_atomic(&curr->maptrack_head, handle + 1);
 
     lgt->maptrack[nr_maptrack_frames(lgt)] = new_mt;
     lgt->maptrack_limit += MAPTRACK_PER_PAGE;
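
For illustration, the scan in steal_maptrack_handle() can also be modeled
standalone: pick a random starting VCPU, walk the VCPU array with
wraparound, and hand the first free entry found to the thief, which is why
each VCPU's pool of free entries tends toward its own usage over time.
This is a rough sketch under stated assumptions, not the Xen code: rand()
stands in for Xen's get_random(), and NR_VCPUS, free_count[],
pop_free_entry_of(), and steal_entry() are invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define NR_VCPUS 8

/* Toy per-VCPU free counts standing in for the real per-VCPU free lists. */
static int free_count[NR_VCPUS] = { 0, 0, 5, 0, 0, 0, 1, 0 };

/* Hypothetical per-VCPU pop: returns a fake entry index, or -1 if empty. */
static int pop_free_entry_of(unsigned int vcpu)
{
    if ( free_count[vcpu] <= 0 )
        return -1;
    return (int)(vcpu * 100 + (unsigned int)free_count[vcpu]--);
}

/* Random starting victim, then a wrapping scan over all VCPUs. */
static int steal_entry(void)
{
    unsigned int first, i;

    first = i = (unsigned int)rand() % NR_VCPUS;

    do {
        int handle = pop_free_entry_of(i);

        if ( handle != -1 )
            return handle;      /* the stolen entry now belongs to the thief */

        if ( ++i == NR_VCPUS )  /* wrap around and keep scanning */
            i = 0;
    } while ( i != first );

    return -1;                  /* every VCPU's free list was empty */
}

int main(void)
{
    printf("stole entry %d\n", steal_entry());
    return 0;
}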