a11c33863f
5281fad4-numa-sched-leave-node-affinity-alone-if-not-in-auto-mode.patch
  52820823-nested-SVM-adjust-guest-handling-of-structure-mappings.patch
  52820863-VMX-don-t-crash-processing-d-debug-key.patch
  5282492f-x86-eliminate-has_arch_mmios.patch
  52864df2-credit-Update-other-parameters-when-setting-tslice_ms.patch
  52864f30-fix-leaking-of-v-cpu_affinity_saved-on-domain-destruction.patch
  5289d225-nested-VMX-don-t-ignore-mapping-errors.patch
  528a0eb0-x86-consider-modules-when-cutting-off-memory.patch
  528f606c-x86-hvm-reset-TSC-to-0-after-domain-resume-from-S3.patch
  528f609c-x86-crash-disable-the-watchdog-NMIs-on-the-crashing-cpu.patch
  52932418-x86-xsave-fix-nonlazy-state-handling.patch
- Add missing requires to pciutils package for xend-tools
- bnc#851749 - Xen service file does not call xend properly
  xend.service
- bnc#851386 - VUL-0: xen: XSA-78: Insufficient TLB flushing in VT-d (iommu) code
  528a0e5b-TLB-flushing-in-dma_pte_clear_one.patch
- bnc#849667 - VUL-0: xen: XSA-74: Lock order reversal between page_alloc_lock and mm_rwlock
  CVE-2013-4553-xsa74.patch
- bnc#849665 - VUL-0: CVE-2013-4551: xen: XSA-75: Host crash due to guest VMX instruction execution
  52809208-nested-VMX-VMLANUCH-VMRESUME-emulation-must-check-permission-1st.patch
- bnc#849668 - VUL-0: xen: XSA-76: Hypercalls exposed to privilege rings 1 and 2 of HVM guests

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=279
# Commit 67348c3ac700b8bc9147638c719c3035c5ef20f5
# Date 2013-11-12 10:54:28 +0100
# Author Dario Faggioli <dario.faggioli@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
numa-sched: leave node-affinity alone if not in "auto" mode

If the domain's NUMA node-affinity is being specified by the
user/toolstack (instead of being automatically computed by Xen),
we really should stick to that. This means domain_update_node_affinity()
is wrong when it filters out some stuff from there even in "!auto"
mode.

This commit fixes that. Of course, this does not mean node-affinity
is always honoured (e.g., a vcpu won't run on a pcpu of a different
cpupool) but the necessary logic for taking into account all the
possible situations lives in the scheduler code, where it belongs.

What could happen without this change is that, under certain
circumstances, the node-affinity of a domain may change when the
user modifies the vcpu-affinity of the domain's vcpus. This, even
if probably not a real bug, is at least something the user does
not expect, so let's avoid it.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Keir Fraser <keir@xen.org>

--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -345,7 +345,6 @@ void domain_update_node_affinity(struct
     cpumask_var_t cpumask;
     cpumask_var_t online_affinity;
     const cpumask_t *online;
-    nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
@@ -367,28 +366,19 @@ void domain_update_node_affinity(struct
         cpumask_or(cpumask, cpumask, online_affinity);
     }
 
+    /*
+     * If d->auto_node_affinity is true, the domain's node-affinity mask
+     * (d->node_affinity) is automaically computed from all the domain's
+     * vcpus' vcpu-affinity masks (the union of which we have just built
+     * above in cpumask). OTOH, if d->auto_node_affinity is false, we
+     * must leave the node-affinity of the domain alone.
+     */
     if ( d->auto_node_affinity )
     {
-        /* Node-affinity is automaically computed from all vcpu-affinities */
+        nodes_clear(d->node_affinity);
         for_each_online_node ( node )
             if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
-                node_set(node, nodemask);
-
-        d->node_affinity = nodemask;
-    }
-    else
-    {
-        /* Node-affinity is provided by someone else, just filter out cpus
-         * that are either offline or not in the affinity of any vcpus. */
-        nodemask = d->node_affinity;
-        for_each_node_mask ( node, d->node_affinity )
-            if ( !cpumask_intersects(&node_to_cpumask(node), cpumask) )
-                node_clear(node, nodemask);//d->node_affinity);
-
-        /* Avoid loosing track of node-affinity because of a bad
-         * vcpu-affinity has been specified. */
-        if ( !nodes_empty(nodemask) )
-            d->node_affinity = nodemask;
+                node_set(node, d->node_affinity);
     }
 
     sched_set_node_affinity(d, &d->node_affinity);
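
The following is a minimal, standalone C sketch illustrating the behaviour the patch establishes. It is not Xen code: the types, masks, and function names (struct domain, node_to_cpumask, update_node_affinity) are simplified stand-ins for the real hypervisor structures. It shows that the node-affinity mask is only rebuilt from the union of the vcpus' cpu-affinities when the domain is in "auto" mode, while an explicitly provided node-affinity is left untouched even if the vcpu pinning no longer intersects it.

/*
 * Simplified, standalone illustration (not Xen code) of the fixed
 * domain_update_node_affinity() behaviour.  All names here are
 * hypothetical stand-ins for the real Xen structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4
#define CPUS_PER_NODE 2

struct domain {
    bool auto_node_affinity;          /* true: node-affinity is computed   */
    unsigned long node_affinity;      /* bitmask of NUMA nodes             */
    unsigned long vcpu_cpu_union;     /* union of all vcpu-affinity masks  */
};

/* CPUs belonging to a node: node 0 -> cpus 0-1, node 1 -> cpus 2-3, ... */
static unsigned long node_to_cpumask(unsigned int node)
{
    return ((1UL << CPUS_PER_NODE) - 1) << (node * CPUS_PER_NODE);
}

static void update_node_affinity(struct domain *d)
{
    unsigned int node;

    /* In "auto" mode, rebuild node-affinity from the vcpu-affinity union. */
    if ( d->auto_node_affinity )
    {
        d->node_affinity = 0;
        for ( node = 0; node < NR_NODES; node++ )
            if ( node_to_cpumask(node) & d->vcpu_cpu_union )
                d->node_affinity |= 1UL << node;
    }
    /* Otherwise the user/toolstack-provided node-affinity is left alone. */
}

int main(void)
{
    /* Explicit node-affinity on node 3; vcpus pinned to the cpus of node 0. */
    struct domain d = { .auto_node_affinity = false,
                        .node_affinity = 1UL << 3,
                        .vcpu_cpu_union = node_to_cpumask(0) };

    update_node_affinity(&d);
    /* Prints 0x8: the explicit node-affinity is not clobbered. */
    printf("node_affinity stays 0x%lx despite vcpu pinning\n",
           d.node_affinity);
    return 0;
}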