# Commit 67348c3ac700b8bc9147638c719c3035c5ef20f5
# Date 2013-11-12 10:54:28 +0100
# Author Dario Faggioli <dario.faggioli@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
numa-sched: leave node-affinity alone if not in "auto" mode

If the domain's NUMA node-affinity is being specified by the
user/toolstack (instead of being automatically computed by Xen),
we really should stick to that. This means domain_update_node_affinity()
is wrong to filter nodes out of that mask even in "!auto" mode.

This commit fixes that. Of course, this does not mean node-affinity
is always honoured (e.g., a vcpu won't run on a pcpu of a different
cpupool) but the necessary logic for taking into account all the
possible situations lives in the scheduler code, where it belongs.

What could happen without this change is that, under certain
circumstances, the node-affinity of a domain may change when the
user modifies the vcpu-affinity of the domain's vcpus. This, even
if probably not a real bug, is at least something the user does
not expect, so let's avoid it.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Keir Fraser <keir@xen.org>

--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -345,7 +345,6 @@ void domain_update_node_affinity(struct
     cpumask_var_t cpumask;
     cpumask_var_t online_affinity;
     const cpumask_t *online;
-    nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
@@ -367,28 +366,19 @@ void domain_update_node_affinity(struct
         cpumask_or(cpumask, cpumask, online_affinity);
     }
 
+    /*
+     * If d->auto_node_affinity is true, the domain's node-affinity mask
+     * (d->node_affinity) is automaically computed from all the domain's
+     * vcpus' vcpu-affinity masks (the union of which we have just built
+     * above in cpumask). OTOH, if d->auto_node_affinity is false, we
+     * must leave the node-affinity of the domain alone.
+     */
     if ( d->auto_node_affinity )
     {
-        /* Node-affinity is automaically computed from all vcpu-affinities */
+        nodes_clear(d->node_affinity);
         for_each_online_node ( node )
             if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
-                node_set(node, nodemask);
-
-        d->node_affinity = nodemask;
-    }
-    else
-    {
-        /* Node-affinity is provided by someone else, just filter out cpus
-         * that are either offline or not in the affinity of any vcpus. */
-        nodemask = d->node_affinity;
-        for_each_node_mask ( node, d->node_affinity )
-            if ( !cpumask_intersects(&node_to_cpumask(node), cpumask) )
-                node_clear(node, nodemask);//d->node_affinity);
-
-        /* Avoid loosing track of node-affinity because of a bad
-         * vcpu-affinity has been specified. */
-        if ( !nodes_empty(nodemask) )
-            d->node_affinity = nodemask;
+                node_set(node, d->node_affinity);
     }
 
     sched_set_node_affinity(d, &d->node_affinity);
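For readers not steeped in the Xen internals touched above, the following is a minimal, self-contained sketch of the behaviour the patched function is meant to have. It is not Xen code: plain unsigned-int bitmasks stand in for cpumask_t/nodemask_t, a hypothetical node_cpus[] table stands in for node_to_cpumask(), and the domain structure is reduced to the two fields that matter here.

/*
 * Hypothetical, simplified model of domain_update_node_affinity()
 * after this patch.  Illustrative only, not the actual Xen code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

/* Assumed topology: node n owns pcpus 4*n .. 4*n+3. */
static const unsigned int node_cpus[NR_NODES] = {
    0x000f, 0x00f0, 0x0f00, 0xf000,
};

struct dom {
    bool auto_node_affinity;     /* true: Xen computes node-affinity       */
    unsigned int node_affinity;  /* bit n set => node n is in the affinity */
};

/* 'vcpu_union' models the union of all the vcpus' (online) vcpu-affinities,
 * i.e. what the patched function accumulates in 'cpumask'. */
static void update_node_affinity(struct dom *d, unsigned int vcpu_union)
{
    unsigned int node;

    /* Not in "auto" mode: the user/toolstack owns node_affinity; leave it. */
    if ( !d->auto_node_affinity )
        return;

    /* "Auto" mode: node-affinity is the set of nodes whose pcpus
     * intersect the union of the vcpu-affinities. */
    d->node_affinity = 0;
    for ( node = 0; node < NR_NODES; node++ )
        if ( node_cpus[node] & vcpu_union )
            d->node_affinity |= 1u << node;
}

int main(void)
{
    /* The user explicitly set node-affinity to node 0 only ... */
    struct dom d = { .auto_node_affinity = false, .node_affinity = 0x1 };

    /* ... and later pins the vcpus to pcpus 4-7 (node 1).  Before the
     * patch, the removed "else" branch could rewrite node_affinity here;
     * after it, the user's setting is left alone. */
    update_node_affinity(&d, 0x00f0);
    printf("node_affinity = %#x\n", d.node_affinity);   /* still 0x1 */

    /* In "auto" mode the mask follows the vcpu-affinities instead. */
    d.auto_node_affinity = true;
    update_node_affinity(&d, 0x00f0);
    printf("node_affinity = %#x\n", d.node_affinity);   /* now 0x2 */
    return 0;
}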