# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1226401587 0
# Node ID 76e90ac5067ef71f60b68ea0515f7f0466be5dca
# Parent  beade55d67fc2c81adaaa552804e0b66dc25becb
xend: Restore CPU affinity on domain resume.

Move affinity-setting logic into its own function and call from
relevant places.

From: Jiri Denemark <jdenemar@redhat.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

Index: xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-3.3.1-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-3.3.1-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -476,6 +476,7 @@ class XendDomainInfo:
         if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
             try:
                 self._constructDomain()
+                self._setCPUAffinity()
                 self._storeVmDetails()
                 self._createChannels()
                 self._createDevices()
@@ -2131,6 +2132,64 @@ class XendDomainInfo:
             raise XendError(str(exn))
 
 
+    def _setCPUAffinity(self):
+        """ Repin domain vcpus if a restricted cpus list is provided
+        """
+
+        def has_cpus():
+            if self.info['cpus'] is not None:
+                for c in self.info['cpus']:
+                    if c:
+                        return True
+            return False
+
+        if has_cpus():
+            for v in range(0, self.info['VCPUs_max']):
+                if self.info['cpus'][v]:
+                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
+        else:
+            def find_relaxed_node(node_list):
+                import sys
+                nr_nodes = info['nr_nodes']
+                if node_list is None:
+                    node_list = range(0, nr_nodes)
+                nodeload = [0]
+                nodeload = nodeload * nr_nodes
+                from xen.xend import XendDomain
+                doms = XendDomain.instance().list('all')
+                for dom in filter (lambda d: d.domid != self.domid, doms):
+                    cpuinfo = dom.getVCPUInfo()
+                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
+                        if sxp.child_value(vcpu, 'online') == 0: continue
+                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
+                        for i in range(0, nr_nodes):
+                            node_cpumask = info['node_to_cpu'][i]
+                            for j in node_cpumask:
+                                if j in cpumap:
+                                    nodeload[i] += 1
+                                    break
+                for i in range(0, nr_nodes):
+                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
+                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
+                    else:
+                        nodeload[i] = sys.maxint
+                index = nodeload.index( min(nodeload) )
+                return index
+
+            info = xc.physinfo()
+            if info['nr_nodes'] > 1:
+                node_memory_list = info['node_to_memory']
+                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
+                candidate_node_list = []
+                for i in range(0, info['nr_nodes']):
+                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
+                        candidate_node_list.append(i)
+                index = find_relaxed_node(candidate_node_list)
+                cpumask = info['node_to_cpu'][index]
+                for v in range(0, self.info['VCPUs_max']):
+                    xc.vcpu_setaffinity(self.domid, v, cpumask)
+
+
     def _initDomain(self):
         log.debug('XendDomainInfo.initDomain: %s %s',
                   self.domid,
@@ -2150,58 +2209,7 @@ class XendDomainInfo:
         # repin domain vcpus if a restricted cpus list is provided
         # this is done prior to memory allocation to aide in memory
         # distribution for NUMA systems.
-        def has_cpus():
-            if self.info['cpus'] is not None:
-                for c in self.info['cpus']:
-                    if c:
-                        return True
-            return False
-
-        if has_cpus():
-            for v in range(0, self.info['VCPUs_max']):
-                if self.info['cpus'][v]:
-                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
-        else:
-            def find_relaxed_node(node_list):
-                import sys
-                nr_nodes = info['nr_nodes']
-                if node_list is None:
-                    node_list = range(0, nr_nodes)
-                nodeload = [0]
-                nodeload = nodeload * nr_nodes
-                from xen.xend import XendDomain
-                doms = XendDomain.instance().list('all')
-                for dom in filter (lambda d: d.domid != self.domid, doms):
-                    cpuinfo = dom.getVCPUInfo()
-                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
-                        if sxp.child_value(vcpu, 'online') == 0: continue
-                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
-                        for i in range(0, nr_nodes):
-                            node_cpumask = info['node_to_cpu'][i]
-                            for j in node_cpumask:
-                                if j in cpumap:
-                                    nodeload[i] += 1
-                                    break
-                for i in range(0, nr_nodes):
-                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
-                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
-                    else:
-                        nodeload[i] = sys.maxint
-                index = nodeload.index( min(nodeload) )
-                return index
-
-            info = xc.physinfo()
-            if info['nr_nodes'] > 1:
-                node_memory_list = info['node_to_memory']
-                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
-                candidate_node_list = []
-                for i in range(0, info['nr_nodes']):
-                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
-                        candidate_node_list.append(i)
-                index = find_relaxed_node(candidate_node_list)
-                cpumask = info['node_to_cpu'][index]
-                for v in range(0, self.info['VCPUs_max']):
-                    xc.vcpu_setaffinity(self.domid, v, cpumask)
+        self._setCPUAffinity()
 
         # Use architecture- and image-specific calculations to determine
         # the various headrooms necessary, given the raw configured