393ad2e586
21866-xenapi.patch
- bnc#625003 - Fix vm config options coredump-{restart,destroy}
  Added hunk to xm-create-xflag.patch
- bnc#605186 - Squelch harmless error messages in block-iscsi
- bnc#623438 - Add ability to control SCSI device path scanning in xend
  21847-pscsi.patch
  21723-get-domu-state.patch
OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=68
2532 lines
91 KiB
Diff
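
Before the diff itself: the patch extends the xen.lowlevel.xc binding with the cpupool calls that the xend changes below build on. The following is a minimal sketch (hypothetical usage, not part of the patch) of how those bindings fit together; it assumes a patched xen.lowlevel.xc on a running Xen host, root privileges, and an existing domain with id 5 (an arbitrary example):

    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.xc()

    # Create a pool under the credit scheduler; the call returns the new pool id.
    pool_id = xc.cpupool_create(pool=0, sched=xen.lowlevel.xc.XEN_SCHEDULER_CREDIT)

    # Bind the lowest free physical cpu to the pool (cpu defaults to -1 = lowest free).
    xc.cpupool_addcpu(pool_id, -1)

    # Move an existing domain (id 5 here) into the pool.
    xc.cpupool_movedomain(pool_id, 5)

    # Inspect all pools, and the cpus still outside any pool.
    for rec in xc.cpupool_getinfo():
        print rec['cpupool'], rec['sched'], rec['n_dom'], rec['cpulist']
    print xc.cpupool_freeinfo()

This mirrors the sequence XendCPUPool.activate() and XendCPUPool.move_domain() perform under pool_lock in the XendCPUPool.py hunk below.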
Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -106,8 +106,8 @@ static PyObject *pyxc_domain_create(XcOb

     static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };

-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
-                                      &dom, &ssidref, &pyhandle, &flags, &target))
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list, &dom,
+                                      &ssidref, &pyhandle, &flags, &target))
         return NULL;
     if ( pyhandle != NULL )
     {
@@ -329,7 +329,7 @@ static PyObject *pyxc_domain_getinfo(XcO
     {
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-            ",s:L,s:L,s:L,s:i,s:i}",
+            ",s:L,s:L,s:L,s:i,s:i,s:i}",
             "domid", (int)info[i].domid,
             "online_vcpus", info[i].nr_online_vcpus,
             "max_vcpu_id", info[i].max_vcpu_id,
@@ -344,7 +344,8 @@ static PyObject *pyxc_domain_getinfo(XcO
             "cpu_time", (long long)info[i].cpu_time,
             "maxmem_kb", (long long)info[i].max_memkb,
             "ssidref", (int)info[i].ssidref,
-            "shutdown_reason", info[i].shutdown_reason);
+            "shutdown_reason", info[i].shutdown_reason,
+            "cpupool", (int)info[i].cpupool);
         pyhandle = PyList_New(sizeof(xen_domain_handle_t));
         if ( (pyhandle == NULL) || (info_dict == NULL) )
         {
@@ -1751,6 +1752,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }

+static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+{
+    PyObject *cpulist = NULL;
+    uint32_t i;
+
+    cpulist = PyList_New(0);
+    for ( i = 0; cpumap != 0; i++ )
+    {
+        if ( cpumap & 1 )
+        {
+            PyObject* pyint = PyInt_FromLong(i);
+
+            PyList_Append(cpulist, pyint);
+            Py_DECREF(pyint);
+        }
+        cpumap >>= 1;
+    }
+    return cpulist;
+}
+
+static PyObject *pyxc_cpupool_create(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT;
+
+    static char *kwd_list[] = { "pool", "sched", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &cpupool,
+                                      &sched))
+        return NULL;
+
+    if ( xc_cpupool_create(self->xc_handle, &cpupool, sched) < 0 )
+        return pyxc_error_to_exception();
+
+    return PyInt_FromLong(cpupool);
+}
+
+static PyObject *pyxc_cpupool_destroy(XcObject *self,
+                                      PyObject *args)
+{
+    uint32_t cpupool;
+
+    if (!PyArg_ParseTuple(args, "i", &cpupool))
+        return NULL;
+
+    if (xc_cpupool_destroy(self->xc_handle, cpupool) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_getinfo(XcObject *self,
+                                      PyObject *args,
+                                      PyObject *kwds)
+{
+    PyObject *list, *info_dict;
+
+    uint32_t first_pool = 0;
+    int max_pools = 1024, nr_pools, i;
+    xc_cpupoolinfo_t *info;
+
+    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
+                                      &first_pool, &max_pools) )
+        return NULL;
+
+    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
+    if (info == NULL)
+        return PyErr_NoMemory();
+
+    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
+
+    if (nr_pools < 0)
+    {
+        free(info);
+        return pyxc_error_to_exception();
+    }
+
+    list = PyList_New(nr_pools);
+    for ( i = 0 ; i < nr_pools; i++ )
+    {
+        info_dict = Py_BuildValue(
+            "{s:i,s:i,s:i,s:N}",
+            "cpupool", (int)info[i].cpupool_id,
+            "sched", info[i].sched_id,
+            "n_dom", info[i].n_dom,
+            "cpulist", cpumap_to_cpulist(info[i].cpumap));
+        if ( info_dict == NULL )
+        {
+            Py_DECREF(list);
+            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
+            free(info);
+            return NULL;
+        }
+        PyList_SetItem(list, i, info_dict);
+    }
+
+    free(info);
+
+    return list;
+}
+
+static PyObject *pyxc_cpupool_addcpu(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_removecpu(XcObject *self,
+                                        PyObject *args,
+                                        PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_movedomain(XcObject *self,
+                                         PyObject *args,
+                                         PyObject *kwds)
+{
+    uint32_t cpupool, domid;
+
+    static char *kwd_list[] = { "cpupool", "domid", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
+                                      &cpupool, &domid) )
+        return NULL;
+
+    if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
+{
+    uint64_t cpumap;
+
+    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+        return pyxc_error_to_exception();
+
+    return cpumap_to_cpulist(cpumap);
+}

 static PyMethodDef pyxc_methods[] = {
     { "handle",
@@ -1866,7 +2040,8 @@ static PyMethodDef pyxc_methods[] = {
      " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
      " cpu_time [long]: CPU time consumed, in nanoseconds\n"
      " shutdown_reason [int]: Numeric code from guest OS, explaining "
-     "reason why it shut itself down.\n" },
+     "reason why it shut itself down.\n"
+     " cpupool [int]: Id of cpupool domain is bound to.\n" },

     { "vcpu_getinfo",
       (PyCFunction)pyxc_vcpu_getinfo,
@@ -2264,6 +2439,66 @@ static PyMethodDef pyxc_methods[] = {
      " enable [int,0|1]: Disable or enable?\n"
      "Returns: [int] 0 on success; -1 on error.\n" },

+    { "cpupool_create",
+      (PyCFunction)pyxc_cpupool_create,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Create new cpupool.\n"
+      " pool [int, 0]: cpupool identifier to use (allocated if zero).\n"
+      " sched [int]: scheduler to use (credit if unspecified).\n\n"
+      "Returns: [int] new cpupool identifier; -1 on error.\n" },
+
+    { "cpupool_destroy",
+      (PyCFunction)pyxc_cpupool_destroy,
+      METH_VARARGS, "\n"
+      "Destroy a cpupool.\n"
+      " pool [int]: Identifier of cpupool to be destroyed.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_getinfo",
+      (PyCFunction)pyxc_cpupool_getinfo,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Get information regarding a set of cpupools, in increasing id order.\n"
+      " first_pool [int, 0]: First cpupool to retrieve info about.\n"
+      " max_pools [int, 1024]: Maximum number of cpupools to retrieve info"
+      " about.\n\n"
+      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
+      "         parameter then there was an error, or the end of the\n"
+      "         cpupool-id space was reached.\n"
+      " pool     [int]: Identifier of cpupool to which this info pertains\n"
+      " sched    [int]: Scheduler used for this cpupool\n"
+      " n_dom    [int]: Number of Domains in this cpupool\n"
+      " cpulist  [list]: List of CPUs this cpupool is using\n" },
+
+    { "cpupool_addcpu",
+      (PyCFunction)pyxc_cpupool_addcpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Add a cpu to a cpupool.\n"
+      " pool [int]: Identifier of cpupool.\n"
+      " cpu [int, -1]: Cpu to add (lowest free if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_removecpu",
+      (PyCFunction)pyxc_cpupool_removecpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Remove a cpu from a cpupool.\n"
+      " pool [int]: Identifier of cpupool.\n"
+      " cpu [int, -1]: Cpu to remove (highest used if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_movedomain",
+      (PyCFunction)pyxc_cpupool_movedomain,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Move a domain to another cpupool.\n"
+      " pool [int]: Identifier of cpupool to move domain to.\n"
+      " dom [int]: Domain to move\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_freeinfo",
+      (PyCFunction)pyxc_cpupool_freeinfo,
+      METH_NOARGS, "\n"
+      "Get info about cpus not in any cpupool.\n"
+      "Returns: [list]: List of CPUs\n" },
+
     { NULL, NULL, 0, NULL }
 };

Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/util/sxputils.py
@@ -0,0 +1,64 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (c) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" convert sxp to map / map to sxp.
+"""
+
+import types
+from xen.xend import sxp
+
+def map2sxp(map_val):
+    """ conversion of all key-value pairs of a map (recursively) to sxp.
+        @param map_val: map; if a value contains a list or dict it is also
+                        converted to sxp
+        @type map_val: dict
+        @return sxp expr
+        @rtype: list
+    """
+    sxp_vals = []
+    for (k, v) in map_val.items():
+        if isinstance(v, types.DictionaryType):
+            sxp_vals += [[k] + map2sxp(v)]
+        elif isinstance(v, types.ListType):
+            sxp_vals += [[k] + v]
+        else:
+            sxp_vals += [[k, v]]
+    return sxp_vals
+
+def sxp2map( s ):
+    """ conversion of sxp to map.
+        @param s: sxp expr
+        @type s: list
+        @return: map
+        @rtype: dict
+    """
+    sxphash = {}
+
+    for child in sxp.children( s ):
+        if isinstance( child, types.ListType ) and len( child ) > 1:
+            if isinstance( child[1], types.ListType ) and len( child[1] ) > 1:
+                sxphash[ child[0] ] = sxp2map( child )
+            else:
+                childs = sxp.children(child)
+                if len(childs) > 1:
+                    sxphash[ child[0] ] = childs
+                else:
+                    sxphash[ child[0] ] = childs[0]
+
+    return sxphash
+
+
Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendAPI.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
@@ -51,6 +51,7 @@ from XendDPCI import XendDPCI
 from XendPSCSI import XendPSCSI, XendPSCSI_HBA
 from XendDSCSI import XendDSCSI, XendDSCSI_HBA
 from XendXSPolicy import XendXSPolicy, XendACMPolicy
+from xen.xend.XendCPUPool import XendCPUPool

 from XendAPIConstants import *
 from xen.util.xmlrpclib2 import stringify
@@ -498,6 +499,7 @@ classes = {
     'PSCSI_HBA'    : valid_object("PSCSI_HBA"),
     'DSCSI'        : valid_object("DSCSI"),
     'DSCSI_HBA'    : valid_object("DSCSI_HBA"),
+    'cpu_pool'     : valid_object("cpu_pool"),
 }

 autoplug_classes = {
@@ -514,6 +516,7 @@ autoplug_classes = {
     'DSCSI_HBA' : XendDSCSI_HBA,
     'XSPolicy'  : XendXSPolicy,
     'ACMPolicy' : XendACMPolicy,
+    'cpu_pool'  : XendCPUPool,
 }

 class XendAPI(object):
@@ -914,7 +917,8 @@ class XendAPI(object):
                     'API_version_minor',
                     'API_version_vendor',
                     'API_version_vendor_implementation',
-                    'enabled']
+                    'enabled',
+                    'resident_cpu_pools']

     host_attr_rw = ['name_label',
                     'name_description',
@@ -1014,6 +1018,8 @@ class XendAPI(object):
         return xen_api_todo()
     def host_get_logging(self, _, host_ref):
         return xen_api_todo()
+    def host_get_resident_cpu_pools(self, _, host_ref):
+        return xen_api_success(XendCPUPool.get_all())

     # object methods
     def host_disable(self, session, host_ref):
@@ -1076,7 +1082,9 @@ class XendAPI(object):
                   'PBDs': XendPBD.get_all(),
                   'PPCIs': XendPPCI.get_all(),
                   'PSCSIs': XendPSCSI.get_all(),
-                  'PSCSI_HBAs': XendPSCSI_HBA.get_all()}
+                  'PSCSI_HBAs': XendPSCSI_HBA.get_all(),
+                  'resident_cpu_pools': XendCPUPool.get_all(),
+                  }
         return xen_api_success(record)

     def host_tmem_thaw(self, _, host_ref, cli_id):
@@ -1185,7 +1193,10 @@ class XendAPI(object):
                         'stepping',
                         'flags',
                         'utilisation',
-                        'features']
+                        'features',
+                        'cpu_pool']
+
+    host_cpu_funcs = [('get_unassigned_cpus', 'Set(host_cpu)')]

     # attributes
     def _host_cpu_get(self, ref, field):
@@ -1210,21 +1221,28 @@ class XendAPI(object):
         return self._host_cpu_get(ref, 'flags')
     def host_cpu_get_utilisation(self, _, ref):
         return xen_api_success(XendNode.instance().get_host_cpu_load(ref))
+    def host_cpu_get_cpu_pool(self, _, ref):
+        return xen_api_success(XendCPUPool.get_cpu_pool_by_cpu_ref(ref))

     # object methods
     def host_cpu_get_record(self, _, ref):
         node = XendNode.instance()
         record = dict([(f, node.get_host_cpu_field(ref, f))
                        for f in self.host_cpu_attr_ro
-                       if f not in ['uuid', 'host', 'utilisation']])
+                       if f not in ['uuid', 'host', 'utilisation', 'cpu_pool']])
         record['uuid'] = ref
         record['host'] = node.uuid
         record['utilisation'] = node.get_host_cpu_load(ref)
+        record['cpu_pool'] = XendCPUPool.get_cpu_pool_by_cpu_ref(ref)
         return xen_api_success(record)

     # class methods
     def host_cpu_get_all(self, session):
         return xen_api_success(XendNode.instance().get_host_cpu_refs())
+    def host_cpu_get_unassigned_cpus(self, session):
+        return xen_api_success(
+            [ref for ref in XendNode.instance().get_host_cpu_refs()
+             if len(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) == 0])


     # Xen API: Class host_metrics
@@ -1284,6 +1302,7 @@ class XendAPI(object):
                   'is_control_domain',
                   'metrics',
                   'crash_dumps',
+                  'cpu_pool',
                   ]

     VM_attr_rw = ['name_label',
@@ -1312,7 +1331,9 @@ class XendAPI(object):
                   'platform',
                   'PCI_bus',
                   'other_config',
-                  'security_label']
+                  'security_label',
+                  'pool_name',
+                  ]

     VM_methods = [('clone', 'VM'),
                   ('start', None),
@@ -1340,7 +1361,9 @@ class XendAPI(object):
                   ('set_memory_dynamic_min_live', None),
                   ('send_trigger', None),
                   ('migrate', None),
-                  ('destroy', None)]
+                  ('destroy', None),
+                  ('cpu_pool_migrate', None),
+                  ]

     VM_funcs = [('create', 'VM'),
                 ('restore', None),
@@ -1540,6 +1563,17 @@ class XendAPI(object):
         return xen_api_success(
             xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain())

+    def VM_get_cpu_pool(self, session, vm_ref):
+        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
+        pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool())
+        return xen_api_success(pool_ref)
+
+    def VM_get_pool_name(self, session, vm_ref):
+        return self.VM_get('pool_name', session, vm_ref)
+
+    def VM_set_pool_name(self, session, vm_ref, value):
+        return self.VM_set('pool_name', session, vm_ref, value)
+
     def VM_set_name_label(self, session, vm_ref, label):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         dom.setName(label)
@@ -1618,7 +1652,8 @@ class XendAPI(object):
             if key.startswith("cpumap"):
                 vcpu = int(key[6:])
                 try:
-                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
+                    cpus = map(int, value.split(","))
+                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, cpus)
                 except Exception, ex:
                     log.exception(ex)

@@ -1836,7 +1871,9 @@ class XendAPI(object):
             'is_control_domain': xeninfo.info['is_control_domain'],
             'metrics': xeninfo.get_metrics(),
             'security_label': xeninfo.get_security_label(),
-            'crash_dumps': []
+            'crash_dumps': [],
+            'pool_name': xeninfo.info.get('pool_name'),
+            'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()),
         }
         return xen_api_success(record)

@@ -1934,6 +1971,25 @@ class XendAPI(object):
         xendom.domain_restore(src, bool(paused))
         return xen_api_success_void()

+    def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref):
+        xendom = XendDomain.instance()
+        xeninfo = xendom.get_vm_by_uuid(vm_ref)
+        domid = xeninfo.getDomid()
+        pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass())
+        if pool == None:
+            return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref])
+        if domid is not None:
+            if domid == 0:
+                return xen_api_error(['OPERATION_NOT_ALLOWED',
+                                      'could not move Domain-0'])
+            try:
+                XendCPUPool.move_domain(cpu_pool_ref, domid)
+            except Exception, ex:
+                return xen_api_error(['INTERNAL_ERROR',
+                                      'could not move domain'])
+        self.VM_set('pool_name', session, vm_ref, pool.get_name_label())
+        return xen_api_success_void()
+

     # Xen API: Class VBD
     # ----------------------------------------------------------------
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -0,0 +1,903 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (c) 2009 Fujitsu Technology Solutions.
+#============================================================================
+
+""" CPU Pool support including XEN-API and Legacy API.
+"""
+
+import types
+import threading
+import re
+import xen.lowlevel.xc
+import XendNode
+import XendDomain
+from xen.xend.XendLogging import log
+from xen.xend.XendBase import XendBase
+from xen.xend import XendAPIStore
+from xen.xend.XendConstants import XS_POOLROOT
+from xen.xend import uuid as genuuid
+from xen.xend.XendError import VmError, XendAPIError, PoolError
+from xen.xend.xenstore.xstransact import xstransact
+from xen.util.sxputils import sxp2map, map2sxp
+
+
+XEND_ERROR_INTERNAL          = 'INTERNAL_ERROR'
+XEND_ERROR_UNKOWN_SCHED_POLICY = 'UNKOWN_SCHED_POLICY'
+XEND_ERROR_BAD_POOL_STATE    = 'POOL_BAD_STATE'
+XEND_ERROR_POOL_PARAM        = 'PARAMETER_ERROR'
+XEND_ERROR_INSUFFICIENT_CPUS = 'INSUFFICIENT_CPUS'
+XEND_ERROR_POOL_RECONF       = 'POOL_RECONF'
+XEND_ERROR_INVALID_CPU       = 'INVAILD_CPU'
+XEND_ERROR_LAST_CPU_NOT_REM  = 'LAST_CPU_NOT_REMOVEABLE'
+
+
+XEN_SCHEDULER_TO_ID = {
+    'credit' : xen.lowlevel.xc.XEN_SCHEDULER_CREDIT,
+    'sedf'   : xen.lowlevel.xc.XEN_SCHEDULER_SEDF,
+    }
+
+xc = xen.lowlevel.xc.xc()
+
+class XendCPUPool(XendBase):
+    """ CPU Pool management.
+        @ivar pool_lock: Lock to secure modification of pool data
+        @type pool_lock: RLock
+    """
+
+    pool_lock = threading.RLock()
+
+    def getClass(cls):
+        return "cpu_pool"
+
+    def getAttrRO(cls):
+        attrRO = ['resident_on',
+                  'started_VMs',
+                  'host_CPUs',
+                  'activated',
+                 ]
+        return XendBase.getAttrRO() + attrRO
+
+    def getAttrRW(cls):
+        attrRW = ['name_label',
+                  'name_description',
+                  'auto_power_on',
+                  'ncpu',
+                  'sched_policy',
+                  'proposed_CPUs',
+                  'other_config',
+                 ]
+        return XendBase.getAttrRW() + attrRW
+
+    def getMethods(cls):
+        methods = ['destroy',
+                   'activate',
+                   'deactivate',
+                   'add_host_CPU_live',
+                   'remove_host_CPU_live',
+                   'add_to_proposed_CPUs',
+                   'remove_from_proposed_CPUs',
+                   'add_to_other_config',
+                   'remove_from_other_config',
+                  ]
+        return XendBase.getMethods() + methods
+
+    def getFuncs(cls):
+        funcs = ['create',
+                 'get_by_name_label',
+                ]
+        return XendBase.getFuncs() + funcs
+
+    getClass = classmethod(getClass)
+    getAttrRO = classmethod(getAttrRO)
+    getAttrRW = classmethod(getAttrRW)
+    getMethods = classmethod(getMethods)
+    getFuncs = classmethod(getFuncs)
+
+
+    #
+    # XenAPI function calls
+    #
+
+    def create(cls, record):
+        """ Create a new managed pool instance.
+            @param record: attributes of pool
+            @type record: dict
+            @return: uuid of created pool
+            @rtype: str
+        """
+        new_uuid = genuuid.createString()
+        XendCPUPool(record, new_uuid)
+        XendNode.instance().save_cpu_pools()
+        return new_uuid
+
+    create = classmethod(create)
+
+
+    def get_by_name_label(cls, name_label):
+        """ Query a Pool(ref) by its name.
+            @return: refs of pools with the given name
+            @rtype: list of str
+        """
+        cls.pool_lock.acquire()
+        try:
+            return [ inst.get_uuid()
+                     for inst in XendAPIStore.get_all(cls.getClass())
+                     if inst.name_label == name_label
+                   ]
+        finally:
+            cls.pool_lock.release()
+
+    get_by_name_label = classmethod(get_by_name_label)
+
+
+    def get_cpu_pool_by_cpu_ref(cls, host_cpu):
+        """ Query the cpu_pool ref the given cpu belongs to.
+            @param host_cpu: ref of host_cpu to lookup
+            @type host_cpu: str
+            @return: list of cpu_pool refs (contains not more than one element)
+            @rtype: list of str
+        """
+        node = XendNode.instance()
+        cpu_nr = node.get_host_cpu_field(host_cpu, 'number')
+        for pool_rec in xc.cpupool_getinfo():
+            if cpu_nr in pool_rec['cpulist']:
+                # pool found; return the ref
+                return cls.query_pool_ref(pool_rec['cpupool'])
+        return []
+
+    get_cpu_pool_by_cpu_ref = classmethod(get_cpu_pool_by_cpu_ref)
+
+
+    def get_all_managed(cls):
+        """ Query all managed pools.
+            @return: uuids of all managed pools
+            @rtype: list of str
+        """
+        cls.pool_lock.acquire()
+        try:
+            managed_pools = [ inst.get_uuid()
+                              for inst in XendAPIStore.get_all(cls.getClass())
+                              if inst.is_managed() ]
+        finally:
+            cls.pool_lock.release()
+        return managed_pools
+
+    get_all_managed = classmethod(get_all_managed)
+
+
+    #
+    # XenAPI methods calls
+    #
+
+    def __init__(self, record, new_uuid, managed_pool=True):
+        XendBase.__init__(self, new_uuid, record)
+        try:
+            self._managed = managed_pool
+            self.name_label = None
+
+            name = record.get('name_label', 'Pool-Unnamed')
+            self._checkName(name)
+            self.name_label = name
+            self.name_description = record.get('name_description',
+                                               self.name_label)
+            self.proposed_cpus = [ int(cpu)
+                                   for cpu in record.get('proposed_CPUs', []) ]
+            self.auto_power_on = bool(record.get('auto_power_on', False))
+            self.ncpu = int(record.get('ncpu', 1))
+            self.sched_policy = record.get('sched_policy', '')
+            self.other_config = record.get('other_config', {})
+        except Exception, ex:
+            XendBase.destroy(self)
+            raise ex
+
+
+    def get_resident_on(self):
+        """ Always return uuid of own node.
+            @return: uuid of this node
+            @rtype: str
+        """
+        return XendNode.instance().uuid
+
+    def get_started_VMs(self):
+        """ Query all VMs currently assigned to pool.
+            @return: refs of all VMs assigned to pool; if pool is not active,
+                     an empty list will be returned
+            @rtype: list of str
+        """
+        if self.get_activated():
+            # search VMs related to this pool
+            pool_id = self.query_pool_id()
+            started_VMs = [ vm.get_uuid()
+                            for vm in XendDomain.instance().list('all')
+                            if vm.get_cpu_pool() == pool_id ]
+        else:
+            # pool not active, so it couldn't have any started VMs
+            started_VMs = []
+
+        return started_VMs
+
+    def get_host_CPUs(self):
+        """ Query refs of all host CPUs currently assigned to this pool.
+            - Read pool id of this pool from xenstore
+            - Read cpu configuration from hypervisor
+            - lookup cpu number -> cpu ref
+            @return: host_cpu refs
+            @rtype: list of str
+        """
+        if self.get_activated():
+            node = XendNode.instance()
+            pool_id = self.query_pool_id()
+            if pool_id == None:
+                raise PoolError(XEND_ERROR_INTERNAL,
+                                [self.getClass(), 'get_host_CPUs'])
+            cpus = []
+            for pool_rec in xc.cpupool_getinfo():
+                if pool_rec['cpupool'] == pool_id:
+                    cpus = pool_rec['cpulist']
+
+            # query host_cpu ref for any cpu of the pool
+            host_CPUs = [ cpu_ref
+                          for cpu_ref in node.get_host_cpu_refs()
+                          if node.get_host_cpu_field(cpu_ref, 'number')
+                             in cpus ]
+        else:
+            # pool not active, so it couldn't have any assigned cpus
+            host_CPUs = []
+
+        return host_CPUs
+
+    def get_activated(self):
+        """ Query if the pool is registered in XenStore.
+            If the pool uuid is not in XenStore, the pool is not activated.
+            @return: True, if activated
+            @rtype: bool
+        """
+        return self.query_pool_id() != None
+
+    def get_name_label(self):
+        return self.name_label
+
+    def get_name_description(self):
+        return self.name_description
+
+    def get_auto_power_on(self):
+        return self.auto_power_on
+
+    def get_ncpu(self):
+        return self.ncpu
+
+    def get_sched_policy(self):
+        if len(self.sched_policy) == 0:
+            # default scheduler selected
+            return XendNode.instance().get_vcpus_policy()
+        else:
+            return self.sched_policy
+
+    def get_proposed_CPUs(self):
+        return [ str(cpu) for cpu in self.proposed_cpus ]
+
+    def get_other_config(self):
+        return self.other_config
+
+    def set_name_label(self, name_label):
+        self._checkName(name_label)
+        self.name_label = name_label
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_name_description(self, name_descr):
+        self.name_description = name_descr
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_auto_power_on(self, auto_power_on):
+        self.auto_power_on = bool(int(auto_power_on))
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_ncpu(self, ncpu):
+        _ncpu = int(ncpu)
+        if _ncpu < 1:
+            raise PoolError(XEND_ERROR_POOL_PARAM, 'ncpu')
+        self.ncpu = _ncpu
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_sched_policy(self, sched_policy):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+        self.sched_policy = sched_policy
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_proposed_CPUs(self, proposed_cpus):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+        self.proposed_cpus = [ int(cpu) for cpu in proposed_cpus ]
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def set_other_config(self, other_config):
+        self.other_config = other_config
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def destroy(self):
+        """ In order to destroy a cpu pool, it must be deactivated """
+        self.pool_lock.acquire()
+        try:
+            if self.get_activated():
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+            XendBase.destroy(self)
+        finally:
+            self.pool_lock.release()
+        XendNode.instance().save_cpu_pools()
+
+    def activate(self):
+        """ Create pool in hypervisor and add cpus.
+            Preconditions:
+            - pool not already active
+            - enough unbound cpus available
+            Actions:
+            - create pool in hypervisor
+            - select free cpus (preferred from proposed_CPUs list) and bind
+              them to the pool
+            - create entries in Xenstore
+        """
+        self.pool_lock.acquire()
+        try:
+            if self.get_activated():
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+            if self.sched_policy != XendNode.instance().get_vcpus_policy():
+                raise PoolError(XEND_ERROR_UNKOWN_SCHED_POLICY)
+            unbound_cpus = set(self.unbound_cpus())
+            if len(unbound_cpus) < self.ncpu:
+                raise PoolError(XEND_ERROR_INSUFFICIENT_CPUS,
+                                [str(self.ncpu), str(len(unbound_cpus))])
+
+            # build list of cpu numbers to bind to pool
+            cpu_set = set(self.proposed_cpus).intersection(unbound_cpus)
+            if len(cpu_set) < self.ncpu:
+                pool_cpus = (list(cpu_set) +
+                             list(unbound_cpus.difference(cpu_set)))
+            else:
+                pool_cpus = list(cpu_set)
+            pool_cpus = pool_cpus[0:self.ncpu]
+
+            # create pool in hypervisor
+            pool_id = xc.cpupool_create(
+                sched = XEN_SCHEDULER_TO_ID.get(self.sched_policy, 0))
+
+            self.update_XS(pool_id)
+            # add cpus
+            for cpu in pool_cpus:
+                xc.cpupool_addcpu(pool_id, cpu)
+
+        finally:
+            self.pool_lock.release()
+
+    def deactivate(self):
+        """ Delete pool in hypervisor.
+            Preconditions:
+            - pool is activated
+            - no running VMs in pool
+            Actions:
+            - call hypervisor for deletion
+            - remove path of pool in xenstore
+        """
+        self.pool_lock.acquire()
+        try:
+            if not self.get_activated():
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
+            if len(self.get_started_VMs()) != 0:
+                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'in use')
+
+            pool_id = self.query_pool_id()
+            # remove cpus from pool
+            cpus = []
+            for pool_rec in xc.cpupool_getinfo():
+                if pool_rec['cpupool'] == pool_id:
+                    cpus = pool_rec['cpulist']
+            for cpu_number in cpus:
+                xc.cpupool_removecpu(pool_id, cpu_number)
+            xc.cpupool_destroy(pool_id)
+
+            # update XenStore
+            xs_path = XS_POOLROOT + "%s/" % pool_id
+            xstransact.Remove(xs_path)
+        finally:
+            self.pool_lock.release()
+
+    def add_host_CPU_live(self, cpu_ref):
+        """ Add cpu to pool, if it is currently not assigned to a pool.
+            @param cpu_ref: reference of host_cpu instance to add
+            @type cpu_ref: str
+        """
+        if not self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
+        node = XendNode.instance()
+        number = node.get_host_cpu_field(cpu_ref, 'number')
+
+        self.pool_lock.acquire()
+        try:
+            pool_id = self.query_pool_id()
+            other_pool_ref = self.get_cpu_pool_by_cpu_ref(cpu_ref)
+            if len(other_pool_ref) != 0:
+                raise PoolError(XEND_ERROR_INVALID_CPU,
+                    'cpu already assigned to pool "%s"' % other_pool_ref[0])
+            xc.cpupool_addcpu(pool_id, number)
+        finally:
+            self.pool_lock.release()
+
+        if number not in self.proposed_cpus:
+            self.proposed_cpus.append(number)
+        self._update_ncpu(pool_id)
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def remove_host_CPU_live(self, cpu_ref):
+        """ Remove cpu from pool.
+            After a successful call, the cpu is free.
+            Removal of the last cpu of the pool is rejected.
+            @param cpu_ref: reference of host_cpu instance to remove
+            @type cpu_ref: str
+        """
+        if not self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
+        node = XendNode.instance()
+        number = node.get_host_cpu_field(cpu_ref, 'number')
+
+        self.pool_lock.acquire()
+        try:
+            pool_id = self.query_pool_id()
+            pool_rec = {}
+            for pool in xc.cpupool_getinfo():
+                if pool['cpupool'] == pool_id:
+                    pool_rec = pool
+                    break
+
+            if number in pool_rec['cpulist']:
+                if len(pool_rec['cpulist']) < 2 and pool_rec['n_dom'] > 0:
+                    raise PoolError(XEND_ERROR_LAST_CPU_NOT_REM,
+                                    'could not remove last cpu')
+                xc.cpupool_removecpu(pool_id, number)
+            else:
+                raise PoolError(XEND_ERROR_INVALID_CPU,
+                                'CPU not assigned to pool')
+        finally:
+            self.pool_lock.release()
+
+        if number in self.proposed_cpus:
+            self.proposed_cpus.remove(number)
+        self._update_ncpu(pool_id)
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def add_to_proposed_CPUs(self, cpu):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+
+        _cpu = int(cpu)
+        if _cpu not in self.proposed_cpus:
+            self.proposed_cpus.append(_cpu)
+            self.proposed_cpus.sort()
+            if self._managed:
+                XendNode.instance().save_cpu_pools()
+
+    def remove_from_proposed_CPUs(self, cpu):
+        if self.get_activated():
+            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
+        _cpu = int(cpu)
+        if _cpu in self.proposed_cpus:
+            self.proposed_cpus.remove(_cpu)
+            if self._managed:
+                XendNode.instance().save_cpu_pools()
+
+    def add_to_other_config(self, key, value):
+        self.other_config[key] = value
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+    def remove_from_other_config(self, key):
+        if key in self.other_config:
+            del self.other_config[key]
+        if self._managed:
+            XendNode.instance().save_cpu_pools()
+
+
+    #
+    # Legacy RPC calls
+    #
+    def pool_new(cls, config):
+        try:
+            record = sxp2map(config)
+            if record.has_key('proposed_CPUs') and \
+               not isinstance(record['proposed_CPUs'], types.ListType):
+                record['proposed_CPUs'] = [record['proposed_CPUs']]
+            new_uuid = cls.create(record)
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+        return new_uuid
+
+    def pool_create(cls, config):
+        try:
+            record = sxp2map(config)
+            if record.has_key('proposed_CPUs') and \
+               not isinstance(record['proposed_CPUs'], types.ListType):
+                record['proposed_CPUs'] = [record['proposed_CPUs']]
+            new_uuid = genuuid.createString()
+            pool = XendCPUPool(record, new_uuid, False)
+            pool.activate()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_start(cls, poolname):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            pool.activate()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_list(cls, names):
+        sxprs = []
+        try:
+            node = XendNode.instance()
+            xd = XendDomain.instance()
+            pools = cls.get_all_records()
+            for (pool_uuid, pool_vals) in pools.items():
+                if pool_vals['name_label'] in names or len(names) == 0:
+                    # conv host_cpu refs to cpu number
+                    cpus = [ node.get_host_cpu_field(cpu_ref, 'number')
+                             for cpu_ref in pool_vals['host_CPUs'] ]
+                    cpus.sort()
+                    pool_vals['host_CPU_numbers'] = cpus
+                    # query VM names; take into account that a VM returned by
+                    # get_all_records could have been destroyed by now
+                    vm_names = [ vm.getName()
+                                 for vm in map(xd.get_vm_by_uuid,
+                                               pool_vals['started_VMs'])
+                                 if vm ]
+                    pool_vals['started_VM_names'] = vm_names
+                    pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
+                    sxprs += [[pool_uuid] + map2sxp(pool_vals)]
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+        return sxprs
+
+    def pool_destroy(cls, poolname):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            pool.deactivate()
+            if not pool.is_managed():
+                pool.destroy()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_delete(cls, poolname):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            pool.destroy()
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_cpu_add(cls, poolname, cpu):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            cpu_ref = cls._cpu_number_to_ref(int(cpu))
+            if cpu_ref:
+                pool.add_host_CPU_live(cpu_ref)
+            else:
+                raise PoolError(XEND_ERROR_INVALID_CPU,
+                                'CPU unknown')
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_cpu_remove(cls, poolname, cpu):
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        try:
+            cpu_ref = cls._cpu_number_to_ref(int(cpu))
+            if cpu_ref:
+                pool.remove_host_CPU_live(cpu_ref)
+            else:
+                raise PoolError(XEND_ERROR_INVALID_CPU,
+                                'CPU unknown')
+        except XendAPIError, ex:
+            raise VmError(ex.get_api_error())
+
+    def pool_migrate(cls, domname, poolname):
+        dom = XendDomain.instance()
+        pool = cls.lookup_pool(poolname)
+        if not pool:
+            raise VmError('unknown pool %s' % poolname)
+        dominfo = dom.domain_lookup_nr(domname)
+        if not dominfo:
+            raise VmError('unknown domain %s' % domname)
+        domid = dominfo.getDomid()
+        if domid is not None:
+            if domid == 0:
+                raise VmError('could not move Domain-0')
+            try:
+                cls.move_domain(pool.get_uuid(), domid)
+            except Exception, ex:
+                raise VmError('could not move domain')
+        dominfo.info['pool_name'] = poolname
+        dom.managed_config_save(dominfo)
+
+    pool_new = classmethod(pool_new)
+    pool_create = classmethod(pool_create)
+    pool_start = classmethod(pool_start)
+    pool_list = classmethod(pool_list)
+    pool_destroy = classmethod(pool_destroy)
+    pool_delete = classmethod(pool_delete)
+    pool_cpu_add = classmethod(pool_cpu_add)
+    pool_cpu_remove = classmethod(pool_cpu_remove)
+    pool_migrate = classmethod(pool_migrate)
+
+
+    #
+    # methods
+    #
+
+    def is_managed(self):
+        """ Check if pool is managed.
+            @return: True, if managed
+            @rtype: bool
+        """
+        return self._managed
+
+    def query_pool_id(self):
+        """ Get corresponding pool-id of pool instance from XenStore.
+            @return: pool id or None
+            @rtype: int
+        """
+        self.pool_lock.acquire()
+        try:
+            for pool_id in xstransact.List(XS_POOLROOT):
+                uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid')
+                if uuid == self.get_uuid():
+                    return int(pool_id)
+        finally:
+            self.pool_lock.release()
+
+        return None
+
+    def update_XS(self, pool_id):
+        """ Write (or update) data in xenstore taken from instance.
+            @param pool_id: Pool id to build path to pool data in xenstore
+            @type pool_id: int
+        """
+        self.pool_lock.acquire()
+        try:
+            xs_path = XS_POOLROOT + "%s/" % pool_id
+            xs_entries = { 'uuid' : self.get_uuid(),
+                           'name' : self.name_label,
+                           'description' : self.name_description
+                         }
+            xstransact.Mkdir(xs_path)
+            xstransact.Mkdir(xs_path, 'other_config')
+            xstransact.Write(xs_path, xs_entries)
+            xstransact.Write('%s%s' % (xs_path, 'other_config'),
+                             self.other_config)
+        finally:
+            self.pool_lock.release()
+
+    def _update_ncpu(self, pool_id):
+        for pool_rec in xc.cpupool_getinfo():
+            if pool_rec['cpupool'] == pool_id:
+                self.ncpu = len(pool_rec['cpulist'])
+
+    def _checkName(self, name):
+        """ Check if a pool name is valid. Valid names contain alphabetic
+            characters, digits, or characters in '_-.:/+'.
+            The same name cannot be used for more than one pool at the same
+            time.
+            @param name: name
+            @type name: str
+            @raise: PoolError if invalid
+        """
+        if name is None or name == '':
+            raise PoolError(XEND_ERROR_POOL_PARAM, 'Missing Pool Name')
+        if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
+            raise PoolError(XEND_ERROR_POOL_PARAM, 'Invalid Pool Name')
+
+        pool = self.lookup_pool(name)
+        if pool and pool.get_uuid() != self.get_uuid():
+            raise PoolError(XEND_ERROR_POOL_PARAM,
+                            'Pool name "%s" already exists' % name)
+
+
+    #
+    # class methods
+    #
+
+    def recreate_active_pools(cls):
+        """ Read active pool config from hypervisor and create pool instances.
+            - Query pool ids and assigned CPUs from hypervisor.
+            - Query additional information for any pool from xenstore.
+              If an entry for a pool id is missing in xenstore, it will be
+              recreated with a new uuid and generic name (this is an error case)
+            - Create an XendCPUPool instance for any pool id
+            Function has to be called after recreation of managed pools.
+        """
+        log.debug('recreate_active_pools')
+
+        for pool_rec in xc.cpupool_getinfo():
+            pool = pool_rec['cpupool']
+
+            # read pool data from xenstore
+            path = XS_POOLROOT + "%s/" % pool
+            uuid = xstransact.Read(path, 'uuid')
+            if not uuid:
+                # xenstore entry missing / invalid; create entry with new uuid
+                uuid = genuuid.createString()
+                name = "Pool-%s" % pool
+                try:
+                    inst = XendCPUPool( { 'name_label' : name }, uuid, False )
+                    inst.update_XS(pool)
+                except PoolError, ex:
+                    # log error and skip domain
+                    log.error('cannot recreate pool %s; skipping (reason: %s)' \
+                              % (name, ex))
+            else:
+                (name, descr) = xstransact.Read(path, 'name', 'description')
+                other_config = {}
+                for key in xstransact.List(path + 'other_config'):
+                    other_config[key] = xstransact.Read(
+                        path + 'other_config/%s' % key)
+
+                # check existence of pool instance
+                inst = XendAPIStore.get(uuid, cls.getClass())
+                if inst:
+                    # update attributes of existing instance
+                    inst.name_label = name
+                    inst.name_description = descr
+                    inst.other_config = other_config
+                else:
+                    # recreate instance
+                    try:
+                        inst = XendCPUPool(
+                            { 'name_label' : name,
+                              'name_description' : descr,
+                              'other_config' : other_config,
+                              'proposed_CPUs' : pool_rec['cpulist'],
+                              'ncpu' : len(pool_rec['cpulist']),
+                            },
+                            uuid, False )
+                    except PoolError, ex:
+                        # log error and skip domain
+                        log.error(
+                            'cannot recreate pool %s; skipping (reason: %s)' \
+                            % (name, ex))
+
+    recreate_active_pools = classmethod(recreate_active_pools)
+
+
+    def recreate(cls, record, current_uuid):
+        """ Recreate a pool instance during xend restart.
+            @param record: attributes of pool
+            @type record: dict
+            @param current_uuid: uuid of pool to create
+            @type current_uuid: str
+        """
+        XendCPUPool(record, current_uuid)
+
+    recreate = classmethod(recreate)
+
+
+    def autostart_pools(cls):
+        """ Start managed pools that are marked as autostart pools.
+            Function is called after recreation of managed domains during
+            xend restart.
+        """
+        cls.pool_lock.acquire()
+        try:
+            for inst in XendAPIStore.get_all(cls.getClass()):
+                if inst.is_managed() and inst.auto_power_on and \
+                   inst.query_pool_id() == None:
+                    inst.activate()
+        finally:
+            cls.pool_lock.release()
+
+    autostart_pools = classmethod(autostart_pools)
+
+
+    def move_domain(cls, pool_ref, domid):
+        cls.pool_lock.acquire()
+        try:
+            pool = XendAPIStore.get(pool_ref, cls.getClass())
+            pool_id = pool.query_pool_id()
+
+            xc.cpupool_movedomain(pool_id, domid)
+        finally:
+            cls.pool_lock.release()
+
+    move_domain = classmethod(move_domain)
+
+
+    def query_pool_ref(cls, pool_id):
+        """ Get pool ref by pool id.
+            Take the ref from xenstore.
+            @param pool_id: pool id
+            @type pool_id: int
+            @return: ref
+            @rtype: str
+        """
+        uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid')
+        if uuid:
+            return [uuid]
+        else:
+            return []
+
+    query_pool_ref = classmethod(query_pool_ref)
+
+
+    def lookup_pool(cls, id_or_name):
+        """ Search XendCPUPool instance with given id_or_name.
+            @param id_or_name: pool id or pool name to search
+            @type id_or_name: [int, str]
+            @return: instance or None if not found
+            @rtype: XendCPUPool
+        """
+        pool_uuid = None
+        try:
+            pool_id = int(id_or_name)
+            # pool id given ?
+            pool_uuid = cls.query_pool_ref(pool_id)
+            if not pool_uuid:
+                # not found -> search name
+                pool_uuid = cls.get_by_name_label(id_or_name)
+        except ValueError:
+            # pool name given
+            pool_uuid = cls.get_by_name_label(id_or_name)
+
+        if len(pool_uuid) > 0:
+            return XendAPIStore.get(pool_uuid[0], cls.getClass())
+        else:
+            return None
+
+    lookup_pool = classmethod(lookup_pool)
+
+
+    def _cpu_number_to_ref(cls, number):
+        node = XendNode.instance()
+        for cpu_ref in node.get_host_cpu_refs():
+            if node.get_host_cpu_field(cpu_ref, 'number') == number:
+                return cpu_ref
+        return None
+
+    _cpu_number_to_ref = classmethod(_cpu_number_to_ref)
+
+
+    def unbound_cpus(cls):
+        """ Build list containing the numbers of all cpus not bound to a pool.
+            Info is taken from the hypervisor.
+            @return: list of cpu numbers
+            @rtype: list of int
+        """
+        return xc.cpupool_freeinfo()
+
+    unbound_cpus = classmethod(unbound_cpus)
+
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -134,6 +134,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
     'PV_bootloader': 'bootloader',
     'PV_bootloader_args': 'bootloader_args',
     'Description': 'description',
+    'pool_name' : 'pool_name',
 }

 LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
@@ -240,6 +241,7 @@ XENAPI_CFG_TYPES = {
     'superpages' : int,
     'memory_sharing': int,
     'Description': str,
+    'pool_name' : str,
 }

 # List of legacy configuration keys that have no equivalent in the
@@ -285,6 +287,7 @@ LEGACY_CFG_TYPES = {
     'bootloader': str,
     'bootloader_args': str,
     'description': str,
+    'pool_name': str,
 }

 # Values that should be stored in xenstore's /vm/<uuid> that is used
@@ -306,6 +309,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
     'on_xend_stop',
     'bootloader',
     'bootloader_args',
+    'pool_name',
 ]

 ##
@@ -414,6 +418,7 @@ class XendConfig(dict):
             'other_config': {},
             'platform': {},
             'target': 0,
+            'pool_name' : 'Pool-0',
             'superpages': 0,
             'description': '',
         }
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConstants.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(

 XS_VMROOT = "/vm/"

+XS_POOLROOT = "/local/pool/"
+
 NR_PCI_FUNC = 8
 NR_PCI_DEV = 32
 NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -60,6 +60,7 @@ from xen.xend.xenstore.xsutil import Get
 from xen.xend.xenstore.xswatch import xswatch
 from xen.xend.XendConstants import *
 from xen.xend.XendAPIConstants import *
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.xend.server.DevConstants import xenbusState
 from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString

@@ -2566,6 +2567,19 @@ class XendDomainInfo:
         oos = self.info['platform'].get('oos', 1)
         oos_off = 1 - int(oos)

+        # look-up pool id to use
+        pool_name = self.info['pool_name']
+        if len(pool_name) == 0:
+            pool_name = "Pool-0"
+
+        pool = XendCPUPool.lookup_pool(pool_name)
+
+        if pool is None:
+            raise VmError("unknown pool %s" % pool_name)
+        pool_id = pool.query_pool_id()
+        if pool_id is None:
+            raise VmError("pool %s not activated" % pool_name)
+
         flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)

         try:
@@ -2587,6 +2601,11 @@ class XendDomainInfo:
                 failmsg += ', error=%i' % int(self.domid)
             raise VmError(failmsg)

+        try:
+            xc.cpupool_movedomain(pool_id, self.domid)
+        except Exception, e:
+            raise VmError('Moving domain to target pool failed')
+
         self.dompath = GetDomainPath(self.domid)

         self._recreateDom()
@@ -3614,6 +3633,11 @@ class XendDomainInfo:

         retval = xc.sched_credit_domain_get(self.getDomid())
         return retval
+    def get_cpu_pool(self):
+        if self.getDomid() is None:
+            return None
+        xeninfo = dom_get(self.domid)
+        return xeninfo['cpupool']
     def get_power_state(self):
         return XEN_API_VM_POWER_STATE[self._stateGet()]
     def get_platform(self):
Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendError.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendError.py
@@ -18,6 +18,7 @@

 from xmlrpclib import Fault

+import types
 import XendClient

 class XendInvalidDomain(Fault):
@@ -186,6 +187,26 @@ class DirectPCIError(XendAPIError):
     def __str__(self):
         return 'DIRECT_PCI_ERROR: %s' % self.error

+class PoolError(XendAPIError):
+    def __init__(self, error, spec=None):
+        XendAPIError.__init__(self)
+        self.spec = []
+        if spec:
+            if isinstance(spec, types.ListType):
+                self.spec = spec
+            else:
+                self.spec = [spec]
+        self.error = error
+
+    def get_api_error(self):
+        return [self.error] + self.spec
+
+    def __str__(self):
+        if self.spec:
+            return '%s: %s' % (self.error, self.spec)
+        else:
+            return '%s' % self.error
+
 class VDIError(XendAPIError):
     def __init__(self, error, vdi):
         XendAPIError.__init__(self)
Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
@@ -43,6 +43,7 @@ from XendStateStore import XendStateStor
 from XendMonitor import XendMonitor
 from XendPPCI import XendPPCI
 from XendPSCSI import XendPSCSI, XendPSCSI_HBA
+from xen.xend.XendCPUPool import XendCPUPool

 class XendNode:
     """XendNode - Represents a Domain 0 Host."""
@@ -159,6 +160,8 @@ class XendNode:

         self._init_PSCSIs()

+        self._init_cpu_pools()
+

     def _init_networks(self):
         # Initialise networks
@@ -366,6 +369,18 @@ class XendNode:
         for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
             XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})

+    def _init_cpu_pools(self):
+        # Initialise cpu_pools
+        saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
+        if saved_cpu_pools:
+            for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
+                try:
+                    XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
+                except CreateUnspecifiedAttributeError:
+                    log.warn("Error recreating %s %s",
+                             (XendCPUPool.getClass(), cpu_pool_uuid))
+        XendCPUPool.recreate_active_pools()
+

     def add_network(self, interface):
         # TODO
@@ -586,6 +601,7 @@ class XendNode:
         self.save_PPCIs()
         self.save_PSCSIs()
         self.save_PSCSI_HBAs()
+        self.save_cpu_pools()

     def save_PIFs(self):
         pif_records = dict([(pif_uuid, XendAPIStore.get(
@@ -628,6 +644,12 @@ class XendNode:
                                 for pscsi_HBA_uuid in XendPSCSI_HBA.get_all()])
         self.state_store.save_state('pscsi_HBA', pscsi_HBA_records)

+    def save_cpu_pools(self):
+        cpu_pool_records = dict([(cpu_pool_uuid, XendAPIStore.get(
+                    cpu_pool_uuid, XendCPUPool.getClass()).get_record())
+                    for cpu_pool_uuid in XendCPUPool.get_all_managed()])
+        self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
+
     def shutdown(self):
         return 0

@@ -939,6 +961,7 @@ class XendNode:
             self.format_node_to_memory(info, 'node_to_memory')
         info['node_to_dma32_mem'] = \
             self.format_node_to_memory(info, 'node_to_dma32_mem')
+        info['free_cpus'] = len(XendCPUPool.unbound_cpus())

         # FIXME: These are hard-coded to be the inverse of the getXenMemory
         # functions in image.py. Find a cleaner way.
@@ -958,6 +981,7 @@ class XendNode:
                         'virt_caps',
                         'total_memory',
                         'free_memory',
+                        'free_cpus',
                         'max_free_memory',
                         'max_para_memory',
                         'max_hvm_memory',
Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py
+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
@@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
 from xen.xend.XendLogging import log
 from xen.xend.XendClient import XEN_API_SOCKET
 from xen.xend.XendDomain import instance as xenddomain
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.web.SrvDir import SrvDir

 from SrvRoot import SrvRoot
@@ -147,6 +148,12 @@ class XendServers:
                     status.close()
                     status = None

+                # auto start pools before domains are started
+                try:
+                    XendCPUPool.autostart_pools()
+                except Exception, e:
+                    log.exception("Failed while autostarting pools")
+
                 # Reaching this point means we can auto start domains
                 try:
                     xenddomain().autostart_domains()
Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
|
|
===================================================================
|
|
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/XMLRPCServer.py
|
|
+++ xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
|
|
@@ -33,6 +33,7 @@ from xen.xend.XendClient import XML_RPC_
|
|
from xen.xend.XendConstants import DOM_STATE_RUNNING
|
|
from xen.xend.XendLogging import log
|
|
from xen.xend.XendError import XendInvalidDomain
|
|
+from xen.xend.XendCPUPool import XendCPUPool
|
|
|
|
# vcpu_avail is a long and is not needed by the clients. It's far easier
|
|
# to just remove it then to try and marshal the long.
|
|
@@ -98,6 +99,10 @@ methods = ['device_create', 'device_conf
|
|
|
|
exclude = ['domain_create', 'domain_restore']
|
|
|
|
+POOL_FUNCS = ['pool_create', 'pool_new', 'pool_start', 'pool_list',
|
|
+ 'pool_destroy', 'pool_delete', 'pool_cpu_add', 'pool_cpu_remove',
|
|
+ 'pool_migrate']
|
|
+
|
|
class XMLRPCServer:
|
|
def __init__(self, auth, use_xenapi, use_tcp = False,
|
|
ssl_key_file = None, ssl_cert_file = None,
|
|
@@ -197,6 +202,11 @@ class XMLRPCServer:
|
|
if name not in exclude:
|
|
self.server.register_function(fn, "xend.domain.%s" % name[7:])
|
|
|
|
+ # Functions in XendCPUPool
|
|
+ for name in POOL_FUNCS:
|
|
+ fn = getattr(XendCPUPool, name)
|
|
+ self.server.register_function(fn, "xend.cpu_pool.%s" % name[5:])
|
|
+
|
|
# Functions in XendNode and XendDmesg
|
|
for type, lst, n in [(XendNode,
|
|
['info', 'pciinfo', 'send_debug_keys',
|
|
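Note: the XMLRPCServer hunk above exposes each XendCPUPool function under a dotted RPC name; name[5:] strips the 'pool_' prefix, so pool_create is published as xend.cpu_pool.create. A self-contained sketch of the same registration pattern with the stock Python 2 SimpleXMLRPCServer (the stub functions and port are made up):

    from SimpleXMLRPCServer import SimpleXMLRPCServer

    POOL_FUNCS = ['pool_create', 'pool_destroy']

    def pool_create(config):
        return "created %s" % config

    def pool_destroy(name):
        return "destroyed %s" % name

    server = SimpleXMLRPCServer(('localhost', 8006), logRequests=False)
    for name in POOL_FUNCS:
        fn = globals()[name]
        # 'pool_create'[5:] == 'create'  ->  published as xend.cpu_pool.create
        server.register_function(fn, "xend.cpu_pool.%s" % name[5:])
    # server.serve_forever()  # left commented so the sketch stays inert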
Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
|
|
===================================================================
|
|
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.dtd
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/create.dtd
|
|
@@ -50,6 +50,7 @@
|
|
s3_integrity CDATA #REQUIRED
|
|
vcpus_max CDATA #REQUIRED
|
|
vcpus_at_startup CDATA #REQUIRED
|
|
+ pool_name CDATA #REQUIRED
|
|
actions_after_shutdown %NORMAL_EXIT; #REQUIRED
|
|
actions_after_reboot %NORMAL_EXIT; #REQUIRED
|
|
actions_after_crash %CRASH_BEHAVIOUR; #REQUIRED
|
|
Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
|
|
===================================================================
|
|
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/create.py
|
|
@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
|
|
fn=set_bool, default=None,
|
|
use="""Do not inject spurious page faults into this guest""")
|
|
|
|
+gopts.var('pool', val='POOL NAME',
|
|
+ fn=set_value, default=None,
|
|
+ use="""CPU pool to use for the VM""")
|
|
+
|
|
gopts.var('pci_msitranslate', val='TRANSLATE',
|
|
fn=set_int, default=1,
|
|
use="""Global PCI MSI-INTx translation flag (0=disable;
|
|
@@ -1149,6 +1153,8 @@ def make_config(vals):
|
|
config.append(['localtime', vals.localtime])
|
|
if vals.oos:
|
|
config.append(['oos', vals.oos])
|
|
+ if vals.pool:
|
|
+ config.append(['pool_name', vals.pool])
|
|
|
|
config_image = configure_image(vals)
|
|
if vals.bootloader:
|
|
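Note: with the create.py hunk above, passing pool=... to xm create adds a ['pool_name', <name>] pair to the domain's SXP config. A small sketch of how the pair is appended (the Vals class and 'MyPool' are made-up stand-ins for the parsed option values):

    config = ['vm', ['name_label', 'domU-1']]

    class Vals(object):
        pool = 'MyPool'          # what gopts.var('pool', ...) parsed
    vals = Vals()

    if vals.pool:
        config.append(['pool_name', vals.pool])

    print(config)   # -> ['vm', ['name_label', 'domU-1'], ['pool_name', 'MyPool']]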
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
|
|
===================================================================
|
|
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
|
|
@@ -56,6 +56,7 @@ from xen.util.xmlrpcclient import Server
|
|
import xen.util.xsm.xsm as security
|
|
from xen.util.xsm.xsm import XSMError
|
|
from xen.util.acmpolicy import ACM_LABEL_UNLABELED_DISPLAY
|
|
+from xen.util.sxputils import sxp2map, map2sxp as map_to_sxp
|
|
from xen.util import auxbin
|
|
|
|
import XenAPI
|
|
@@ -235,6 +236,23 @@ SUBCOMMAND_HELP = {
|
|
'tmem-freeable' : ('', 'Print freeable tmem (in MiB).'),
|
|
'tmem-shared-auth' : ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
|
|
|
|
+ #
|
|
+ # pool commands
|
|
+ #
|
|
+ 'pool-create' : ('<ConfigFile> [vars]',
|
|
+ 'Create a CPU pool based on a ConfigFile.'),
|
|
+ 'pool-new' : ('<ConfigFile> [vars]',
|
|
+ 'Adds a CPU pool to Xend CPU pool management'),
|
|
+ 'pool-start' : ('<CPU Pool>', 'Starts a Xend CPU pool'),
|
|
+ 'pool-list' : ('[<CPU Pool>] [-l|--long] [-c|--cpus]', 'List CPU pools on host'),
|
|
+ 'pool-destroy' : ('<CPU Pool>', 'Deactivates a CPU pool'),
|
|
+ 'pool-delete' : ('<CPU Pool>',
|
|
+ 'Removes a CPU pool from Xend management'),
|
|
+ 'pool-cpu-add' : ('<CPU Pool> <CPU nr>', 'Adds a CPU to a CPU pool'),
|
|
+ 'pool-cpu-remove': ('<CPU Pool> <CPU nr>', 'Removes a CPU from a CPU pool'),
|
|
+ 'pool-migrate' : ('<Domain> <CPU Pool>',
|
|
+ 'Moves a domain into a CPU pool'),
|
|
+
|
|
# security
|
|
|
|
'addlabel' : ('<label> {dom <ConfigFile>|res <resource>|mgt <managed domain>} [<policy>]',
|
|
@@ -281,6 +299,7 @@ SUBCOMMAND_OPTIONS = {
|
|
('-l', '--long', 'Output all VM details in SXP'),
|
|
('', '--label', 'Include security labels'),
|
|
('', '--state=<state>', 'Select only VMs with the specified state'),
|
|
+ ('', '--pool=<pool>', 'Select only VMs in specified cpu pool'),
|
|
),
|
|
'console': (
|
|
('-q', '--quiet', 'Do not print an error message if the domain does not exist'),
|
|
@@ -342,6 +361,10 @@ SUBCOMMAND_OPTIONS = {
|
|
('-u', '--uuid', 'Specify uuid (abcdef01-2345-6789-01234567890abcdef).'),
|
|
('-A', '--auth', '0=auth,1=deauth'),
|
|
),
|
|
+ 'pool-list': (
|
|
+ ('-l', '--long', 'Output all CPU pool details in SXP format'),
|
|
+ ('-c', '--cpus', 'Output list of CPUs used by a pool'),
|
|
+ ),
|
|
}
|
|
|
|
common_commands = [
|
|
@@ -486,9 +509,21 @@ tmem_commands = [
|
|
"tmem-shared-auth",
|
|
]
|
|
|
|
+pool_commands = [
|
|
+ "pool-create",
|
|
+ "pool-new",
|
|
+ "pool-start",
|
|
+ "pool-list",
|
|
+ "pool-destroy",
|
|
+ "pool-delete",
|
|
+ "pool-cpu-add",
|
|
+ "pool-cpu-remove",
|
|
+ "pool-migrate",
|
|
+ ]
|
|
+
|
|
all_commands = (domain_commands + host_commands + scheduler_commands +
|
|
device_commands + vnet_commands + security_commands +
|
|
- acm_commands + flask_commands + tmem_commands +
|
|
+ acm_commands + flask_commands + tmem_commands + pool_commands +
|
|
['shell', 'event-monitor'])
|
|
|
|
|
|
@@ -885,7 +920,7 @@ def datetime_to_secs(v):
|
|
v = str(v).replace(c, "")
|
|
return time.mktime(time.strptime(v[0:14], '%Y%m%dT%H%M%S'))
|
|
|
|
-def getDomains(domain_names, state, full = 0):
|
|
+def getDomains(domain_names, state, full = 0, pool = None):
|
|
if serverType == SERVER_XEN_API:
|
|
doms_sxp = []
|
|
doms_dict = []
|
|
@@ -894,6 +929,9 @@ def getDomains(domain_names, state, full
|
|
dom_metrics_recs = server.xenapi.VM_metrics.get_all_records()
|
|
|
|
for dom_ref, dom_rec in dom_recs.items():
|
|
+ if pool and pool != dom_rec['pool_name']:
|
|
+ continue
|
|
+
|
|
dom_metrics_rec = dom_metrics_recs[dom_rec['metrics']]
|
|
|
|
states = ('running', 'blocked', 'paused', 'shutdown',
|
|
@@ -934,7 +972,15 @@ def getDomains(domain_names, state, full
|
|
if domain_names:
|
|
return [server.xend.domain(dom, full) for dom in domain_names]
|
|
else:
|
|
- return server.xend.domains_with_state(True, state, full)
|
|
+ doms = server.xend.domains_with_state(True, state, full)
|
|
+ if not pool:
|
|
+ return doms
|
|
+ else:
|
|
+ doms_in_pool = []
|
|
+ for dom in doms:
|
|
+ if sxp.child_value(dom, 'pool_name', '') == pool:
|
|
+ doms_in_pool.append(dom)
|
|
+ return doms_in_pool
|
|
|
|
|
|
def xm_list(args):
|
|
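Note: on the legacy XML-RPC path, getDomains() above filters client-side by reading pool_name out of each domain's SXP record. A sketch with a simplified child_value (a stand-in for xen.xend.sxp.child_value; the domain names are invented):

    def child_value(sxpr, key, default=''):
        # simplified stand-in for xen.xend.sxp.child_value
        for child in sxpr[1:]:
            if isinstance(child, list) and len(child) > 1 and child[0] == key:
                return child[1]
        return default

    doms = [
        ['domain', ['name', 'dom1'], ['pool_name', 'Pool-0']],
        ['domain', ['name', 'dom2'], ['pool_name', 'MyPool']],
    ]
    pool = 'MyPool'
    doms_in_pool = [d for d in doms
                    if child_value(d, 'pool_name', '') == pool]
    print([child_value(d, 'name') for d in doms_in_pool])   # -> ['dom2']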
@@ -942,10 +988,11 @@ def xm_list(args):
|
|
show_vcpus = 0
|
|
show_labels = 0
|
|
state = 'all'
|
|
+ pool = None
|
|
try:
|
|
(options, params) = getopt.gnu_getopt(args, 'lv',
|
|
['long','vcpus','label',
|
|
- 'state='])
|
|
+ 'state=','pool='])
|
|
except getopt.GetoptError, opterr:
|
|
err(opterr)
|
|
usage('list')
|
|
@@ -959,18 +1006,24 @@ def xm_list(args):
|
|
show_labels = 1
|
|
if k in ['--state']:
|
|
state = v
|
|
+ if k in ['--pool']:
|
|
+ pool = v
|
|
|
|
if state != 'all' and len(params) > 0:
|
|
raise OptionError(
|
|
"You may specify either a state or a particular VM, but not both")
|
|
|
|
+ if pool and len(params) > 0:
|
|
+ raise OptionError(
|
|
+ "You may specify either a pool or a particular VM, but not both")
|
|
+
|
|
if show_vcpus:
|
|
print >>sys.stderr, (
|
|
"xm list -v is deprecated. Please use xm vcpu-list.")
|
|
xm_vcpu_list(params)
|
|
return
|
|
|
|
- doms = getDomains(params, state, use_long)
|
|
+ doms = getDomains(params, state, use_long, pool)
|
|
|
|
if use_long:
|
|
map(PrettyPrint.prettyprint, doms)
|
|
@@ -1806,6 +1859,13 @@ def xm_info(args):
|
|
else:
|
|
return ""
|
|
|
|
+ def getFreeCpuCount():
|
|
+ cnt = 0
|
|
+ for host_cpu_record in host_cpu_records:
|
|
+ if len(host_cpu_record.get("cpu_pool", [])) == 0:
|
|
+ cnt += 1
|
|
+ return cnt
|
|
+
|
|
info = {
|
|
"host": getVal(["name_label"]),
|
|
"release": getVal(["software_version", "release"]),
|
|
@@ -1817,6 +1877,7 @@ def xm_info(args):
|
|
"threads_per_core": getVal(["cpu_configuration", "threads_per_core"]),
|
|
"cpu_mhz": getCpuMhz(),
|
|
"hw_caps": getCpuFeatures(),
|
|
+ "free_cpus": getFreeCpuCount(),
|
|
"total_memory": int(host_metrics_record["memory_total"])/1024/1024,
|
|
"free_memory": int(host_metrics_record["memory_free"])/1024/1024,
|
|
"xen_major": getVal(["software_version", "xen_major"]),
|
|
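Note: getFreeCpuCount() above treats a host CPU as free when its cpu_pool list is empty, i.e. the CPU is not assigned to any pool. A toy version over invented host_cpu records:

    host_cpu_records = [
        {'number': 0, 'cpu_pool': ['pool-ref-1']},   # assigned
        {'number': 1, 'cpu_pool': []},               # free
        {'number': 2},                               # key absent -> free
    ]

    free = len([r for r in host_cpu_records
                if len(r.get('cpu_pool', [])) == 0])
    print(free)   # -> 2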
@@ -3451,6 +3512,169 @@ def xm_tmem_shared_auth(args):
|
|
else:
|
|
return server.xend.node.tmem_shared_auth(domid,uuid_str,auth)
|
|
|
|
+def get_pool_ref(name):
|
|
+ refs = server.xenapi.cpu_pool.get_by_name_label(name)
|
|
+ if len(refs) > 0:
|
|
+ return refs[0]
|
|
+ else:
|
|
+ err('unknown pool name')
|
|
+ sys.exit(1)
|
|
+
|
|
+def xm_pool_start(args):
|
|
+ arg_check(args, "pool-start", 1)
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ ref = get_pool_ref(args[0])
|
|
+ server.xenapi.cpu_pool.activate(ref)
|
|
+ else:
|
|
+ server.xend.cpu_pool.start(args[0])
|
|
+
|
|
+def brief_pool_list(sxprs):
|
|
+ format_str = "%-16s %3s %8s %s %s"
|
|
+ for sxpr in sxprs:
|
|
+ if sxpr == sxprs[0]:
|
|
+ print "Name CPUs Sched Active Domain count"
|
|
+ record = sxp2map(sxpr)
|
|
+ name = record['name_label']
|
|
+ sched_policy = record['sched_policy']
|
|
+ if record['activated']:
|
|
+ cpus = record.get('host_CPU_numbers', [])
|
|
+ vms = record.get('started_VM_names', [])
|
|
+ if not isinstance(cpus, types.ListType):
|
|
+ cpus = [cpus]
|
|
+ if not isinstance(vms, types.ListType):
|
|
+ vms = [vms]
|
|
+ cpu_count = len(cpus)
|
|
+ vm_count = len(vms)
|
|
+ active = 'y'
|
|
+ else:
|
|
+ cpu_count = record['ncpu']
|
|
+ vm_count = 0
|
|
+ active = 'n'
|
|
+ print format_str % (name, cpu_count, sched_policy, active, vm_count)
|
|
+
|
|
+def brief_pool_list_cpus(sxprs):
|
|
+ format_str = "%-16s %s"
|
|
+ for sxpr in sxprs:
|
|
+ if sxpr == sxprs[0]:
|
|
+ print format_str % ("Name", "CPU list")
|
|
+ record = sxp2map(sxpr)
|
|
+ name = record['name_label']
|
|
+ cpus = ""
|
|
+ if record['activated']:
|
|
+ cpus = record.get('host_CPU_numbers', [])
|
|
+ if isinstance(cpus, types.ListType):
|
|
+ cpus.sort()
|
|
+ cpus = reduce(lambda x,y: x + "%s," % y, cpus, "")
|
|
+ cpus = cpus[0:len(cpus)-1]
|
|
+ else:
|
|
+ cpus = str(cpus)
|
|
+ if len(cpus) == 0:
|
|
+ cpus = "-"
|
|
+ print format_str % (name, cpus)
|
|
+
|
|
+def xm_pool_list(args):
|
|
+ arg_check(args, "pool-list", 0, 2)
|
|
+ try:
|
|
+ (options, params) = getopt.gnu_getopt(args, 'lc', ['long','cpus'])
|
|
+ except getopt.GetoptError, opterr:
|
|
+ err(opterr)
|
|
+ usage('pool-list')
|
|
+ if len(params) > 1:
|
|
+ err("Only one pool name for selection allowed")
|
|
+ usage('pool-list')
|
|
+
|
|
+ use_long = False
|
|
+ show_cpus = False
|
|
+ for (k, _) in options:
|
|
+ if k in ['-l', '--long']:
|
|
+ use_long = True
|
|
+ if k in ['-c', '--cpus']:
|
|
+ show_cpus = True
|
|
+
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ pools = server.xenapi.cpu_pool.get_all_records()
|
|
+ cpu_recs = server.xenapi.host_cpu.get_all_records()
|
|
+ sxprs = []
|
|
+ for pool in pools.values():
|
|
+ if pool['name_label'] in params or len(params) == 0:
|
|
+ started_VM_names = [['started_VM_names'] + [
|
|
+ server.xenapi.VM.get_name_label(started_VM)
|
|
+ for started_VM in pool['started_VMs'] ] ]
|
|
+ host_CPU_numbers = [['host_CPU_numbers'] + [
|
|
+ cpu_recs[cpu_ref]['number']
|
|
+ for cpu_ref in pool['host_CPUs'] ] ]
|
|
+ sxpr = [ pool['uuid'] ] + map_to_sxp(pool) + \
|
|
+ host_CPU_numbers + started_VM_names
|
|
+ sxprs.append(sxpr)
|
|
+ else:
|
|
+ sxprs = server.xend.cpu_pool.list(params)
|
|
+
|
|
+ if len(params) > 0 and len(sxprs) == 0:
|
|
+ # pool not found
|
|
+ err("Pool '%s' does not exist." % params[0])
|
|
+
|
|
+ if use_long:
|
|
+ for sxpr in sxprs:
|
|
+ PrettyPrint.prettyprint(sxpr)
|
|
+ elif show_cpus:
|
|
+ brief_pool_list_cpus(sxprs)
|
|
+ else:
|
|
+ brief_pool_list(sxprs)
|
|
+
|
|
+def xm_pool_destroy(args):
|
|
+ arg_check(args, "pool-destroy", 1)
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ ref = get_pool_ref(args[0])
|
|
+ server.xenapi.cpu_pool.deactivate(ref)
|
|
+ else:
|
|
+ server.xend.cpu_pool.destroy(args[0])
|
|
+
|
|
+def xm_pool_delete(args):
|
|
+ arg_check(args, "pool-delete", 1)
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ ref = get_pool_ref(args[0])
|
|
+ server.xenapi.cpu_pool.destroy(ref)
|
|
+ else:
|
|
+ server.xend.cpu_pool.delete(args[0])
|
|
+
|
|
+def xm_pool_cpu_add(args):
|
|
+ arg_check(args, "pool-cpu-add", 2)
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ ref = get_pool_ref(args[0])
|
|
+ cpu_ref_list = server.xenapi.host_cpu.get_all_records()
|
|
+ cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
|
|
+ if c_rec['number'] == args[1] ]
|
|
+ if len(cpu_ref) == 0:
|
|
+ err('cpu number unknown')
|
|
+ else:
|
|
+ server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
|
|
+ else:
|
|
+ server.xend.cpu_pool.cpu_add(args[0], args[1])
|
|
+
|
|
+def xm_pool_cpu_remove(args):
|
|
+ arg_check(args, "pool-cpu-remove", 2)
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ ref = get_pool_ref(args[0])
|
|
+ cpu_ref_list = server.xenapi.host_cpu.get_all_records()
|
|
+ cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
|
|
+ if c_rec['number'] == args[1] ]
|
|
+ if len(cpu_ref) == 0:
|
|
+ err('cpu number unknown')
|
|
+ else:
|
|
+ server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
|
|
+ else:
|
|
+ server.xend.cpu_pool.cpu_remove(args[0], args[1])
|
|
+
|
|
+def xm_pool_migrate(args):
|
|
+ arg_check(args, "pool-migrate", 2)
|
|
+ domname = args[0]
|
|
+ poolname = args[1]
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ pool_ref = get_pool_ref(poolname)
|
|
+ server.xenapi.VM.cpu_pool_migrate(get_single_vm(domname), pool_ref)
|
|
+ else:
|
|
+ server.xend.cpu_pool.migrate(domname, poolname)
|
|
+
|
|
|
|
commands = {
|
|
"shell": xm_shell,
|
|
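Note: a worked trace of the reduce-based CPU-list formatting in brief_pool_list_cpus() above (Python 2, where reduce is a builtin), together with the more idiomatic join it is equivalent to:

    cpus = [2, 0, 1]
    cpus.sort()
    s = reduce(lambda x, y: x + "%s," % y, cpus, "")   # builds "0,1,2,"
    s = s[0:len(s) - 1]                                # strip trailing comma
    print(s)                                           # -> 0,1,2
    print(",".join([str(c) for c in cpus]))            # equivalent one-liner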
@@ -3536,6 +3760,14 @@ commands = {
|
|
"usb-list-assignable-devices": xm_usb_list_assignable_devices,
|
|
"usb-hc-create": xm_usb_hc_create,
|
|
"usb-hc-destroy": xm_usb_hc_destroy,
|
|
+ # pool
|
|
+ "pool-start": xm_pool_start,
|
|
+ "pool-list": xm_pool_list,
|
|
+ "pool-destroy": xm_pool_destroy,
|
|
+ "pool-delete": xm_pool_delete,
|
|
+ "pool-cpu-add": xm_pool_cpu_add,
|
|
+ "pool-cpu-remove": xm_pool_cpu_remove,
|
|
+ "pool-migrate": xm_pool_migrate,
|
|
# tmem
|
|
"tmem-thaw": xm_tmem_thaw,
|
|
"tmem-freeze": xm_tmem_freeze,
|
|
@@ -3567,6 +3799,8 @@ IMPORTED_COMMANDS = [
|
|
'resetpolicy',
|
|
'getenforce',
|
|
'setenforce',
|
|
+ 'pool-create',
|
|
+ 'pool-new',
|
|
]
|
|
|
|
for c in IMPORTED_COMMANDS:
|
|
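Note: for orientation, typical invocations of the subcommands wired up above (the pool names, domain name, and config file are made up):

    xm pool-create pool1.cfg      # define and immediately activate a pool
    xm pool-new pool1.cfg         # register a managed pool with xend only
    xm pool-start Pool-1
    xm pool-list -c               # show the CPU list of each pool
    xm pool-cpu-add Pool-1 3
    xm pool-migrate domU-1 Pool-1
    xm pool-destroy Pool-1        # deactivate
    xm pool-delete Pool-1         # remove from xend management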
Index: xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
|
|
===================================================================
|
|
--- /dev/null
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
|
|
@@ -0,0 +1,51 @@
|
|
+#============================================================================
|
|
+# This library is free software; you can redistribute it and/or
|
|
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
|
|
+# License as published by the Free Software Foundation.
|
|
+#
|
|
+# This library is distributed in the hope that it will be useful,
|
|
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+# Lesser General Public License for more details.
|
|
+#
|
|
+# You should have received a copy of the GNU Lesser General Public
|
|
+# License along with this library; if not, write to the Free Software
|
|
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+#============================================================================
|
|
+# Copyright (C) 2009 Fujitsu Technology Solutions
|
|
+#============================================================================
|
|
+
|
|
+""" Create a new unmanaged pool.
|
|
+"""
|
|
+
|
|
+import sys
|
|
+from xen.xm.main import serverType, SERVER_XEN_API, server
|
|
+from xen.xm.pool import parseCommandLine, err, help as help_options
|
|
+from xen.util.sxputils import sxp2map
|
|
+
|
|
+def help():
|
|
+ return help_options()
|
|
+
|
|
+
|
|
+def main(argv):
|
|
+ try:
|
|
+ (opts, config) = parseCommandLine(argv)
|
|
+ except StandardError, ex:
|
|
+ err(str(ex))
|
|
+
|
|
+ if not opts:
|
|
+ return
|
|
+
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ record = sxp2map(config)
|
|
+ if type(record.get('proposed_CPUs', [])) != list:
|
|
+ record['proposed_CPUs'] = [record['proposed_CPUs']]
|
|
+ ref = server.xenapi.cpu_pool.create(record)
|
|
+ if ref:
|
|
+ server.xenapi.cpu_pool.activate(ref)
|
|
+ else:
|
|
+ server.xend.cpu_pool.create(config)
|
|
+
|
|
+if __name__ == '__main__':
|
|
+ main(sys.argv)
|
|
+
|
|
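Note: pool-create.py above and pool-new.py below share parseCommandLine(); the only behavioural difference is that pool-create activates the pool right after registering it, while pool-new leaves it inactive until xm pool-start. Both re-wrap proposed_CPUs because sxp2map collapses a single-valued child to a scalar, while the Xen API expects a list; a minimal illustration:

    record = {'proposed_CPUs': '2'}      # one-CPU config collapses to a scalar
    if type(record.get('proposed_CPUs', [])) != list:
        record['proposed_CPUs'] = [record['proposed_CPUs']]
    print(record)                        # -> {'proposed_CPUs': ['2']}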
Index: xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
|
|
===================================================================
|
|
--- /dev/null
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
|
|
@@ -0,0 +1,50 @@
|
|
+#============================================================================
|
|
+# This library is free software; you can redistribute it and/or
|
|
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
|
|
+# License as published by the Free Software Foundation.
|
|
+#
|
|
+# This library is distributed in the hope that it will be useful,
|
|
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+# Lesser General Public License for more details.
|
|
+#
|
|
+# You should have received a copy of the GNU Lesser General Public
|
|
+# License along with this library; if not, write to the Free Software
|
|
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+#============================================================================
|
|
+# Copyright (C) 2009 Fujitsu Technology Solutions
|
|
+#============================================================================
|
|
+
|
|
+""" Create a new managed pool.
|
|
+"""
|
|
+
|
|
+import sys
|
|
+from xen.xm.main import serverType, SERVER_XEN_API, server
|
|
+from xen.xm.pool import parseCommandLine, err, help as help_options
|
|
+from xen.util.sxputils import sxp2map
|
|
+
|
|
+
|
|
+def help():
|
|
+ return help_options()
|
|
+
|
|
+
|
|
+def main(argv):
|
|
+ try:
|
|
+ (opts, config) = parseCommandLine(argv)
|
|
+ except StandardError, ex:
|
|
+ err(str(ex))
|
|
+
|
|
+ if not opts:
|
|
+ return
|
|
+
|
|
+ if serverType == SERVER_XEN_API:
|
|
+ record = sxp2map(config)
|
|
+ if type(record.get('proposed_CPUs', [])) != list:
|
|
+ record['proposed_CPUs'] = [record['proposed_CPUs']]
|
|
+ server.xenapi.cpu_pool.create(record)
|
|
+ else:
|
|
+ server.xend.cpu_pool.new(config)
|
|
+
|
|
+if __name__ == '__main__':
|
|
+ main(sys.argv)
|
|
+
|
|
Index: xen-4.0.0-testing/tools/python/xen/xm/pool.py
|
|
===================================================================
|
|
--- /dev/null
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/pool.py
|
|
@@ -0,0 +1,236 @@
|
|
+#============================================================================
|
|
+# This library is free software; you can redistribute it and/or
|
|
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
|
|
+# License as published by the Free Software Foundation.
|
|
+#
|
|
+# This library is distributed in the hope that it will be useful,
|
|
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+# Lesser General Public License for more details.
|
|
+#
|
|
+# You should have received a copy of the GNU Lesser General Public
|
|
+# License along with this library; if not, write to the Free Software
|
|
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+#============================================================================
|
|
+# Copyright (C) 2009 Fujitsu Technology Solutions
|
|
+#============================================================================
|
|
+
|
|
+""" Common function of cmds pool-new / pool-create.
|
|
+"""
|
|
+
|
|
+import sys
|
|
+import types
|
|
+import os
|
|
+
|
|
+from xen.xend import PrettyPrint
|
|
+from xen.xend import sxp
|
|
+
|
|
+from xen.xm.opts import Opts, set_value, set_true, append_value, OptionError
|
|
+
|
|
+GOPTS = Opts(use="""[options] [vars]
|
|
+
|
|
+Create a pool.
|
|
+
|
|
+Pool creation parameters can be set by command-line switches, from
|
|
+a python configuration script or an SXP config file. See documentation
|
|
+for --defconfig, --config. Configuration variables can be set using
|
|
+VAR=VAL on the command line. For example name=Pool-1 sets name to Pool-1.
|
|
+
|
|
+""")
|
|
+
|
|
+GOPTS.opt('help', short='h',
|
|
+ fn=set_true, default=0,
|
|
+ use="Print this help.")
|
|
+
|
|
+GOPTS.opt('help_config',
|
|
+ fn=set_true, default=0,
|
|
+ use="Print the available configuration variables (vars) for the "
|
|
+ "configuration script.")
|
|
+
|
|
+GOPTS.opt('path', val='PATH',
|
|
+ fn=set_value, default='.:/etc/xen/pool',
|
|
+ use="Search path for configuration scripts. "
|
|
+ "The value of PATH is a colon-separated directory list.")
|
|
+
|
|
+GOPTS.opt('defconfig', short='f', val='FILE',
|
|
+ fn=set_value, default='xmdefconfig',
|
|
+ use="Use the given Python configuration script."
|
|
+ "The configuration script is loaded after arguments have been "
|
|
+ "processed. Each command-line option sets a configuration "
|
|
+ "variable named after its long option name, and these "
|
|
+ "variables are placed in the environment of the script before "
|
|
+ "it is loaded. Variables for options that may be repeated have "
|
|
+ "list values. Other variables can be set using VAR=VAL on the "
|
|
+ "command line. "
|
|
+ "After the script is loaded, option values that were not set "
|
|
+ "on the command line are replaced by the values set in the script.")
|
|
+
|
|
+GOPTS.default('defconfig')
|
|
+
|
|
+GOPTS.opt('config', short='F', val='FILE',
|
|
+ fn=set_value, default=None,
|
|
+ use="CPU pool configuration to use (SXP).\n"
|
|
+ "SXP is the underlying configuration format used by Xen.\n"
|
|
+ "SXP configurations can be hand-written or generated from Python "
|
|
+ "configuration scripts, using the -n (dryrun) option to print "
|
|
+ "the configuration.")
|
|
+
|
|
+GOPTS.opt('dryrun', short='n',
|
|
+ fn=set_true, default=0,
|
|
+ use="Dry run - prints the resulting configuration in SXP but "
|
|
+ "does not create the CPU pool.")
|
|
+
|
|
+GOPTS.var('name', val='NAME', fn=set_value, default=None,
|
|
+ use="CPU pool name.")
|
|
+
|
|
+GOPTS.var('sched', val='SCHED', fn=set_value, default='credit',
|
|
+ use="Scheduler to use for the CPU pool.")
|
|
+
|
|
+GOPTS.var('cpus', val='CPUS', fn=set_value, default=1,
|
|
+ use="CPUS to assign to the CPU pool.")
|
|
+
|
|
+GOPTS.var('other_config', val='OTHER_CONFIG', fn=append_value, default=[],
|
|
+ use="Additional info for CPU pool")
|
|
+
|
|
+
|
|
+def sxp2map(sxp_val):
|
|
+ record = {}
|
|
+ for x in sxp_val:
|
|
+ if isinstance(x, (types.ListType, types.TupleType)) \
|
|
+ and len(x) > 1:
|
|
+ if isinstance(x[1], (types.ListType, types.TupleType)):
|
|
+ record[x[0]] = sxp2map(x[1])
|
|
+ else:
|
|
+ record[x[0]] = x[1]
|
|
+ return record
|
|
+
|
|
+def err(msg):
|
|
+ print >> sys.stderr, "Error: %s" % msg
|
|
+ sys.exit(-1)
|
|
+
|
|
+def make_cpus_config(cfg_cpus):
|
|
+ """ Taken from XendConfig. """
|
|
+ # Convert 'cpus' to list of list of ints
|
|
+
|
|
+ cpus_list = []
|
|
+ # Convert the following string to list of ints.
|
|
+ # The string supports a list of ranges (0-3),
|
|
+ # separated by commas, and negation (^1).
|
|
+ # Precedence follows the order of the string:
|
|
+ # "0-3,^1" -> [0,2,3]
|
|
+ # "0-3,^1,1" -> [0,1,2,3]
|
|
+ def cnv(s):
|
|
+ l = []
|
|
+ for c in s.split(','):
|
|
+ if c.find('-') != -1:
|
|
+ (x, y) = c.split('-')
|
|
+ for i in range(int(x), int(y)+1):
|
|
+ l.append(int(i))
|
|
+ else:
|
|
+ # remove this element from the list
|
|
+ if len(c) > 0:
|
|
+ if c[0] == '^':
|
|
+ l = [x for x in l if x != int(c[1:])]
|
|
+ else:
|
|
+ l.append(int(c))
|
|
+ return l
|
|
+
|
|
+ if type(cfg_cpus) == list:
|
|
+ if len(cfg_cpus) > 0 and type(cfg_cpus[0]) == list:
|
|
+ # If sxp_cfg was created from config.sxp,
|
|
+ # the form of 'cpus' is list of list of string.
|
|
+ # Convert 'cpus' to list of list of ints.
|
|
+ # Conversion examples:
|
|
+ # [['1']] -> [[1]]
|
|
+ # [['0','2'],['1','3']] -> [[0,2],[1,3]]
|
|
+ try:
|
|
+ for c1 in cfg_cpus:
|
|
+ cpus = []
|
|
+ for c2 in c1:
|
|
+ cpus.append(int(c2))
|
|
+ cpus_list.append(cpus)
|
|
+ except ValueError, e:
|
|
+ err('cpus = %s: %s' % (cfg_cpus, e))
|
|
+ else:
|
|
+ # Conversion examples:
|
|
+ # ["1"] -> [[1]]
|
|
+ # ["0,2","1,3"] -> [[0,2],[1,3]]
|
|
+ # ["0-3,^1","1-4,^2"] -> [[0,2,3],[1,3,4]]
|
|
+ try:
|
|
+ for c in cfg_cpus:
|
|
+ cpus = cnv(c)
|
|
+ cpus_list.append(cpus)
|
|
+ except ValueError, e:
|
|
+ err('cpus = %s: %s' % (cfg_cpus, e))
|
|
+ else:
|
|
+ # Conversion examples:
|
|
+ # cpus=1:
|
|
+ # "1" -> [[1]]
|
|
+ # "0-3,^1" -> [[0,2,3]]
|
|
+ # cpus=2:
|
|
+ # "1" -> [[1],[1]]
|
|
+ # "0-3,^1" -> [[0,2,3],[0,2,3]]
|
|
+ try:
|
|
+ cpus_list = cnv(cfg_cpus)
|
|
+ except ValueError, e:
|
|
+ err('cpus = %s: %s' % (cfg_cpus, e))
|
|
+ return cpus_list
|
|
+
|
|
+def make_config(vals):
|
|
+ config = ['pool']
|
|
+ config += [['name_label', vals.name]]
|
|
+ config += [['sched_policy', vals.sched]]
|
|
+ if type(vals.cpus) == int:
|
|
+ config += [['ncpu', vals.cpus], ['proposed_CPUs' , []]]
|
|
+ elif type(vals.cpus) == str and len(vals.cpus) > 1 and vals.cpus[0] == '#':
|
|
+ try:
|
|
+ config += [['ncpu', int(vals.cpus[1:])], ['proposed_CPUs' , []]]
|
|
+ except ValueError, ex:
|
|
+ err('Illegal value for parameter "cpus"')
|
|
+ else:
|
|
+ prop_cpus = make_cpus_config(vals.cpus)
|
|
+ config += [['ncpu', len(prop_cpus)],
|
|
+ ['proposed_CPUs'] + prop_cpus]
|
|
+ other_config = []
|
|
+ for entry in vals.other_config:
|
|
+ if '=' in entry:
|
|
+ (var, val) = entry.strip().split('=', 1)
|
|
+ other_config.append([var, val])
|
|
+ config += [['other_config'] + other_config]
|
|
+ return config
|
|
+
|
|
+def parseCommandLine(argv):
|
|
+ GOPTS.reset()
|
|
+ args = GOPTS.parse(argv)
|
|
+
|
|
+ if GOPTS.vals.help or GOPTS.vals.help_config:
|
|
+ if GOPTS.vals.help_config:
|
|
+ print GOPTS.val_usage()
|
|
+ return (None, None)
|
|
+
|
|
+ # Process remaining args as config variables.
|
|
+ for arg in args:
|
|
+ if '=' in arg:
|
|
+ (var, val) = arg.strip().split('=', 1)
|
|
+ GOPTS.setvar(var.strip(), val.strip())
|
|
+ if GOPTS.vals.config:
|
|
+ try:
|
|
+ config = sxp.parse(file(GOPTS.vals.config))[0]
|
|
+ except IOError, ex:
|
|
+ raise OptionError("Cannot read file %s: %s" % (config, ex[1]))
|
|
+ else:
|
|
+ GOPTS.load_defconfig()
|
|
+ if not GOPTS.getopt('name') and GOPTS.getopt('defconfig'):
|
|
+ GOPTS.setopt('name', os.path.basename(
|
|
+ GOPTS.getopt('defconfig')))
|
|
+ config = make_config(GOPTS.vals)
|
|
+
|
|
+ if GOPTS.vals.dryrun:
|
|
+ PrettyPrint.prettyprint(config)
|
|
+ return (None, None)
|
|
+
|
|
+ return (GOPTS, config)
|
|
+
|
|
+def help():
|
|
+ return str(GOPTS)
|
|
+
|
|
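Note: the cpus grammar handled by cnv() inside make_cpus_config() above supports comma-separated values, ranges, and '^' negation, applied left to right; the '#<n>' form in make_config() instead requests only a CPU count with no explicit CPU list. A standalone copy of the parser for a quick check:

    def cnv(s):
        # same range/negation parser as in make_cpus_config() above
        l = []
        for c in s.split(','):
            if c.find('-') != -1:
                (x, y) = c.split('-')
                for i in range(int(x), int(y) + 1):
                    l.append(int(i))
            else:
                if len(c) > 0:
                    if c[0] == '^':
                        l = [x for x in l if x != int(c[1:])]
                    else:
                        l.append(int(c))
        return l

    print(cnv("0-3,^1"))     # -> [0, 2, 3]
    print(cnv("0-3,^1,1"))   # -> [0, 2, 3, 1] (same set as [0,1,2,3] above)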
Index: xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
|
|
===================================================================
|
|
--- xen-4.0.0-testing.orig/tools/python/xen/xm/xenapi_create.py
|
|
+++ xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
|
|
@@ -310,6 +310,8 @@ class xenapi_create:
|
|
get_child_nodes_as_dict(vm, "platform", "key", "value"),
|
|
"other_config":
|
|
get_child_nodes_as_dict(vm, "other_config", "key", "value"),
|
|
+ "pool_name":
|
|
+ vm.attributes["pool_name"].value,
|
|
"PV_bootloader":
|
|
"",
|
|
"PV_kernel":
|
|
@@ -696,6 +698,8 @@ class sxp2xml:
|
|
= str(get_child_by_name(config, "s3_integrity", 0))
|
|
vm.attributes["superpages"] \
|
|
= str(get_child_by_name(config, "superpages", 0))
|
|
+ vm.attributes["pool_name"] \
|
|
+ = str(get_child_by_name(config, "pool_name", "Pool-0"))
|
|
|
|
sec_data = get_child_by_name(config, "security")
|
|
if sec_data:
|