diff --git a/32on64-extra-mem.patch b/32on64-extra-mem.patch index da8f123..263cf4a 100644 --- a/32on64-extra-mem.patch +++ b/32on64-extra-mem.patch @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -2863,7 +2863,7 @@ class XendDomainInfo: +@@ -2902,7 +2902,7 @@ class XendDomainInfo: self.guest_bitsize = self.image.getBitSize() # Make sure there's enough RAM available for the domain diff --git a/README.SuSE b/README.SuSE index 227ac48..f161bbc 100644 --- a/README.SuSE +++ b/README.SuSE @@ -31,6 +31,7 @@ optional packages are also installed: vm-install (Optional, to install VMs) python-gtk (Optional, to install VMs graphically) virt-manager (Optional, to manage VMs graphically) + virt-viewer (Optional, to view VMs outside virt-manager) tightvnc (Optional, to view VMs outside virt-manager) Additional packages: @@ -328,7 +329,7 @@ documentation for workarounds. Networking ---------- -Your virtual machines become much more useful if your can reach them via the +Your virtual machines become much more useful if you can reach them via the network. Starting with openSUSE11.1 and SLE11, networking in domain 0 is configured and managed via YaST. The yast2-networking module can be used to create and manage bridged networks. During initial installation, a bridged diff --git a/bdrv_open2_flags_2.patch b/bdrv_open2_flags_2.patch index 9f09ffd..2213d1a 100644 --- a/bdrv_open2_flags_2.patch +++ b/bdrv_open2_flags_2.patch @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c =================================================================== --- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c +++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c -@@ -225,6 +225,7 @@ static int open_disk(struct td_state *s, +@@ -227,6 +227,7 @@ static int open_disk(struct td_state *s, BlockDriver* drv; char* devname; static int devnumber = 0; @@ -10,7 +10,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c int i; DPRINTF("Opening %s as blktap%d\n", path, devnumber); -@@ -247,7 +248,7 @@ static int open_disk(struct td_state *s, +@@ -249,7 +250,7 @@ static int open_disk(struct td_state *s, DPRINTF("%s driver specified\n", drv ? 
drv->format_name : "No"); /* Open the image */ @@ -23,7 +23,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c =================================================================== --- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c +++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -133,7 +133,8 @@ static void insert_media(void *opaque) +@@ -136,7 +136,8 @@ static void insert_media(void *opaque) else format = &bdrv_raw; @@ -33,7 +33,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c #ifdef CONFIG_STUBDOM { char *buf, *backend, *params_path, *params; -@@ -397,9 +398,9 @@ void xenstore_parse_domain_config(int hv +@@ -400,9 +401,9 @@ void xenstore_parse_domain_config(int hv { char **e_danger = NULL; char *buf = NULL; @@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c unsigned int len, num, hd_index, pci_devid = 0; BlockDriverState *bs; BlockDriver *format; -@@ -461,7 +462,8 @@ void xenstore_parse_domain_config(int hv +@@ -464,7 +465,8 @@ void xenstore_parse_domain_config(int hv } for (i = 0; i < num; i++) { @@ -55,8 +55,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c /* read the backend path */ xenstore_get_backend_path(&bpath, "vbd", danger_path, hvm_domid, e_danger[i]); if (bpath == NULL) -@@ -560,6 +562,17 @@ void xenstore_parse_domain_config(int hv - } +@@ -550,6 +552,17 @@ void xenstore_parse_domain_config(int hv + format = &bdrv_raw; } + /* read the mode of the device */ @@ -73,7 +73,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c #if 0 /* Phantom VBDs are disabled because the use of paths * from guest-controlled areas in xenstore is unsafe. -@@ -612,7 +625,7 @@ void xenstore_parse_domain_config(int hv +@@ -617,7 +630,7 @@ void xenstore_parse_domain_config(int hv #ifdef CONFIG_STUBDOM if (pasprintf(&danger_buf, "%s/device/vbd/%s", danger_path, e_danger[i]) == -1) continue; @@ -82,12 +82,12 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c pstrcpy(bs->filename, sizeof(bs->filename), params); } #else -@@ -641,7 +654,7 @@ void xenstore_parse_domain_config(int hv +@@ -646,7 +659,7 @@ void xenstore_parse_domain_config(int hv } } pstrcpy(bs->filename, sizeof(bs->filename), params); -- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) -+ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) +- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) { ++ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) { fprintf(stderr, "qemu: could not open vbd '%s' or hard disk image '%s' (drv '%s' format '%s')\n", buf, params, drv ? drv : "?", format ? 
format->format_name : "0"); - } - + } else { + char* snapshot = get_snapshot_name(atoi(e_danger[i])); diff --git a/blktap-pv-cdrom.patch b/blktap-pv-cdrom.patch index ccaf60a..ccf8b9a 100644 --- a/blktap-pv-cdrom.patch +++ b/blktap-pv-cdrom.patch @@ -741,7 +741,7 @@ Index: xen-4.0.0-testing/tools/blktap/lib/blktaplib.h =================================================================== --- xen-4.0.0-testing.orig/tools/blktap/lib/blktaplib.h +++ xen-4.0.0-testing/tools/blktap/lib/blktaplib.h -@@ -219,6 +219,7 @@ typedef struct msg_pid { +@@ -220,6 +220,7 @@ typedef struct msg_pid { #define DISK_TYPE_RAM 3 #define DISK_TYPE_QCOW 4 #define DISK_TYPE_QCOW2 5 diff --git a/blktap.patch b/blktap.patch index 7be792a..3d67c84 100644 --- a/blktap.patch +++ b/blktap.patch @@ -5,7 +5,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -3262,7 +3262,7 @@ class XendDomainInfo: +@@ -3286,7 +3286,7 @@ class XendDomainInfo: (fn, BOOTLOADER_LOOPBACK_DEVICE)) vbd = { @@ -18,7 +18,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c =================================================================== --- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c +++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -396,9 +396,9 @@ void xenstore_parse_domain_config(int hv +@@ -397,9 +397,9 @@ void xenstore_parse_domain_config(int hv { char **e_danger = NULL; char *buf = NULL; @@ -30,7 +30,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c unsigned int len, num, hd_index, pci_devid = 0; BlockDriverState *bs; BlockDriver *format; -@@ -438,6 +438,14 @@ void xenstore_parse_domain_config(int hv +@@ -439,6 +439,14 @@ void xenstore_parse_domain_config(int hv e_danger[i]); if (bpath == NULL) continue; @@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c /* read the name of the device */ if (pasprintf(&buf, "%s/dev", bpath) == -1) continue; -@@ -712,6 +720,7 @@ void xenstore_parse_domain_config(int hv +@@ -715,6 +723,7 @@ void xenstore_parse_domain_config(int hv free(danger_type); free(params); free(dev); diff --git a/cdrom-removable.patch b/cdrom-removable.patch index 422d00e..1175f1c 100644 --- a/cdrom-removable.patch +++ b/cdrom-removable.patch @@ -1,7 +1,5 @@ -Index: xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py ++++ b/tools/python/xen/xend/server/HalDaemon.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python +# -*- mode: python; -*- @@ -246,10 +244,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py + print 'Falling off end' + + -Index: xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py ++++ b/tools/python/xen/xend/server/Hald.py @@ -0,0 +1,125 @@ +#============================================================================ +# This library is free software; you can redistribute it and/or @@ -376,10 +372,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py + watcher.run() + time.sleep(10) + watcher.shutdown() -Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py 
-=================================================================== ---- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py -+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py +--- a/tools/python/xen/xend/server/SrvServer.py ++++ b/tools/python/xen/xend/server/SrvServer.py @@ -56,6 +56,7 @@ from xen.web.SrvDir import SrvDir from SrvRoot import SrvRoot @@ -397,15 +391,22 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py def create(): root = SrvDir() -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -513,6 +513,19 @@ void xenstore_parse_domain_config(int hv - params = newparams; - format = &bdrv_raw; - } -+ /* if cdrom pyhsical put a watch on media-present */ +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c +@@ -18,6 +18,7 @@ + #include "exec-all.h" + #include "sysemu.h" + ++#include "console.h" + #include "hw.h" + #include "pci.h" + #include "qemu-timer.h" +@@ -548,6 +549,21 @@ void xenstore_parse_domain_config(int hv + #endif + + bs = bdrv_new(dev); ++ ++ /* if cdrom physical put a watch on media-present */ + if (bdrv_get_type_hint(bs) == BDRV_TYPE_CDROM) { + if (drv && !strcmp(drv, "phy")) { + if (pasprintf(&buf, "%s/media-present", bpath) != -1) { @@ -418,14 +419,15 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c + } + } + } - - #if 0 - /* Phantom VBDs are disabled because the use of paths -@@ -938,6 +951,50 @@ void xenstore_record_dm_state(const char ++ + /* check if it is a cdrom */ + if (danger_type && !strcmp(danger_type, "cdrom")) { + bdrv_set_type_hint(bs, BDRV_TYPE_CDROM); +@@ -938,6 +954,50 @@ void xenstore_record_dm_state(const char xenstore_record_dm("state", state); } -+void xenstore_process_media_change_event(char **vec) ++static void xenstore_process_media_change_event(char **vec) +{ + char *media_present = NULL; + unsigned int len; @@ -472,7 +474,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c void xenstore_process_event(void *opaque) { char **vec, *offset, *bpath = NULL, *buf = NULL, *drv = NULL, *image = NULL; -@@ -968,6 +1025,11 @@ void xenstore_process_event(void *opaque +@@ -968,6 +1028,11 @@ void xenstore_process_event(void *opaque xenstore_watch_callbacks[i].cb(vec[XS_WATCH_TOKEN], xenstore_watch_callbacks[i].opaque); diff --git a/cpu-pools-docs.patch b/cpu-pools-docs.patch new file mode 100644 index 0000000..e28f7c3 --- /dev/null +++ b/cpu-pools-docs.patch @@ -0,0 +1,1484 @@ +Index: xen-4.0.0-testing/docs/xen-api/coversheet.tex +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/coversheet.tex ++++ xen-4.0.0-testing/docs/xen-api/coversheet.tex +@@ -52,6 +52,7 @@ Mike Day, IBM & Daniel Veillard, Red Hat + Jim Fehlig, Novell & Tom Wilkie, University of Cambridge \\ + Jon Harrop, XenSource & Yosuke Iwamatsu, NEC \\ + Masaki Kanno, FUJITSU \\ ++Lutz Dube, FUJITSU TECHNOLOGY SOLUTIONS \\ + \end{tabular} + \end{large} + +Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex ++++ xen-4.0.0-testing/docs/xen-api/revision-history.tex +@@ -50,6 +50,12 @@ + between classes. Added host.PSCSI\_HBAs and VM.DSCSI\_HBAs + fields.\tabularnewline + \hline ++ 1.0.10 & 10th Jan. 10 & L. 
Dube &
++ Added the definition of the new class cpu\_pool. Updated the table
++ and the diagram representing relationships between classes.
++ Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
++ host\_cpu.cpu\_pool.\tabularnewline
++ \hline
+ \end{tabular}
+ \end{center}
+ \end{flushleft}
+Index: xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
+===================================================================
+--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-coversheet.tex
++++ xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
+@@ -17,12 +17,12 @@
+ \newcommand{\coversheetlogo}{xen.eps}
+ 
+ %% Document date
+-\newcommand{\datestring}{20th November 2009}
++\newcommand{\datestring}{10th January 2010}
+ 
+ \newcommand{\releasestatement}{Stable Release}
+ 
+ %% Document revision
+-\newcommand{\revstring}{API Revision 1.0.9}
++\newcommand{\revstring}{API Revision 1.0.10}
+ 
+ %% Document authors
+ \newcommand{\docauthors}{
+Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
+===================================================================
+--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel-graph.dot
++++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
+@@ -14,7 +14,7 @@ fontname="Verdana";
+ 
+ node [ shape=box ]; session VM host network VIF PIF SR VDI VBD PBD user;
+ node [ shape=box ]; XSPolicy ACMPolicy DPCI PPCI host_cpu console VTPM;
+-node [ shape=box ]; DSCSI PSCSI DSCSI_HBA PSCSI_HBA;
++node [ shape=box ]; DSCSI PSCSI DSCSI_HBA PSCSI_HBA cpu_pool;
+ node [ shape=ellipse ]; VM_metrics VM_guest_metrics host_metrics;
+ node [ shape=ellipse ]; PIF_metrics VIF_metrics VBD_metrics PBD_metrics;
+ session -> host [ arrowhead="none" ]
+@@ -51,4 +51,7 @@ DSCSI_HBA -> PSCSI_HBA [ arrowhead="crow
+ PSCSI -> host [ arrowhead="none", arrowtail="crow" ]
+ PSCSI_HBA -> host [ arrowhead="none", arrowtail="crow" ]
+ PSCSI -> PSCSI_HBA [ arrowhead="none", arrowtail="crow" ]
++cpu_pool -> host_cpu [ arrowhead="crow", arrowtail="none" ]
++cpu_pool -> VM [ arrowhead="crow", arrowtail="none" ]
++host -> cpu_pool [ arrowhead="crow", arrowtail="none" ]
+ }
+Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
+===================================================================
+--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel.tex
++++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
+@@ -56,6 +56,7 @@ Name & Description \\
+ {\tt debug} & A basic class for testing \\
+ {\tt XSPolicy} & A class for handling Xen Security Policies \\
+ {\tt ACMPolicy} & A class for handling ACM-type policies \\
++{\tt cpu\_pool} & A container for VMs which should share the same host\_cpu(s) \\
+ \hline
+ \end{tabular}\end{center}
+ \section{Relationships Between Classes}
+@@ -88,6 +89,9 @@ PSCSI.HBA & PSCSI\_HBA.PSCSIs & one-to-m
+ PSCSI\_HBA.host & host.PSCSI\_HBAs & one-to-many\\
+ host.resident\_VMs & VM.resident\_on & many-to-one\\
+ host.host\_CPUs & host\_cpu.host & many-to-one\\
++host.resident\_cpu\_pools & cpu\_pool.resident\_on & many-to-one\\
++cpu\_pool.started\_VMs & VM.cpu\_pool & many-to-one\\
++cpu\_pool.host\_CPUs & host\_cpu.cpu\_pool & many-to-one\\
+ \hline
+ \end{tabular}\end{center}
+ 
+@@ -499,6 +503,56 @@ error code and a message describing the
+ \begin{verbatim}SECURITY_ERROR(xserr, message)\end{verbatim}
+ \begin{center}\rule{10em}{0.1pt}\end{center}
+ 
++\subsubsection{POOL\_BAD\_STATE}
++
++You attempted an operation on a pool that was not in an appropriate state
++at the time; for example, you attempted to activate a pool that was
++already activated.
++
++\vspace{0.3cm}
++{\bf Signature:}
++\begin{verbatim}POOL_BAD_STATE(current pool state)\end{verbatim}
++\begin{center}\rule{10em}{0.1pt}\end{center}
++
++\subsubsection{INSUFFICIENT\_CPUS}
++
++You attempted to activate a cpu\_pool but there are not enough
++unallocated CPUs to satisfy the request.
++
++\vspace{0.3cm}
++{\bf Signature:}
++\begin{verbatim}INSUFFICIENT_CPUS(needed cpu count, available cpu count)\end{verbatim}
++\begin{center}\rule{10em}{0.1pt}\end{center}
++
++\subsubsection{UNKOWN\_SCHED\_POLICY}
++
++The specified scheduler policy is unknown to the host.
++
++\vspace{0.3cm}
++{\bf Signature:}
++\begin{verbatim}UNKOWN_SCHED_POLICY()\end{verbatim}
++\begin{center}\rule{10em}{0.1pt}\end{center}
++
++\subsubsection{INVALID\_CPU}
++
++You tried to reconfigure a cpu\_pool with a CPU that is unknown to the host
++or has a wrong state.
++
++\vspace{0.3cm}
++{\bf Signature:}
++\begin{verbatim}INVALID_CPU(message)\end{verbatim}
++\begin{center}\rule{10em}{0.1pt}\end{center}
++
++\subsubsection{LAST\_CPU\_NOT\_REMOVEABLE}
++
++You tried to remove the last CPU from a cpu\_pool that has one or more
++active domains.
++
++\vspace{0.3cm}
++{\bf Signature:}
++\begin{verbatim}LAST_CPU_NOT_REMOVEABLE(message)\end{verbatim}
++\begin{center}\rule{10em}{0.1pt}\end{center}
++
+ 
+ \newpage
+ \section{Class: session}
+@@ -4847,6 +4901,135 @@ references to objects with match names
+ \vspace{0.3cm}
+ \vspace{0.3cm}
+ \vspace{0.3cm}
++\subsubsection{RPC name:~get\_cpu\_pool}
++
++{\bf Overview:}
++Get the cpu\_pool field of the given VM.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} ((cpu_pool ref) Set) get_cpu_pool (session_id s, VM ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt VM ref } & self & reference to the object \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++(cpu\_pool ref) Set
++}
++
++
++references to cpu\_pool objects.
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~get\_pool\_name}
++
++{\bf Overview:}
++Get the pool\_name field of the given VM.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} string get_pool_name (session_id s, VM ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt VM ref } & self & reference to the object \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++string
++}
++
++
++name of cpu pool to use
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~cpu\_pool\_migrate}
++
++{\bf Overview:}
++Migrate the VM to another cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void cpu_pool_migrate (session_id s, VM ref self, cpu_pool ref pool)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt VM ref } & self & reference to the object \\ \hline
++{\tt cpu\_pool ref} & pool & reference to new cpu\_pool \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++
++\noindent{\bf Possible Error Codes:} {\tt POOL\_BAD\_STATE, VM\_BAD\_POWER\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~set\_pool\_name}
++
++{\bf Overview:}
++Set cpu pool name to use for next activation.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void set_pool_name (session_id s, VM ref self, string pool_name)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt VM ref } & self & reference to the object \\ \hline
++{\tt string} & pool\_name & New pool name \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++
++
++
+ 
+ \vspace{1cm}
+ \newpage
+@@ -5681,6 +5864,7 @@ $\mathit{RO}_\mathit{run}$ & {\tt PSCSI
+ $\mathit{RO}_\mathit{run}$ & {\tt PSCSI\_HBAs} & (PSCSI\_HBA ref) Set & physical SCSI host bus adapters \\
+ $\mathit{RO}_\mathit{run}$ & {\tt host\_CPUs} & (host\_cpu ref) Set & The physical CPUs on this host \\
+ $\mathit{RO}_\mathit{run}$ & {\tt metrics} & host\_metrics ref & metrics associated with this host \\
++$\mathit{RO}_\mathit{run}$ & {\tt resident\_cpu\_pools} & (cpu\_pool ref) Set & list of cpu\_pools currently resident on the host \\
+ \hline
+ \end{longtable}
+ \subsection{RPCs associated with class: host}
+@@ -7229,6 +7413,38 @@ references to objects with match names
+ \vspace{0.3cm}
+ \vspace{0.3cm}
+ \vspace{0.3cm}
++\subsubsection{RPC name:~get\_resident\_cpu\_pools}
++
++{\bf Overview:}
++Get the resident\_cpu\_pools field of the given host.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} ((cpu_pool ref) Set) get_resident_cpu_pools (session_id s, host ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt host ref } & self & reference to the object \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++(cpu\_pool ref) Set
++}
++
++
++references to the cpu\_pools resident on the host.
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++
+ 
+ \vspace{1cm}
+ \newpage
+@@ -7484,6 +7700,7 @@ $\mathit{RO}_\mathit{run}$ & {\tt stepp
+ $\mathit{RO}_\mathit{run}$ & {\tt flags} & string & the flags of the physical CPU (a decoded version of the features field) \\
+ $\mathit{RO}_\mathit{run}$ & {\tt features} & string & the physical CPU feature bitmap \\
+ $\mathit{RO}_\mathit{run}$ & {\tt utilisation} & float & the current CPU utilisation \\
++$\mathit{RO}_\mathit{run}$ & {\tt cpu\_pool} & (cpu\_pool ref) Set & reference to cpu\_pool the cpu belongs to \\
+ \hline
+ \end{longtable}
+ \subsection{RPCs associated with class: host\_cpu}
+@@ -7896,6 +8113,70 @@ all fields from the object
+ \vspace{0.3cm}
+ \vspace{0.3cm}
+ \vspace{0.3cm}
++\subsubsection{RPC name:~get\_cpu\_pool}
++
++{\bf Overview:}
++Get the cpu\_pool field of the given host\_cpu.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} ((cpu_pool) Set) get_cpu_pool (session_id s, host_cpu ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt host\_cpu ref } & self & reference to the object \\ \hline
++
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++(cpu\_pool) Set
++}
++
++
++value of the field
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~get\_unassigned\_cpus}
++
++{\bf Overview:}
++Get a reference to all cpus that are not assigned to any cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} ((host_cpu) Set) get_unassigned_cpus (session_id s)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++(host\_cpu ref) Set
++}
++
++
++Set of free (not assigned) cpus
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++
++
+ 
+ \vspace{1cm}
+ \newpage
+@@ -18892,3 +19173,1073 @@ all fields from the object
+ \vspace{0.3cm}
+ \vspace{0.3cm}
+ 
++\newpage
++\section{Class: cpu\_pool}
++\subsection{Fields for class: cpu\_pool}
++\begin{longtable}{|lllp{0.38\textwidth}|}
++\hline
++\multicolumn{1}{|l}{Name} & \multicolumn{3}{l|}{\bf cpu\_pool} \\
++\multicolumn{1}{|l}{Description} & \multicolumn{3}{l|}{\parbox{11cm}{\em A CPU pool}} \\
++\hline
++Quals & Field & Type & Description \\
++\hline
++$\mathit{RO}_\mathit{run}$ & {\tt uuid} & string & unique identifier / object reference \\
++$\mathit{RW}$ & {\tt name\_label} & string & name of cpu\_pool \\
++$\mathit{RW}$ & {\tt name\_description} & string & cpu\_pool description \\
++$\mathit{RO}_\mathit{run}$ & {\tt resident\_on} & host ref & the host the cpu\_pool is currently resident on \\
++$\mathit{RW}$ & {\tt auto\_power\_on} & bool & True if this cpu\_pool should be activated automatically after host boot \\
++$\mathit{RO}_\mathit{run}$ & {\tt started\_VMs} & (VM ref) Set & list of VMs currently started in this cpu\_pool \\
++$\mathit{RW}$ & {\tt ncpu} & integer & number of host\_CPUs requested for this cpu\_pool at next start \\
++$\mathit{RW}$ & {\tt sched\_policy} & string & scheduler policy on this cpu\_pool \\
++$\mathit{RW}$ & {\tt proposed\_CPUs} & (string) Set & list of proposed host\_CPUs to assign at next activation \\
++$\mathit{RO}_\mathit{run}$ & {\tt host\_CPUs} & (host\_cpu ref) Set & list of host\_cpus currently assigned to this cpu\_pool \\
++$\mathit{RO}_\mathit{run}$ & {\tt
activated} & bool & True if this cpu\_pool is activated \\
++$\mathit{RW}$ & {\tt other\_config} & (string $\rightarrow$ string) Map & additional configuration \\
++\hline
++\end{longtable}
++\subsection{RPCs associated with class: cpu\_pool}
++\subsubsection{RPC name:~activate}
++
++{\bf Overview:}
++Activate the cpu\_pool and assign the given CPUs to it.
++CPUs specified in field proposed\_CPUs that do not exist or are not free are
++ignored. If the value of ncpu is greater than the number of CPUs in field
++proposed\_CPUs, additional free CPUs are assigned to the cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void activate (session_id s, cpu_pool ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE, INSUFFICIENT\_CPUS, UNKOWN\_SCHED\_POLICY}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~create}
++
++{\bf Overview:}
++Create a new cpu\_pool instance, and return its handle.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} (cpu_pool ref) create (session_id s, cpu_pool record args)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool record } & args & All constructor arguments \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++cpu\_pool ref
++}
++
++
++reference to the newly created object
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~deactivate}
++
++{\bf Overview:}
++Deactivate the cpu\_pool and release all CPUs assigned to it.
++This function can only be called if there are no domains active in the
++cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void deactivate (session_id s, cpu_pool ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:} {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~destroy}
++
++{\bf Overview:}
++Destroy the specified cpu\_pool. The cpu\_pool is completely removed from the
++system.
++This function can only be called if the cpu\_pool is deactivated.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void destroy (session_id s, cpu_pool ref self)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:} {\tt POOL\_BAD\_STATE}
++
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~add\_host\_CPU\_live}
++
++
++{\bf Overview:}
++Add an additional CPU immediately to the cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void add_host_CPU_live (session_id s, cpu_pool ref self, host_cpu ref host_cpu)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt host\_cpu ref } & host\_cpu & CPU to add \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE, INVALID\_CPU}
++
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~remove\_host\_CPU\_live}
++
++
++{\bf Overview:}
++Remove a CPU immediately from the cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void remove_host_CPU_live (session_id s, cpu_pool ref self, host_cpu ref host_cpu)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt host\_cpu ref } & host\_cpu & CPU to remove \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE, INVALID\_CPU, LAST\_CPU\_NOT\_REMOVEABLE}
++
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~get\_all}
++
++
++{\bf Overview:}
++Return a list of all the cpu pools known to the system.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} ((cpu_pool ref) Set) get_all (session_id s)\end{verbatim}
++
++
++ \noindent {\bf Return Type:}
++{\tt
++(cpu\_pool ref) Set
++}
++A list of all the IDs of the cpu pools.
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~get\_all\_records}
++
++
++{\bf Overview:}
++Return a map of all the cpu pool records known to the system.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} (((cpu_pool ref) -> (cpu_pool record)) Map) get_all_records (session_id s)\end{verbatim}
++
++
++ \noindent {\bf Return Type:}
++{\tt
++((cpu\_pool ref) $\rightarrow$ (cpu\_pool record)) Map
++}
++A map of all the cpu pool records indexed by cpu pool ref.
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~get\_by\_name\_label}
++
++{\bf Overview:}
++Get all the cpu\_pool instances with the given label.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} ((cpu_pool ref) Set) get_by_name_label (session_id s, string label)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt string } & label & label of object to return \\ \hline
++
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++(cpu\_pool ref) Set
++}
++
++
++references to objects with matching names
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~get\_by\_uuid}
++
++{\bf Overview:}
++Get a reference to the cpu\_pool instance with the specified UUID.
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} (cpu_pool ref) get_by_uuid (session_id s, string uuid)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt string } & uuid & UUID of object to return \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++cpu\_pool ref ++} ++ ++ ++reference to the object ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_activated} ++ ++ ++{\bf Overview:} ++Return the activation state of the cpu\_pool object. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} bool get_activated (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++bool ++} ++Returns {\bf true} if cpu\_pool is active. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_auto\_power\_on} ++ ++ ++{\bf Overview:} ++Return the auto power attribute of the cpu\_pool object. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} bool get_auto_power_on (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++bool ++} ++Returns {\bf true} if cpu\_pool has to be activated on xend start. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_host\_CPUs} ++ ++ ++{\bf Overview:} ++Return the list of host\_cpu refs assigned to the cpu\_pool object. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((host_cpu ref) Set) get_host_CPUs (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(host\_cpu ref) Set ++} ++Returns a list of references of all host cpus assigned to the cpu\_pool. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_name\_description} ++ ++{\bf Overview:} ++Get the name/description field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_name_description (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_name\_label} ++ ++{\bf Overview:} ++Get the name/label field of the given cpu\_pool. 
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_name_label (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_ncpu} ++ ++{\bf Overview:} ++Get the ncpu field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} int get_ncpu (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++int ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_proposed\_CPUs} ++ ++{\bf Overview:} ++Get the proposed\_CPUs field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((string) Set) get_proposed_CPUs (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(string) Set ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_other\_config} ++ ++{\bf Overview:} ++Get the other\_config field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} ((string -> string) Map) get_other_config (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++(string $\rightarrow$ string) Map ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_record} ++ ++{\bf Overview:} ++Get a record containing the current state of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} (cpu_pool record) get_record (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++cpu\_pool record ++} ++ ++ ++all fields of the object. ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_resident\_on} ++ ++{\bf Overview:} ++Get the resident\_on field of the given cpu\_pool. 
++ ++\noindent {\bf Signature:} ++\begin{verbatim} (host ref) get_resident_on (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++host ref ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_sched\_policy} ++ ++{\bf Overview:} ++Get the sched\_policy field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} string get_sched_policy (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_started\_VMs} ++ ++{\bf Overview:} ++Get the started\_VMs field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} ((VM ref) Set) get_started_VMs (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++(VM ref) Set ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_uuid} ++ ++{\bf Overview:} ++Get the uuid field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_uuid (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_auto\_power\_on} ++ ++{\bf Overview:} ++Set the auto\_power\_on field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} void set_auto_power_on (session_id s, cpu_pool ref self, bool value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt bool } & value & new auto\_power\_on value \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++void ++} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_proposed\_CPUs} ++ ++{\bf Overview:} ++Set the proposed\_CPUs field of the given cpu\_pool. 
++
++\noindent {\bf Signature:}
++\begin{verbatim} void set_proposed_CPUs (session_id s, cpu_pool ref self, string Set cpus)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt string Set } & cpus & Set of preferred CPU (numbers) to use \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~add\_to\_proposed\_CPUs}
++
++{\bf Overview:}
++Add a CPU (number) to the proposed\_CPUs field of the given cpu\_pool.
++
++\noindent {\bf Signature:}
++\begin{verbatim} void add_to_proposed_CPUs (session_id s, cpu_pool ref self, integer cpu)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt integer } & cpu & Number of CPU to add \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~remove\_from\_proposed\_CPUs}
++
++{\bf Overview:}
++Remove a CPU (number) from the proposed\_CPUs field of the given cpu\_pool.
++
++\noindent {\bf Signature:}
++\begin{verbatim} void remove_from_proposed_CPUs (session_id s, cpu_pool ref self, integer cpu)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt integer } & cpu & Number of CPU to remove \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~set\_name\_label}
++
++{\bf Overview:}
++Set the name/label field of the given cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void set_name_label (session_id s, cpu_pool ref self, string value)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt string } & value & New value to set \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~set\_ncpu}
++
++{\bf Overview:}
++Set the ncpu field of the given cpu\_pool.
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void set_ncpu (session_id s, cpu_pool ref self, integer value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt integer } & value & Number of cpus to use \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} ++ {\tt POOL\_BAD\_STATE} ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_other\_config} ++ ++{\bf Overview:} ++Set the other\_config field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void set_other_config (session_id s, cpu_pool ref self, (string -> string) Map value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt (string $\rightarrow$ string) Map } & value & New value to set \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~add\_to\_other\_config} ++ ++{\bf Overview:} ++Add the given key-value pair to the other\_config field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void add_to_other_config (session_id s, cpu_pool ref self, string key, string value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt string } & key & Key to add \\ \hline ++{\tt string } & value & Value to add \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~remove\_from\_other\_config} ++ ++{\bf Overview:} ++Remove the given key and its corresponding value from the other\_config ++field of the given cpu\_pool. If the key is not in that Map, then do nothing. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void remove_from_other_config (session_id s, cpu_pool ref self, string key)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt string } & key & Key to remove \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_sched\_policy} ++ ++{\bf Overview:} ++Set the sched\_policy field of the given cpu\_pool. 
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void set_sched_policy (session_id s, cpu_pool ref self, string new_sched_policy)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt string } & new\_sched\_policy & New value to set \\ \hline
++\end{tabular}
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++
++
diff --git a/cpu-pools-libxc.patch b/cpu-pools-libxc.patch
new file mode 100644
index 0000000..e9ba385
--- /dev/null
+++ b/cpu-pools-libxc.patch
@@ -0,0 +1,360 @@
+Index: xen-4.0.0-testing/tools/libxc/Makefile
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/Makefile
++++ xen-4.0.0-testing/tools/libxc/Makefile
+@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
+ CTRL_SRCS-y += xc_core.c
+ CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
+ CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
++CTRL_SRCS-y += xc_cpupool.c
+ CTRL_SRCS-y += xc_domain.c
+ CTRL_SRCS-y += xc_evtchn.c
+ CTRL_SRCS-y += xc_misc.c
+Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
+===================================================================
+--- /dev/null
++++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
+@@ -0,0 +1,154 @@
++/******************************************************************************
++ * xc_cpupool.c
++ *
++ * API for manipulating and obtaining information on cpupools.
++ *
++ * Copyright (c) 2009, J Gross.
++ */
++
++#include <stdarg.h>
++#include "xc_private.h"
++
++int xc_cpupool_create(int xc_handle,
++                      uint32_t *ppoolid,
++                      uint32_t sched_id)
++{
++    int err;
++    DECLARE_DOMCTL;
++
++    domctl.cmd = XEN_DOMCTL_cpupool_op;
++    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
++    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
++ XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid; ++ domctl.u.cpupool_op.sched_id = sched_id; ++ if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 ) ++ return err; ++ ++ *ppoolid = domctl.u.cpupool_op.cpupool_id; ++ return 0; ++} ++ ++int xc_cpupool_destroy(int xc_handle, ++ uint32_t poolid) ++{ ++ DECLARE_DOMCTL; ++ ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ return do_domctl_save(xc_handle, &domctl); ++} ++ ++int xc_cpupool_getinfo(int xc_handle, ++ uint32_t first_poolid, ++ uint32_t n_max, ++ xc_cpupoolinfo_t *info) ++{ ++ int err = 0; ++ int p; ++ uint32_t poolid = first_poolid; ++ uint8_t local[sizeof (info->cpumap)]; ++ DECLARE_DOMCTL; ++ ++ memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t)); ++ ++ for (p = 0; p < n_max; p++) ++ { ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local); ++ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8; ++ ++ if ( (err = lock_pages(local, sizeof(local))) != 0 ) ++ { ++ PERROR("Could not lock memory for Xen hypercall"); ++ break; ++ } ++ err = do_domctl_save(xc_handle, &domctl); ++ unlock_pages(local, sizeof (local)); ++ ++ if ( err < 0 ) ++ break; ++ ++ info->cpupool_id = domctl.u.cpupool_op.cpupool_id; ++ info->sched_id = domctl.u.cpupool_op.sched_id; ++ info->n_dom = domctl.u.cpupool_op.n_dom; ++ bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8); ++ poolid = domctl.u.cpupool_op.cpupool_id + 1; ++ info++; ++ } ++ ++ if ( p == 0 ) ++ return err; ++ ++ return p; ++} ++ ++int xc_cpupool_addcpu(int xc_handle, ++ uint32_t poolid, ++ int cpu) ++{ ++ DECLARE_DOMCTL; ++ ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu; ++ return do_domctl_save(xc_handle, &domctl); ++} ++ ++int xc_cpupool_removecpu(int xc_handle, ++ uint32_t poolid, ++ int cpu) ++{ ++ DECLARE_DOMCTL; ++ ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ domctl.u.cpupool_op.cpu = (cpu < 0) ? 
XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
++    return do_domctl_save(xc_handle, &domctl);
++}
++
++int xc_cpupool_movedomain(int xc_handle,
++                          uint32_t poolid,
++                          uint32_t domid)
++{
++    DECLARE_DOMCTL;
++
++    domctl.cmd = XEN_DOMCTL_cpupool_op;
++    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
++    domctl.u.cpupool_op.cpupool_id = poolid;
++    domctl.u.cpupool_op.domid = domid;
++    return do_domctl_save(xc_handle, &domctl);
++}
++
++int xc_cpupool_freeinfo(int xc_handle,
++                        uint64_t *cpumap)
++{
++    int err;
++    uint8_t local[sizeof (*cpumap)];
++    DECLARE_DOMCTL;
++
++    domctl.cmd = XEN_DOMCTL_cpupool_op;
++    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
++    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
++    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
++
++    if ( (err = lock_pages(local, sizeof(local))) != 0 )
++    {
++        PERROR("Could not lock memory for Xen hypercall");
++        return err;
++    }
++
++    err = do_domctl_save(xc_handle, &domctl);
++    unlock_pages(local, sizeof (local));
++
++    if (err < 0)
++        return err;
++
++    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
++
++    return 0;
++}
+Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
++++ xen-4.0.0-testing/tools/libxc/xc_domain.c
+@@ -6,6 +6,7 @@
+  * Copyright (c) 2003, K A Fraser.
+  */
+ 
++#include <stdarg.h>
+ #include "xc_private.h"
+ #include "xg_save_restore.h"
+ #include <xen/memory.h>
+@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
+                      uint32_t ssidref,
+                      xen_domain_handle_t handle,
+                      uint32_t flags,
+-                     uint32_t *pdomid)
++                     uint32_t *pdomid, ...)
+ {
+     int err;
++    va_list ap;
+     DECLARE_DOMCTL;
+ 
+     domctl.cmd = XEN_DOMCTL_createdomain;
+     domctl.domain = (domid_t)*pdomid;
+     domctl.u.createdomain.ssidref = ssidref;
+     domctl.u.createdomain.flags = flags;
++    if ( flags & XEN_DOMCTL_CDF_pool ) {
++        va_start(ap, pdomid);
++        domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
++        va_end(ap);
++    }
+     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
+     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
+         return err;
+@@ -206,6 +213,7 @@ int xc_domain_getinfo(int xc_handle,
+         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
+         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
+         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
++        info->cpupool = domctl.u.getdomaininfo.cpupool;
+ 
+         memcpy(info->handle, domctl.u.getdomaininfo.handle,
+                sizeof(xen_domain_handle_t));
+Index: xen-4.0.0-testing/tools/libxc/xc_private.h
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
++++ xen-4.0.0-testing/tools/libxc/xc_private.h
+@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
+     return ret;
+ }
+ 
++static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
++{
++    int ret;
++
++    do
++    {
++        ret = do_domctl(xc_handle, domctl);
++    }
++    while ( (ret < 0 ) && (errno == EAGAIN) );
++
++    return ret;
++}
++
+ static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
+ {
+     int ret = -1;
+Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
++++ xen-4.0.0-testing/tools/libxc/xenctrl.h
+@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
+     unsigned int nr_online_vcpus;
+     unsigned int max_vcpu_id;
+     xen_domain_handle_t handle;
++    unsigned int cpupool;
+ } xc_dominfo_t;
+ 
+ typedef
xen_domctl_getdomaininfo_t xc_domaininfo_t; +@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle, + uint32_t ssidref, + xen_domain_handle_t handle, + uint32_t flags, +- uint32_t *pdomid); ++ uint32_t *pdomid, ...); + + + /* Functions to produce a dump of a given domain +@@ -500,6 +501,100 @@ int xc_domain_setdebugging(int xc_handle + unsigned int enable); + + /* ++ * CPUPOOL MANAGEMENT FUNCTIONS ++ */ ++ ++typedef struct xc_cpupoolinfo { ++ uint32_t cpupool_id; ++ uint32_t sched_id; ++ uint32_t n_dom; ++ uint64_t cpumap; ++} xc_cpupoolinfo_t; ++ ++/** ++ * Create a new cpupool. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm ppoolid pointer to the new cpupool id (in/out) ++ * @parm sched_id id of scheduler to use for pool ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_create(int xc_handle, ++ uint32_t *ppoolid, ++ uint32_t sched_id); ++ ++/** ++ * Destroy a cpupool. Pool must be unused and have no cpu assigned. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the cpupool to destroy ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_destroy(int xc_handle, ++ uint32_t poolid); ++ ++/** ++ * Get cpupool info. Returns info for up to the specified number of cpupools ++ * starting at the given id. ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm first_poolid lowest id for which info is returned ++ * @parm n_max maximum number of cpupools to return info ++ * @parm info pointer to xc_cpupoolinfo_t array ++ * return number of cpupool infos ++ */ ++int xc_cpupool_getinfo(int xc_handle, ++ uint32_t first_poolid, ++ uint32_t n_max, ++ xc_cpupoolinfo_t *info); ++ ++/** ++ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the cpupool ++ * @parm cpu cpu number to add ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_addcpu(int xc_handle, ++ uint32_t poolid, ++ int cpu); ++ ++/** ++ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the cpupool ++ * @parm cpu cpu number to remove ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_removecpu(int xc_handle, ++ uint32_t poolid, ++ int cpu); ++ ++/** ++ * Move domain to another cpupool. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the destination cpupool ++ * @parm domid id of the domain to move ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_movedomain(int xc_handle, ++ uint32_t poolid, ++ uint32_t domid); ++ ++/** ++ * Return map of cpus not in any cpupool. 
++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm cpumap pointer where to store the cpumap ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_freeinfo(int xc_handle, ++ uint64_t *cpumap); ++ ++ ++/* + * EVENT CHANNEL FUNCTIONS + */ + diff --git a/cpu-pools-libxen.patch b/cpu-pools-libxen.patch new file mode 100644 index 0000000..bb4ab33 --- /dev/null +++ b/cpu-pools-libxen.patch @@ -0,0 +1,2180 @@ +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_all.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_all.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_all.h +@@ -37,4 +37,5 @@ + #include + #include + #include ++#include + #endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool.h +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool.h +@@ -0,0 +1,424 @@ ++/* ++ * Copyright (c) 2006-2007, XenSource Inc. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#ifndef XEN_CPU_POOL_H ++#define XEN_CPU_POOL_H ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * The cpu_pool class. ++ * ++ * Management of CPU pools. ++ */ ++ ++ ++/** ++ * Free the given xen_cpu_pool. The given handle must have been allocated ++ * by this library. ++ */ ++extern void ++xen_cpu_pool_free(xen_cpu_pool cpu_pool); ++ ++ ++typedef struct xen_cpu_pool_set ++{ ++ size_t size; ++ xen_cpu_pool *contents[]; ++} xen_cpu_pool_set; ++ ++/** ++ * Allocate a xen_cpu_pool_set of the given size. ++ */ ++extern xen_cpu_pool_set * ++xen_cpu_pool_set_alloc(size_t size); ++ ++/** ++ * Free the given xen_cpu_pool_set. The given set must have been allocated ++ * by this library. ++ */ ++extern void ++xen_cpu_pool_set_free(xen_cpu_pool_set *set); ++ ++ ++typedef struct xen_cpu_pool_record ++{ ++ xen_cpu_pool handle; ++ char *uuid; ++ char *name_label; ++ char *name_description; ++ struct xen_host_record_opt *resident_on; ++ bool auto_power_on; ++ struct xen_vm_record_opt_set *started_vms; ++ int64_t ncpu; ++ char *sched_policy; ++ struct xen_string_set *proposed_cpus; ++ struct xen_host_cpu_record_opt_set *host_cpus; ++ bool activated; ++ xen_string_string_map *other_config; ++} xen_cpu_pool_record; ++ ++/** ++ * Allocate a xen_cpu_pool_record. ++ */ ++extern xen_cpu_pool_record * ++xen_cpu_pool_record_alloc(void); ++ ++/** ++ * Free the given xen_cpu_pool_record, and all referenced values. The given ++ * record must have been allocated by this library. 
++ */
++extern void
++xen_cpu_pool_record_free(xen_cpu_pool_record *record);
++
++
++typedef struct xen_cpu_pool_record_opt
++{
++    bool is_record;
++    union
++    {
++        xen_cpu_pool handle;
++        xen_cpu_pool_record *record;
++    } u;
++} xen_cpu_pool_record_opt;
++
++/**
++ * Allocate a xen_cpu_pool_record_opt.
++ */
++extern xen_cpu_pool_record_opt *
++xen_cpu_pool_record_opt_alloc(void);
++
++/**
++ * Free the given xen_cpu_pool_record_opt, and all referenced values.  The
++ * given record_opt must have been allocated by this library.
++ */
++extern void
++xen_cpu_pool_record_opt_free(xen_cpu_pool_record_opt *record_opt);
++
++
++typedef struct xen_cpu_pool_record_set
++{
++    size_t size;
++    xen_cpu_pool_record *contents[];
++} xen_cpu_pool_record_set;
++
++/**
++ * Allocate a xen_cpu_pool_record_set of the given size.
++ */
++extern xen_cpu_pool_record_set *
++xen_cpu_pool_record_set_alloc(size_t size);
++
++/**
++ * Free the given xen_cpu_pool_record_set, and all referenced values.  The
++ * given set must have been allocated by this library.
++ */
++extern void
++xen_cpu_pool_record_set_free(xen_cpu_pool_record_set *set);
++
++
++
++typedef struct xen_cpu_pool_record_opt_set
++{
++    size_t size;
++    xen_cpu_pool_record_opt *contents[];
++} xen_cpu_pool_record_opt_set;
++
++/**
++ * Allocate a xen_cpu_pool_record_opt_set of the given size.
++ */
++extern xen_cpu_pool_record_opt_set *
++xen_cpu_pool_record_opt_set_alloc(size_t size);
++
++/**
++ * Free the given xen_cpu_pool_record_opt_set, and all referenced values.
++ * The given set must have been allocated by this library.
++ */
++extern void
++xen_cpu_pool_record_opt_set_free(xen_cpu_pool_record_opt_set *set);
++
++
++/**
++ * Get a record containing the current state of the given cpu_pool.
++ */
++extern bool
++xen_cpu_pool_get_record(xen_session *session, xen_cpu_pool_record **result,
++    xen_cpu_pool cpu_pool);
++
++
++/**
++ * Get a reference to the cpu_pool instance with the specified UUID.
++ */
++extern bool
++xen_cpu_pool_get_by_uuid(xen_session *session, xen_cpu_pool *result, char *uuid);
++
++
++/**
++ * Create a new cpu_pool instance, and return its handle.
++ */
++extern bool
++xen_cpu_pool_create(xen_session *session, xen_cpu_pool *result,
++    xen_cpu_pool_record *record);
++
++
++/**
++ * Destroy the specified cpu_pool instance.
++ */
++extern bool
++xen_cpu_pool_destroy(xen_session *session, xen_cpu_pool cpu_pool);
++
++
++/**
++ * Get the uuid field of the given cpu_pool.
++ */
++extern bool
++xen_cpu_pool_get_uuid(xen_session *session, char **result, xen_cpu_pool cpu_pool);
++
++
++/**
++ * Deactivate the given cpu_pool.
++ */
++extern bool
++xen_cpu_pool_deactivate(xen_session *session, xen_cpu_pool cpu_pool);
++
++
++/**
++ * Activate the given cpu_pool.
++ */
++extern bool
++xen_cpu_pool_activate(xen_session *session, xen_cpu_pool cpu_pool);
++
++
++/**
++ * Add a physical cpu to the active pool.
++ */
++extern bool
++xen_cpu_pool_add_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool,
++    xen_host_cpu host_cpu);
++
++
++/**
++ * Remove a physical cpu from the active pool.
++ */
++extern bool
++xen_cpu_pool_remove_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool,
++    xen_host_cpu host_cpu);
++
++
++/**
++ * Return a list of all the cpu_pools known to the system.
++ */
++extern bool
++xen_cpu_pool_get_all(xen_session *session, struct xen_cpu_pool_set **result);
++
++
++/**
++ * Get references to all cpu_pool instances with the given name label.
++ */ ++extern bool ++xen_cpu_pool_get_by_name_label(xen_session *session, ++ struct xen_cpu_pool_set **result, char *label); ++ ++ ++/** ++ * Get activation state of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_activated(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get auto_power_on option of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_auto_power_on(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get host_cpu refs of all physical cpus of cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_host_CPUs(xen_session *session, struct xen_host_cpu_set **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get name description field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_name_description(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get name label field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_name_label(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get count of physical cpus to attach to cpu_pool on activation. ++ */ ++extern bool ++xen_cpu_pool_get_ncpu(xen_session *session, int64_t *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_proposed_CPUs(xen_session *session, struct xen_string_set **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get the other_config field of the given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_other_config(xen_session *session, xen_string_string_map **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get host the cpu_pool is resident on. ++ */ ++extern bool ++xen_cpu_pool_get_resident_on(xen_session *session, xen_host *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get sched_policy field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_sched_policy(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get set of started vms in given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_started_VMs(xen_session *session, xen_vm_set **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Set auto_power_on field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_auto_power_on(xen_session *session, xen_cpu_pool cpu_pool, ++ bool auto_power_on); ++ ++ ++/** ++ * Set proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_set *proposed_cpus); ++ ++ ++/** ++ * Add a proposed cpu to proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_add_to_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu); ++ ++ ++/** ++ * Remove a proposed cpu from proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_remove_from_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu); ++ ++ ++/** ++ * Set name_label field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_name_label(xen_session *session, xen_cpu_pool cpu_pool, ++ char *label); ++ ++ ++/** ++ * Set name_description field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_name_description(xen_session *session, xen_cpu_pool cpu_pool, ++ char *descr); ++ ++ ++/** ++ * Set ncpu field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_ncpu(xen_session *session, xen_cpu_pool cpu_pool, int64_t ncpu); ++ ++ ++/** ++ * Set the other_config field of the given cpu_pool. 
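++ * Sketch, mirroring the "type"/"bs2000" key-value pair exercised in
++ * test_bindings.c (illustrative only):
++ *
++ *     xen_string_string_map *oc = xen_string_string_map_alloc(1);
++ *     oc->contents[0].key = strdup("type");
++ *     oc->contents[0].val = strdup("bs2000");
++ *     xen_cpu_pool_set_other_config(session, pool, oc);
++ *     xen_string_string_map_free(oc);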
++ */ ++extern bool ++xen_cpu_pool_set_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_string_map *other_config); ++ ++ ++/** ++ * Add the given key-value pair to the other_config field of the given ++ * cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_add_to_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key, char *value); ++ ++ ++/** ++ * Remove the given key and its corresponding value from the ++ * other_config field of the given cpu_pool. If the key is not in that Map, then ++ * do nothing. ++ */ ++extern bool ++xen_cpu_pool_remove_from_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key); ++ ++/** ++ * Set sched_policy of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_sched_policy(xen_session *session, xen_cpu_pool cpu_pool, ++ char *sched_policy); ++ ++ ++#endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool_decl.h +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool_decl.h +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (c) 2006-2007, XenSource Inc. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#ifndef XEN_CPU_POOL_DECL_H ++#define XEN_CPU_POOL_DECL_H ++ ++typedef void *xen_cpu_pool; ++ ++struct xen_cpu_pool_set; ++struct xen_cpu_pool_record; ++struct xen_cpu_pool_record_set; ++struct xen_cpu_pool_record_opt; ++struct xen_cpu_pool_record_opt_set; ++ ++#endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_host.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host.h +@@ -29,7 +29,7 @@ + #include + #include + #include +- ++#include + + /* + * The host class. +@@ -91,6 +91,7 @@ typedef struct xen_host_record + struct xen_pbd_record_opt_set *pbds; + struct xen_host_cpu_record_opt_set *host_cpus; + struct xen_host_metrics_record_opt *metrics; ++ struct xen_cpu_pool_record_opt_set *resident_cpu_pools; + } xen_host_record; + + /** +@@ -494,4 +495,11 @@ extern bool + xen_host_get_all(xen_session *session, struct xen_host_set **result); + + ++/** ++ * Get list of resident cpu pools. 
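++ * Sketch, as exercised by pool_tests() in test_bindings.c (error
++ * handling elided):
++ *
++ *     xen_cpu_pool_set *pools = NULL;
++ *     if (xen_host_get_resident_cpu_pools(session, &pools, host))
++ *         printf("%d resident pool(s)\n", (int)pools->size);
++ *     xen_cpu_pool_set_free(pools);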
++ */ ++extern bool ++xen_host_get_resident_cpu_pools(xen_session *session, struct xen_cpu_pool_set **result, ++ xen_host host); ++ + #endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host_cpu.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_host_cpu.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host_cpu.h +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + + /* +@@ -72,6 +73,7 @@ typedef struct xen_host_cpu_record + char *flags; + char *features; + double utilisation; ++ struct xen_cpu_pool_record_opt_set *cpu_pools; + } xen_host_cpu_record; + + /** +@@ -244,4 +246,18 @@ extern bool + xen_host_cpu_get_all(xen_session *session, struct xen_host_cpu_set **result); + + ++/** ++ * Get the ref of the cpu_pool to which the host_cpu belongs. ++ */ ++extern bool ++xen_host_cpu_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_host_cpu host_cpu); ++ ++ ++/** ++ * Return a list of all the host_cpus not assigned to a cpu_pool. ++ */ ++extern bool ++xen_host_cpu_get_unassigned_cpus(xen_session *session, struct xen_host_cpu_set **result); ++ ++ + #endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_vm.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_vm.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_vm.h +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + + /* +@@ -113,6 +114,8 @@ typedef struct xen_vm_record + struct xen_vm_metrics_record_opt *metrics; + struct xen_vm_guest_metrics_record_opt *guest_metrics; + char *security_label; ++ char *pool_name; ++ struct xen_cpu_pool_record_opt_set *cpu_pool; + } xen_vm_record; + + /** +@@ -905,4 +908,33 @@ xen_vm_set_security_label(xen_session *s + extern bool + xen_vm_get_security_label(xen_session *session, char **result, xen_vm vm); + ++ ++/** ++ * Get the cpu_pool ref field of a domain. ++ */ ++extern bool ++xen_vm_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_vm vm); ++ ++ ++/** ++ * Get the pool_name field of a domain. ++ */ ++extern bool ++xen_vm_get_pool_name(xen_session *session, char **result, xen_vm vm); ++ ++ ++/** ++ * Set the pool_name field of a domain. ++ */ ++extern bool ++xen_vm_set_pool_name(xen_session *session, xen_vm vm, char *pool_name); ++ ++ ++/** ++ * Migrate the VM to another cpu_pool (on the same host). This can only be ++ * called when the specified VM is in the Running state. ++ */ ++extern bool ++xen_vm_cpu_pool_migrate(xen_session *session, xen_vm vm, xen_cpu_pool cpu_pool); ++ + #endif +Index: xen-4.0.0-testing/tools/libxen/src/xen_cpu_pool.c +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/libxen/src/xen_cpu_pool.c +@@ -0,0 +1,671 @@ ++/* ++ * Copyright (c) 2006-2007, XenSource Inc. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++ ++#include ++#include ++ ++#include "xen_internal.h" ++#include ++#include ++#include ++ ++XEN_FREE(xen_cpu_pool) ++XEN_SET_ALLOC_FREE(xen_cpu_pool) ++XEN_ALLOC(xen_cpu_pool_record) ++XEN_SET_ALLOC_FREE(xen_cpu_pool_record) ++XEN_ALLOC(xen_cpu_pool_record_opt) ++XEN_RECORD_OPT_FREE(xen_cpu_pool) ++XEN_SET_ALLOC_FREE(xen_cpu_pool_record_opt) ++ ++ ++static const struct_member xen_cpu_pool_record_struct_members[] = ++ { ++ { .key = "uuid", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, uuid) }, ++ { .key = "name_label", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, name_label) }, ++ { .key = "name_description", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, name_description) }, ++ { .key = "resident_on", ++ .type = &abstract_type_ref, ++ .offset = offsetof(xen_cpu_pool_record, resident_on) }, ++ { .key = "auto_power_on", ++ .type = &abstract_type_bool, ++ .offset = offsetof(xen_cpu_pool_record, auto_power_on) }, ++ { .key = "started_VMs", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_cpu_pool_record, started_vms) }, ++ { .key = "ncpu", ++ .type = &abstract_type_int, ++ .offset = offsetof(xen_cpu_pool_record, ncpu) }, ++ { .key = "sched_policy", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, sched_policy) }, ++ { .key = "proposed_CPUs", ++ .type = &abstract_type_string_set, ++ .offset = offsetof(xen_cpu_pool_record, proposed_cpus) }, ++ { .key = "host_CPUs", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_cpu_pool_record, host_cpus) }, ++ { .key = "activated", ++ .type = &abstract_type_bool, ++ .offset = offsetof(xen_cpu_pool_record, activated) }, ++ { .key = "other_config", ++ .type = &abstract_type_string_string_map, ++ .offset = offsetof(xen_cpu_pool_record, other_config) }, ++ }; ++ ++ ++const abstract_type xen_cpu_pool_record_abstract_type_ = ++ { ++ .typename = STRUCT, ++ .struct_size = sizeof(xen_cpu_pool_record), ++ .member_count = ++ sizeof(xen_cpu_pool_record_struct_members) / sizeof(struct_member), ++ .members = xen_cpu_pool_record_struct_members ++ }; ++ ++ ++void ++xen_cpu_pool_record_free(xen_cpu_pool_record *record) ++{ ++ if (record == NULL) ++ { ++ return; ++ } ++ free(record->handle); ++ free(record->uuid); ++ free(record->name_label); ++ free(record->name_description); ++ xen_host_record_opt_free(record->resident_on); ++ xen_vm_record_opt_set_free(record->started_vms); ++ free(record->sched_policy); ++ xen_string_set_free(record->proposed_cpus); ++ xen_host_cpu_record_opt_set_free(record->host_cpus); ++ xen_string_string_map_free(record->other_config); ++ free(record); ++} ++ ++ ++bool ++xen_cpu_pool_get_record(xen_session *session, xen_cpu_pool_record **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = xen_cpu_pool_record_abstract_type_; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_record"); ++ ++ if (session->ok) ++ { ++ (*result)->handle = xen_strdup_((*result)->uuid); ++ } ++ ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_by_uuid(xen_session *session, xen_cpu_pool *result, char *uuid) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = 
&abstract_type_string, ++ .u.string_val = uuid } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_by_uuid"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_create(xen_session *session, xen_cpu_pool *result, ++ xen_cpu_pool_record *record) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &xen_cpu_pool_record_abstract_type_, ++ .u.struct_val = record } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.create"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_destroy(xen_session *session, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ xen_call_(session, "cpu_pool.destroy", param_values, 1, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_uuid(xen_session *session, char **result, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_uuid"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_deactivate(xen_session *session, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ }; ++ ++ xen_call_(session, "cpu_pool.deactivate", param_values, 1, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_activate(xen_session *session, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ }; ++ ++ xen_call_(session, "cpu_pool.activate", param_values, 1, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_add_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_host_cpu host_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = host_cpu }, ++ }; ++ ++ xen_call_(session, "cpu_pool.add_host_CPU_live", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_remove_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_host_cpu host_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = host_cpu }, ++ }; ++ ++ xen_call_(session, "cpu_pool.remove_host_CPU_live", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_all(xen_session *session, struct xen_cpu_pool_set **result) ++{ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ xen_call_(session, "cpu_pool.get_all", NULL, 0, &result_type, result); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_by_name_label(xen_session *session, ++ struct xen_cpu_pool_set **result, char *label) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = label } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_by_name_label"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_activated(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ 
abstract_type result_type = abstract_type_bool; ++ ++ XEN_CALL_("cpu_pool.get_activated"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_auto_power_on(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_bool; ++ ++ XEN_CALL_("cpu_pool.get_auto_power_on"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_host_CPUs(xen_session *session, struct xen_host_cpu_set **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_host_CPUs"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_name_description(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_name_description"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_name_label(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_name_label"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_ncpu(xen_session *session, int64_t *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_int; ++ ++ XEN_CALL_("cpu_pool.get_ncpu"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_proposed_CPUs(xen_session *session, struct xen_string_set **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_proposed_CPUs"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_other_config(xen_session *session, xen_string_string_map **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_string_map; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_other_config"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_resident_on(xen_session *session, xen_host *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_resident_on"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_sched_policy(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_sched_policy"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_started_VMs(xen_session *session, 
xen_vm_set **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_started_VMs"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_auto_power_on(xen_session *session, xen_cpu_pool cpu_pool, ++ bool auto_power_on) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_bool, ++ .u.bool_val = auto_power_on } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_auto_power_on", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_set *proposed_cpus) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string_set, ++ .u.set_val = (arbitrary_set *)proposed_cpus } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_proposed_CPUs", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_add_to_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = proposed_cpu } ++ }; ++ ++ xen_call_(session, "cpu_pool.add_to_proposed_CPUs", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_remove_from_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = proposed_cpu } ++ }; ++ ++ xen_call_(session, "cpu_pool.remove_from_proposed_CPUs", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_name_label(xen_session *session, xen_cpu_pool cpu_pool, ++ char *label) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = label } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_name_label", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_name_description(xen_session *session, xen_cpu_pool cpu_pool, ++ char *descr) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = descr } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_name_description", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_ncpu(xen_session *session, xen_cpu_pool cpu_pool, int64_t ncpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_int, ++ .u.int_val = ncpu } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_ncpu", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_string_map *other_config) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string_string_map, ++ .u.set_val = (arbitrary_set *)other_config } ++ }; ++ ++ xen_call_(session, 
"cpu_pool.set_other_config", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_add_to_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key, char *value) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = key }, ++ { .type = &abstract_type_string, ++ .u.string_val = value } ++ }; ++ ++ xen_call_(session, "cpu_pool.add_to_other_config", param_values, 3, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_remove_from_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = key } ++ }; ++ ++ xen_call_(session, "cpu_pool.remove_from_other_config", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_sched_policy(xen_session *session, xen_cpu_pool cpu_pool, ++ char *sched_policy) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = sched_policy } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_sched_policy", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ +Index: xen-4.0.0-testing/tools/libxen/src/xen_host.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/src/xen_host.c ++++ xen-4.0.0-testing/tools/libxen/src/xen_host.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + + XEN_FREE(xen_host) +@@ -108,7 +109,10 @@ static const struct_member xen_host_reco + .offset = offsetof(xen_host_record, host_cpus) }, + { .key = "metrics", + .type = &abstract_type_ref, +- .offset = offsetof(xen_host_record, metrics) } ++ .offset = offsetof(xen_host_record, metrics) }, ++ { .key = "resident_cpu_pools", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_host_record, resident_cpu_pools) } + }; + + const abstract_type xen_host_record_abstract_type_ = +@@ -148,6 +152,7 @@ xen_host_record_free(xen_host_record *re + xen_pbd_record_opt_set_free(record->pbds); + xen_host_cpu_record_opt_set_free(record->host_cpus); + xen_host_metrics_record_opt_free(record->metrics); ++ xen_cpu_pool_record_opt_set_free(record->resident_cpu_pools); + free(record); + } + +@@ -889,3 +894,22 @@ xen_host_get_uuid(xen_session *session, + XEN_CALL_("host.get_uuid"); + return session->ok; + } ++ ++ ++bool ++xen_host_get_resident_cpu_pools(xen_session *session, struct xen_cpu_pool_set **result, ++ xen_host host) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = host } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("host.get_resident_cpu_pools"); ++ return session->ok; ++} ++ +Index: xen-4.0.0-testing/tools/libxen/src/xen_host_cpu.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/src/xen_host_cpu.c ++++ xen-4.0.0-testing/tools/libxen/src/xen_host_cpu.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + + XEN_FREE(xen_host_cpu) +@@ -66,7 +67,10 @@ static const struct_member xen_host_cpu_ + .offset = offsetof(xen_host_cpu_record, features) }, + { .key = "utilisation", + .type = &abstract_type_float, +- .offset = offsetof(xen_host_cpu_record, utilisation) } ++ .offset = 
offsetof(xen_host_cpu_record, utilisation) }, ++ { .key = "cpu_pool", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_host_cpu_record, cpu_pools) }, + }; + + const abstract_type xen_host_cpu_record_abstract_type_ = +@@ -94,6 +98,7 @@ xen_host_cpu_record_free(xen_host_cpu_re + free(record->stepping); + free(record->flags); + free(record->features); ++ xen_cpu_pool_record_opt_set_free(record->cpu_pools); + free(record); + } + +@@ -315,3 +320,34 @@ xen_host_cpu_get_uuid(xen_session *sessi + XEN_CALL_("host_cpu.get_uuid"); + return session->ok; + } ++ ++ ++bool ++xen_host_cpu_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_host_cpu host_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = host_cpu } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("host_cpu.get_cpu_pool"); ++ return session->ok; ++} ++ ++ ++bool ++xen_host_cpu_get_unassigned_cpus(xen_session *session, struct xen_host_cpu_set **result) ++{ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ xen_call_(session, "host_cpu.get_unassigned_cpus", NULL, 0, &result_type, result); ++ return session->ok; ++} ++ ++ ++ +Index: xen-4.0.0-testing/tools/libxen/src/xen_vm.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/src/xen_vm.c ++++ xen-4.0.0-testing/tools/libxen/src/xen_vm.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + + + XEN_FREE(xen_vm) +@@ -165,7 +166,13 @@ static const struct_member xen_vm_record + .offset = offsetof(xen_vm_record, guest_metrics) }, + { .key = "security_label", + .type = &abstract_type_string, +- .offset = offsetof(xen_vm_record, security_label) } ++ .offset = offsetof(xen_vm_record, security_label) }, ++ { .key = "pool_name", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_vm_record, pool_name) }, ++ { .key = "cpu_pool", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_vm_record, cpu_pool) }, + }; + + const abstract_type xen_vm_record_abstract_type_ = +@@ -209,6 +216,7 @@ xen_vm_record_free(xen_vm_record *record + xen_string_string_map_free(record->other_config); + xen_vm_metrics_record_opt_free(record->metrics); + xen_vm_guest_metrics_record_opt_free(record->guest_metrics); ++ xen_cpu_pool_record_opt_set_free(record->cpu_pool); + free(record->security_label); + free(record); + } +@@ -1781,3 +1789,71 @@ xen_vm_get_security_label(xen_session *s + XEN_CALL_("VM.get_security_label"); + return session->ok; + } ++ ++ ++bool ++xen_vm_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_vm vm) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("VM.get_cpu_pool"); ++ return session->ok; ++} ++ ++ ++bool ++xen_vm_get_pool_name(xen_session *session, char **result, xen_vm vm) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("VM.get_pool_name"); ++ return session->ok; ++} ++ ++ ++bool ++xen_vm_set_pool_name(xen_session *session, xen_vm vm, char *pool_name) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ { .type = &abstract_type_string, ++ .u.string_val = pool_name } ++ }; 
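++    /* Two string parameters: the VM reference and the new pool name.
++       xen_call_ issues the XML-RPC request and records success in
++       session->ok. */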
++ ++ xen_call_(session, "VM.set_pool_name", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_vm_cpu_pool_migrate(xen_session *session, xen_vm vm, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ xen_call_(session, "VM.cpu_pool_migrate", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ +Index: xen-4.0.0-testing/tools/libxen/test/test_bindings.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/test/test_bindings.c ++++ xen-4.0.0-testing/tools/libxen/test/test_bindings.c +@@ -28,6 +28,7 @@ + #include + + //#define PRINT_XML ++//////////////#define POOL_TESTS + + static void usage() + { +@@ -125,6 +126,649 @@ static void print_error(xen_session *ses + } + + ++#ifdef POOL_TESTS ++#define NAME_DESCRIPTION "TestPool" ++#define NAME_DESCRIPTION_2 "TestPool-2" ++#define NAME_LABEL "Pool-1" ++#define NAME_LABEL_2 "Pool-2" ++#define SCHED_NAME "credit" ++#define NCPU_VAL 2 ++#define NCPU_VAL_2 1 ++ ++ ++static int pool_tests(xen_session *session, xen_host host) ++{ ++ int rc = 1; ++ xen_cpu_pool_set *pools = NULL; ++ xen_host_record *host_record = NULL; ++ xen_cpu_pool_record_opt *cpu_pool_opt = NULL; ++ xen_cpu_pool_record *cpu_pool_rec = NULL; ++ xen_host_cpu_set *host_cpu_set = NULL; ++ xen_host_cpu_record *host_cpu_record = NULL; ++ xen_vm_set *vm_set = NULL; ++ xen_cpu_pool pool = NULL; ++ xen_cpu_pool pool_out = NULL; ++ xen_string_string_map *pool_other_config = NULL; ++ xen_vm_record *vm_record = NULL; ++ xen_string_set *proposed_cpus = NULL; ++ xen_host res_host = NULL; ++ char *name_description = NULL; ++ char *name_label = NULL; ++ char *sched_policy = NULL; ++ char *pool_uuid = NULL; ++ int64_t ncpu; ++ ++ for (int loop= 0; loop < 1; loop++) ++ { ++ // Test extensions of class host ++ printf("Test cpu_pool extension of host class -----------------------------------------\n"); ++ ++ printf("host.get_resident_cpu_pools\n"); ++ if (!xen_host_get_resident_cpu_pools(session, &pools, host)) ++ { ++ break; ++ } ++ if (pools->size != 1) ++ { ++ printf("Wrong pool count; only one pool expected\n"); ++ break; ++ } ++ printf("Pool UUID %s\n", (char*)pools->contents[0]); ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ printf("host.get_record\n"); ++ if (!xen_host_get_record(session, &host_record, host)) ++ { ++ break; ++ } ++ printf("Pool count %d\n", (int)host_record->resident_cpu_pools->size); ++ if (host_record->resident_cpu_pools->size != 1) ++ { ++ break; ++ } ++ cpu_pool_opt = host_record->resident_cpu_pools->contents[0]; ++ printf("Pool UUID %s\n", (char*)cpu_pool_opt->u.handle); ++ xen_host_record_free(host_record); ++ host_record = NULL; ++ cpu_pool_opt = NULL; ++ ++ ++ // Test extensions of class host_cpu ++ printf("host_cpu.get_all\n"); ++ if (!xen_host_cpu_get_all(session, &host_cpu_set)) ++ { ++ break; ++ } ++ ++ printf("host_cpu.get_cpu_pool & host_cpu.get_record\n"); ++ for (int i= 0; i < host_cpu_set->size; i++) ++ { ++ if (!xen_host_cpu_get_cpu_pool(session, &pools, host_cpu_set->contents[i])) ++ { ++ break; ++ } ++ if (pools->size > 1) ++ { ++ printf("Wrong pool count (xen_host_cpu_get_cpu_pool)\n"); ++ break; ++ } ++ ++ printf("host_cpu (get_cpu_pool) %s, cpu_pool %s\n", (char*)host_cpu_set->contents[i], ++ pools->size != 0 ? 
(char*)pools->contents[0] : "(None)"); ++ ++ if (!xen_host_cpu_get_record(session, &host_cpu_record, host_cpu_set->contents[i])) ++ { ++ break; ++ } ++ if (host_cpu_record->cpu_pools->size > 1) ++ { ++ printf("Wrong pool count (xen_host_cpu_get_record)\n"); ++ break; ++ } ++ ++ printf("host_cpu (get_record) %s, cpu_pool %s\n", (char*)host_cpu_set->contents[i], ++ host_cpu_record->cpu_pools->size != 0 ++ ? (char*)((xen_cpu_pool_record_opt*)(host_cpu_record->cpu_pools->contents[0])->u.handle) ++ : "(None)"); ++ ++ } ++ xen_host_cpu_record_free(host_cpu_record); ++ host_cpu_record = NULL; ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ printf("host_cpu.get_unassigned_cpus\n"); ++ if (!xen_host_cpu_get_unassigned_cpus(session, &host_cpu_set)) ++ { ++ break; ++ } ++ printf("Free cpus (not bound to a pool)\n"); ++ for (int i= 0; i < host_cpu_set->size; i++) ++ { ++ printf(" cpu UUID %s\n", (char*)host_cpu_set->contents[i]); ++ } ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ ++ ++ printf("vm.get_record\n"); ++ if (!xen_vm_get_all(session, &vm_set)) ++ { ++ break; ++ } ++ ++ if (!xen_vm_get_record(session, &vm_record, vm_set->contents[0])) ++ { ++ break; ++ } ++ printf("VM %s, pool_name %s, cpu_pool %s\n", (char*)vm_set->contents[0], ++ vm_record->pool_name, (char*)vm_record->cpu_pool->contents[0]); ++ ++ xen_vm_record_free(vm_record); ++ vm_record = NULL; ++ ++ printf("vm.get_cpu_pool\n"); ++ if (!xen_vm_get_cpu_pool(session, &pools, vm_set->contents[0])) ++ { ++ break; ++ } ++ printf("vm_get_cpu_pool %s\n", (char*)pools->contents[0]); ++ ++ xen_vm_set_free(vm_set); ++ xen_cpu_pool_set_free(pools); ++ vm_set = NULL; ++ pools = NULL; ++ ++ ++ // Class cpu_pool ++ ++ // create ++ pool_other_config = xen_string_string_map_alloc(1); ++ pool_other_config->contents[0].key = strdup("type"); ++ pool_other_config->contents[0].val = strdup("bs2000"); ++ xen_string_set *proposed_CPUs_set = xen_string_set_alloc(1); ++ proposed_CPUs_set->contents[0] = strdup("3"); ++ ++ xen_cpu_pool_record new_cpu_pool_record = ++ { ++ .name_label = NAME_LABEL, ++ .name_description = NAME_DESCRIPTION, ++ .auto_power_on = false, ++ .ncpu = NCPU_VAL, ++ .sched_policy = SCHED_NAME, ++ .proposed_cpus = proposed_CPUs_set, ++ .other_config = pool_other_config, ++ }; ++ ++ printf("cpu_pool.create\n"); ++ if (!xen_cpu_pool_create(session, &pool, &new_cpu_pool_record)) ++ { ++ break; ++ } ++ printf("New Pool UUID %s\n", (char*)pool); ++ xen_string_set_free(proposed_CPUs_set); ++ proposed_CPUs_set = NULL; ++ xen_string_string_map_free(pool_other_config); ++ pool_other_config = NULL; ++ ++ // get_by_name_label ++ printf("cpu_pool.get_by_name_label\n"); ++ if (!xen_cpu_pool_get_by_name_label(session, &pools, "Pool-1")) ++ { ++ break; ++ } ++ if (strcmp((char*)pools->contents[0], (char*)pool) != 0) ++ { ++ break; ++ } ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ ++ // get_by_uuid ++ printf("cpu_pool.get_by_uuid\n"); ++ if (!xen_cpu_pool_get_by_uuid(session, &pool_out, pool)) ++ { ++ break; ++ } ++ if (strcmp((char*)pool_out, (char*)pool) != 0) ++ { ++ printf("Wrong pool returned\n"); ++ break; ++ } ++ xen_cpu_pool_free(pool_out); ++ pool_out = NULL; ++ ++ // get_all ++ printf("cpu_pool.get_all\n"); ++ if (!xen_cpu_pool_get_all(session, &pools)) ++ { ++ break; ++ } ++ if (pools->size != 2) ++ { ++ printf("Wrong pool count (%d)\n", (int)pools->size); ++ break; ++ } ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ ++ // 
get_activated
++        printf("cpu_pool.get_activated\n");
++        bool activated_state = true;
++        if (!xen_cpu_pool_get_activated(session, &activated_state, pool))
++        {
++            break;
++        }
++        if (activated_state)
++        {
++            printf("Pool must not be activated\n");
++            break;
++        }
++
++
++        // get_auto_power_on
++        printf("cpu_pool.get_auto_power_on\n");
++        bool power_state = true;
++        if (!xen_cpu_pool_get_auto_power_on(session, &power_state, pool))
++        {
++            break;
++        }
++        if (power_state)
++        {
++            printf("Pool must not have attribute 'auto_power_on'\n");
++            break;
++        }
++
++        // get_host_CPUs
++        printf("cpu_pool.get_host_CPUs\n");
++        if (!xen_cpu_pool_get_host_CPUs(session, &host_cpu_set, pool))
++        {
++            break;
++        }
++        if (host_cpu_set->size != 0)
++        {
++            printf("Pool must not have any attached cpus\n");
++            break;
++        }
++        xen_host_cpu_set_free(host_cpu_set);
++        host_cpu_set = NULL;
++
++
++        // get_name_description
++        printf("cpu_pool.get_name_description\n");
++        if (!xen_cpu_pool_get_name_description(session, &name_description, pool))
++        {
++            break;
++        }
++        if (strcmp(NAME_DESCRIPTION, name_description) != 0)
++        {
++            printf("Pool has wrong name_description\n");
++            break;
++        }
++        free(name_description);
++        name_description = NULL;
++
++
++        // get_name_label
++        printf("cpu_pool.get_name_label\n");
++        if (!xen_cpu_pool_get_name_label(session, &name_label, pool))
++        {
++            break;
++        }
++        if (strcmp(NAME_LABEL, name_label) != 0)
++        {
++            printf("Pool has wrong name_label\n");
++            break;
++        }
++        free(name_label);
++        name_label = NULL;
++
++        // get_ncpu
++        printf("cpu_pool.get_ncpu\n");
++        if (!xen_cpu_pool_get_ncpu(session, &ncpu, pool))
++        {
++            break;
++        }
++        if (NCPU_VAL != ncpu)
++        {
++            printf("Pool has wrong ncpu\n");
++            break;
++        }
++
++        // get_proposed_CPUs
++        printf("cpu_pool.get_proposed_CPUs\n");
++        if (!xen_cpu_pool_get_proposed_CPUs(session, &proposed_cpus, pool))
++        {
++            break;
++        }
++        if (proposed_cpus->size != 1)
++        {
++            printf("Pool has wrong proposed_cpus count\n");
++            break;
++        }
++        xen_string_set_free(proposed_cpus);
++        proposed_cpus = NULL;
++
++
++        // get_other_config
++        printf("cpu_pool.get_other_config\n");
++        if (!xen_cpu_pool_get_other_config(session, &pool_other_config, pool))
++        {
++            break;
++        }
++        if (pool_other_config->size != 1)
++        {
++            printf("Pool has wrong other_config element count\n");
++            break;
++        }
++        if ((strcmp(pool_other_config->contents[0].key, "type") != 0) ||
++            (strcmp(pool_other_config->contents[0].val, "bs2000") != 0))
++        {
++            printf("Pool has wrong other_config attributes\n");
++            break;
++        }
++        xen_string_string_map_free(pool_other_config);
++        pool_other_config = NULL;
++
++
++        // get_record
++        printf("cpu_pool.get_record\n");
++        if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool))
++        {
++            break;
++        }
++        if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL) != 0) ||
++             (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION) != 0) ||
++             (cpu_pool_rec->auto_power_on) ||
++             (cpu_pool_rec->ncpu != NCPU_VAL) ||
++             (cpu_pool_rec->started_vms->size != 0) ||
++             (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) ||
++             (cpu_pool_rec->proposed_cpus->size != 1) ||
++             (cpu_pool_rec->host_cpus->size != 0) ||
++             (cpu_pool_rec->activated) ||
++             (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) ||
++             (strcmp(cpu_pool_rec->uuid, pool) != 0) ||
++             (cpu_pool_rec->other_config->size != 1))
++        {
++            printf("Wrong record output\n");
++            break;
++        }
++        xen_cpu_pool_record_free(cpu_pool_rec);
++        cpu_pool_rec = NULL;
++
++
++        // get_resident_on
++        printf("cpu_pool.get_resident_on\n");
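++        /* The pool created above is still deactivated; the remaining
++           getters, the setters, and the activate/hot-plug/deactivate
++           lifecycle are verified below. */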
++        if (!xen_cpu_pool_get_resident_on(session, &res_host, pool))
++        {
++            break;
++        }
++        if (strcmp(res_host, host) != 0)
++        {
++            printf("Wrong resident host returned\n");
++            break;
++        }
++        xen_host_free(res_host);
++        res_host = NULL;
++
++
++        // get_sched_policy
++        printf("cpu_pool.get_sched_policy\n");
++        if (!xen_cpu_pool_get_sched_policy(session, &sched_policy, pool))
++        {
++            break;
++        }
++        if (strcmp(sched_policy, SCHED_NAME) != 0)
++        {
++            printf("Wrong sched_policy returned\n");
++            break;
++        }
++        free(sched_policy);
++        sched_policy = NULL;
++
++
++        // get_started_VMs
++        printf("cpu_pool.get_started_VMs\n");
++        if (!xen_cpu_pool_get_started_VMs(session, &vm_set, pool))
++        {
++            break;
++        }
++        if (vm_set->size != 0)
++        {
++            printf("Wrong count of started VMs\n");
++            break;
++        }
++        xen_vm_set_free(vm_set);
++        vm_set = NULL;
++
++
++        // get_uuid
++        printf("cpu_pool.get_uuid\n");
++        if (!xen_cpu_pool_get_uuid(session, &pool_uuid, pool))
++        {
++            break;
++        }
++        if (strcmp(pool_uuid, pool) != 0)
++        {
++            printf("Wrong Pool UUID returned\n");
++            break;
++        }
++        free(pool_uuid);
++        pool_uuid = NULL;
++
++
++        // set_auto_power_on
++        printf("cpu_pool.set_auto_power_on\n");
++        if (!xen_cpu_pool_set_auto_power_on(session, pool, true))
++            break;
++
++
++        // set_proposed_CPUs
++        printf("cpu_pool.set_proposed_CPUs\n");
++        proposed_CPUs_set = xen_string_set_alloc(2);
++        proposed_CPUs_set->contents[0] = strdup("2");
++        proposed_CPUs_set->contents[1] = strdup("4");
++        if (!xen_cpu_pool_set_proposed_CPUs(session, pool, proposed_CPUs_set))
++            break;
++        xen_string_set_free(proposed_CPUs_set);
++        proposed_CPUs_set = NULL;
++
++
++        // add_to_proposed_CPUs
++        printf("cpu_pool.add_to_proposed_CPUs\n");
++        if (!xen_cpu_pool_add_to_proposed_CPUs(session, pool, "3"))
++            break;
++
++
++        // remove_from_proposed_CPUs
++        printf("cpu_pool.remove_from_proposed_CPUs\n");
++        if (!xen_cpu_pool_remove_from_proposed_CPUs(session, pool, "4"))
++            break;
++
++
++        // set_name_label
++        printf("cpu_pool.set_name_label\n");
++        if (!xen_cpu_pool_set_name_label(session, pool, NAME_LABEL_2))
++            break;
++
++
++        // set_name_description
++        printf("cpu_pool.set_name_description\n");
++        if (!xen_cpu_pool_set_name_description(session, pool, NAME_DESCRIPTION_2))
++            break;
++
++
++        // set_ncpu
++        printf("cpu_pool.set_ncpu\n");
++        if (!xen_cpu_pool_set_ncpu(session, pool, NCPU_VAL_2))
++            break;
++
++
++        // set_other_config
++        printf("cpu_pool.set_other_config\n");
++        pool_other_config = xen_string_string_map_alloc(2);
++        pool_other_config->contents[0].key = strdup("test1");
++        pool_other_config->contents[0].val = strdup("field1");
++        pool_other_config->contents[1].key = strdup("test2");
++        pool_other_config->contents[1].val = strdup("field2");
++        if (!xen_cpu_pool_set_other_config(session, pool, pool_other_config))
++            break;
++        xen_string_string_map_free(pool_other_config);
++        pool_other_config = NULL;
++
++
++        // add_to_other_config
++        printf("cpu_pool.add_to_other_config\n");
++        if (!xen_cpu_pool_add_to_other_config(session, pool, "test3", "field3"))
++            break;
++
++
++        // remove_from_other_config
++        printf("cpu_pool.remove_from_other_config\n");
++        if (!xen_cpu_pool_remove_from_other_config(session, pool, "test2"))
++            break;
++
++
++        // set_sched_policy
++        printf("cpu_pool.set_sched_policy\n");
++        if (!xen_cpu_pool_set_sched_policy(session, pool, SCHED_NAME))
++            break;
++
++
++        // check get_record again
++        printf("check cpu_pool record\n");
++        if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool))
++        {
++            break;
++        }
++        if ( 
(strcmp(cpu_pool_rec->name_label, NAME_LABEL_2) != 0) || ++ (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION_2) != 0) || ++ (!cpu_pool_rec->auto_power_on) || ++ (cpu_pool_rec->ncpu != NCPU_VAL_2) || ++ (cpu_pool_rec->started_vms->size != 0) || ++ (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) || ++ (cpu_pool_rec->proposed_cpus->size != 2) || ++ (cpu_pool_rec->host_cpus->size != 0) || ++ (cpu_pool_rec->activated) || ++ (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) || ++ (strcmp(cpu_pool_rec->uuid, pool) != 0) || ++ (cpu_pool_rec->other_config->size != 2)) ++ { ++ printf("Wrong record output\n"); ++ break; ++ } ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ cpu_pool_rec = NULL; ++ ++ ++ // activate pool ++ printf("cpu_pool.activate\n"); ++ if (!xen_cpu_pool_activate(session, pool)) ++ break; ++ ++ ++ // add_host_CPU_live ++ printf("cpu_pool.add_host_CPU_live\n"); ++ if (!xen_host_cpu_get_unassigned_cpus(session, &host_cpu_set)) ++ { ++ break; ++ } ++ if (host_cpu_set->size < 1) ++ { ++ printf("No free CPU found\n"); ++ break; ++ } ++ if (!xen_cpu_pool_add_host_CPU_live(session, pool, host_cpu_set->contents[0])) ++ break; ++ ++ ++ // remove_host_CPU_live ++ printf("cpu_pool.remove_host_CPU_live\n"); ++ if (!xen_cpu_pool_remove_host_CPU_live(session, pool, host_cpu_set->contents[0])) ++ break; ++ ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ ++ ++ // check get_record again ++ printf("check cpu_pool record\n"); ++ if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool)) ++ { ++ break; ++ } ++ if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL_2) != 0) || ++ (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION_2) != 0) || ++ (!cpu_pool_rec->auto_power_on) || ++ (cpu_pool_rec->ncpu != NCPU_VAL_2) || ++ (cpu_pool_rec->started_vms->size != 0) || ++ (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) || ++ (cpu_pool_rec->proposed_cpus->size != 2) || ++ (cpu_pool_rec->host_cpus->size != 1) || ++ (!cpu_pool_rec->activated) || ++ (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) || ++ (strcmp(cpu_pool_rec->uuid, pool) != 0) || ++ (cpu_pool_rec->other_config->size != 2)) ++ { ++ printf("Wrong record output\n"); ++ break; ++ } ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ cpu_pool_rec = NULL; ++ ++ ++ // deactivate pool ++ printf("cpu_pool.deactivate\n"); ++ if (!xen_cpu_pool_deactivate(session, pool)) ++ break; ++ ++ ++ // Pool delete ++ if (!xen_cpu_pool_destroy(session, pool)) ++ { ++ break; ++ } ++ xen_cpu_pool_free(pool); ++ pool = NULL; ++ ++ // Tests OK ++ printf("Pool Tests OK\n"); ++ rc= 0; ++ } ++ ++ if (rc != 0) ++ { ++ print_error(session); ++ } ++ ++ xen_cpu_pool_set_free(pools); ++ xen_host_record_free(host_record); ++ xen_cpu_pool_record_opt_free(cpu_pool_opt); ++ xen_host_cpu_set_free(host_cpu_set); ++ xen_host_cpu_record_free(host_cpu_record); ++ xen_vm_set_free(vm_set); ++ xen_cpu_pool_free(pool); ++ xen_cpu_pool_free(pool_out); ++ xen_string_string_map_free(pool_other_config); ++ xen_vm_record_free(vm_record); ++ xen_string_set_free(proposed_cpus); ++ free(name_description); ++ free(name_label); ++ free(sched_policy); ++ free(pool_uuid); ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ xen_host_free(res_host); ++ ++ return rc; ++} ++#endif ++ ++ + int main(int argc, char **argv) + { + if (argc != 4) +@@ -365,6 +1009,11 @@ int main(int argc, char **argv) + + xen_vm_record_free(vm_record); + ++#ifdef POOL_TESTS ++ if (pool_tests(session, host) != 0) ++ return 1; ++#endif ++ + xen_host_free(host); + 
xen_string_string_map_free(versions); + free(dmesg); diff --git a/cpu-pools-python.patch b/cpu-pools-python.patch new file mode 100644 index 0000000..3ad0eea --- /dev/null +++ b/cpu-pools-python.patch @@ -0,0 +1,2543 @@ +Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c ++++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c +@@ -97,17 +97,18 @@ static PyObject *pyxc_domain_create(XcOb + PyObject *args, + PyObject *kwds) + { +- uint32_t dom = 0, ssidref = 0, flags = 0, target = 0; ++ uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0; + int ret, i; + PyObject *pyhandle = NULL; + xen_domain_handle_t handle = { + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef }; + +- static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL }; ++ static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL }; + +- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list, +- &dom, &ssidref, &pyhandle, &flags, &target)) ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom, ++ &ssidref, &pyhandle, &flags, &target, ++ &cpupool)) + return NULL; + if ( pyhandle != NULL ) + { +@@ -124,8 +125,9 @@ static PyObject *pyxc_domain_create(XcOb + } + } + ++ flags |= XEN_DOMCTL_CDF_pool; + if ( (ret = xc_domain_create(self->xc_handle, ssidref, +- handle, flags, &dom)) < 0 ) ++ handle, flags, &dom, cpupool)) < 0 ) + return pyxc_error_to_exception(); + + if ( target ) +@@ -316,7 +318,7 @@ static PyObject *pyxc_domain_getinfo(XcO + { + info_dict = Py_BuildValue( + "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i" +- ",s:L,s:L,s:L,s:i,s:i}", ++ ",s:L,s:L,s:L,s:i,s:i,s:i}", + "domid", (int)info[i].domid, + "online_vcpus", info[i].nr_online_vcpus, + "max_vcpu_id", info[i].max_vcpu_id, +@@ -331,7 +333,8 @@ static PyObject *pyxc_domain_getinfo(XcO + "cpu_time", (long long)info[i].cpu_time, + "maxmem_kb", (long long)info[i].max_memkb, + "ssidref", (int)info[i].ssidref, +- "shutdown_reason", info[i].shutdown_reason); ++ "shutdown_reason", info[i].shutdown_reason, ++ "cpupool", (int)info[i].cpupool); + pyhandle = PyList_New(sizeof(xen_domain_handle_t)); + if ( (pyhandle == NULL) || (info_dict == NULL) ) + { +@@ -1697,6 +1700,179 @@ static PyObject *pyxc_dom_set_memshr(XcO + return zero; + } + ++static PyObject *cpumap_to_cpulist(uint64_t cpumap) ++{ ++ PyObject *cpulist = NULL; ++ uint32_t i; ++ ++ cpulist = PyList_New(0); ++ for ( i = 0; cpumap != 0; i++ ) ++ { ++ if ( cpumap & 1 ) ++ { ++ PyObject* pyint = PyInt_FromLong(i); ++ ++ PyList_Append(cpulist, pyint); ++ Py_DECREF(pyint); ++ } ++ cpumap >>= 1; ++ } ++ return cpulist; ++} ++ ++static PyObject *pyxc_cpupool_create(XcObject *self, ++ PyObject *args, ++ PyObject *kwds) ++{ ++ uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT; ++ ++ static char *kwd_list[] = { "pool", "sched", NULL }; ++ ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &cpupool, ++ &sched)) ++ return NULL; ++ ++ if ( xc_cpupool_create(self->xc_handle, &cpupool, sched) < 0 ) ++ return pyxc_error_to_exception(); ++ ++ return PyInt_FromLong(cpupool); ++} ++ ++static PyObject *pyxc_cpupool_destroy(XcObject *self, ++ PyObject *args) ++{ ++ uint32_t cpupool; ++ ++ if (!PyArg_ParseTuple(args, "i", &cpupool)) ++ return NULL; ++ ++ if (xc_cpupool_destroy(self->xc_handle, cpupool) != 0) ++ return pyxc_error_to_exception(); 
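++    /* Success path: hand back the module-level 'zero' object (a Python
++       integer 0) with a fresh reference, matching the convention of the
++       other pyxc methods. */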
++
++    Py_INCREF(zero);
++    return zero;
++}
++
++static PyObject *pyxc_cpupool_getinfo(XcObject *self,
++                                      PyObject *args,
++                                      PyObject *kwds)
++{
++    PyObject *list, *info_dict;
++
++    uint32_t first_pool = 0;
++    int max_pools = 1024, nr_pools, i;
++    xc_cpupoolinfo_t *info;
++
++    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
++
++    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
++                                      &first_pool, &max_pools) )
++        return NULL;
++
++    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
++    if (info == NULL)
++        return PyErr_NoMemory();
++
++    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
++
++    if (nr_pools < 0)
++    {
++        free(info);
++        return pyxc_error_to_exception();
++    }
++
++    list = PyList_New(nr_pools);
++    for ( i = 0 ; i < nr_pools; i++ )
++    {
++        info_dict = Py_BuildValue(
++            "{s:i,s:i,s:i,s:N}",
++            "cpupool",         (int)info[i].cpupool_id,
++            "sched",           info[i].sched_id,
++            "n_dom",           info[i].n_dom,
++            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
++        if ( info_dict == NULL )
++        {
++            /* info_dict is NULL here, so only the list needs releasing */
++            Py_DECREF(list);
++            free(info);
++            return NULL;
++        }
++        PyList_SetItem(list, i, info_dict);
++    }
++
++    free(info);
++
++    return list;
++}
++
++static PyObject *pyxc_cpupool_addcpu(XcObject *self,
++                                     PyObject *args,
++                                     PyObject *kwds)
++{
++    uint32_t cpupool;
++    int cpu = -1;
++
++    static char *kwd_list[] = { "cpupool", "cpu", NULL };
++
++    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
++                                      &cpupool, &cpu) )
++        return NULL;
++
++    if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0)
++        return pyxc_error_to_exception();
++
++    Py_INCREF(zero);
++    return zero;
++}
++
++static PyObject *pyxc_cpupool_removecpu(XcObject *self,
++                                        PyObject *args,
++                                        PyObject *kwds)
++{
++    uint32_t cpupool;
++    int cpu = -1;
++
++    static char *kwd_list[] = { "cpupool", "cpu", NULL };
++
++    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
++                                      &cpupool, &cpu) )
++        return NULL;
++
++    if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0)
++        return pyxc_error_to_exception();
++
++    Py_INCREF(zero);
++    return zero;
++}
++
++static PyObject *pyxc_cpupool_movedomain(XcObject *self,
++                                         PyObject *args,
++                                         PyObject *kwds)
++{
++    uint32_t cpupool, domid;
++
++    static char *kwd_list[] = { "cpupool", "domid", NULL };
++
++    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
++                                      &cpupool, &domid) )
++        return NULL;
++
++    if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0)
++        return pyxc_error_to_exception();
++
++    Py_INCREF(zero);
++    return zero;
++}
++
++static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
++{
++    uint64_t cpumap;
++
++    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
++        return pyxc_error_to_exception();
++
++    return cpumap_to_cpulist(cpumap);
++}
+ 
+ static PyMethodDef pyxc_methods[] = {
+     { "handle",
+@@ -1812,7 +1988,8 @@ static PyMethodDef pyxc_methods[] = {
+       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
+       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
+       " shutdown_reason [int]: Numeric code from guest OS, explaining "
+-      "reason why it shut itself down.\n" },
++      "reason why it shut itself down.\n"
++      " cpupool [int]: Id of the cpupool the domain is bound to.\n" },
+ 
+     { "vcpu_getinfo",
+       (PyCFunction)pyxc_vcpu_getinfo,
+@@ -2210,6 +2387,66 @@ static PyMethodDef pyxc_methods[] = {
+       " enable [int,0|1]: Disable or enable?\n"
+       "Returns: [int] 0 on success; -1 on error.\n" },
+ 
++    { "cpupool_create",
++      (PyCFunction)pyxc_cpupool_create,
++      METH_VARARGS |
METH_KEYWORDS, "\n" ++ "Create new cpupool.\n" ++ " pool [int, 0]: cpupool identifier to use (allocated if zero).\n" ++ " sched [int]: scheduler to use (credit if unspecified).\n\n" ++ "Returns: [int] new cpupool identifier; -1 on error.\n" }, ++ ++ { "cpupool_destroy", ++ (PyCFunction)pyxc_cpupool_destroy, ++ METH_VARARGS, "\n" ++ "Destroy a cpupool.\n" ++ " pool [int]: Identifier of cpupool to be destroyed.\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_getinfo", ++ (PyCFunction)pyxc_cpupool_getinfo, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Get information regarding a set of cpupools, in increasing id order.\n" ++ " first_pool [int, 0]: First cpupool to retrieve info about.\n" ++ " max_pools [int, 1024]: Maximum number of cpupools to retrieve info" ++ " about.\n\n" ++ "Returns: [list of dicts] if list length is less than 'max_pools'\n" ++ " parameter then there was an error, or the end of the\n" ++ " cpupool-id space was reached.\n" ++ " pool [int]: Identifier of cpupool to which this info pertains\n" ++ " sched [int]: Scheduler used for this cpupool\n" ++ " n_dom [int]: Number of Domains in this cpupool\n" ++ " cpulist [list]: List of CPUs this cpupool is using\n" }, ++ ++ { "cpupool_addcpu", ++ (PyCFunction)pyxc_cpupool_addcpu, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Add a cpu to a cpupool.\n" ++ " pool [int]: Identifier of cpupool.\n" ++ " cpu [int, -1]: Cpu to add (lowest free if -1)\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_removecpu", ++ (PyCFunction)pyxc_cpupool_removecpu, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Remove a cpu from a cpupool.\n" ++ " pool [int]: Identifier of cpupool.\n" ++ " cpu [int, -1]: Cpu to remove (highest used if -1)\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_movedomain", ++ (PyCFunction)pyxc_cpupool_movedomain, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Move a domain to another cpupool.\n" ++ " pool [int]: Identifier of cpupool to move domain to.\n" ++ " dom [int]: Domain to move\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_freeinfo", ++ (PyCFunction)pyxc_cpupool_freeinfo, ++ METH_NOARGS, "\n" ++ "Get info about cpus not in any cpupool.\n" ++ "Returns: [list]: List of CPUs\n" }, ++ + { NULL, NULL, 0, NULL } + }; + +Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/python/xen/util/sxputils.py +@@ -0,0 +1,64 @@ ++#============================================================================ ++# This library is free software; you can redistribute it and/or ++# modify it under the terms of version 2.1 of the GNU Lesser General Public ++# License as published by the Free Software Foundation. ++# ++# This library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this library; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++#============================================================================ ++# Copyright (c) 2009 Fujitsu Technology Solutions ++#============================================================================ ++ ++""" convert sxp to map / map to sxp. 
++""" ++ ++import types ++from xen.xend import sxp ++ ++def map2sxp(map_val): ++ """ conversion of all key-value pairs of a map (recursively) to sxp. ++ @param map_val: map; if a value contains a list or dict it is also ++ converted to sxp ++ @type map_val: dict ++ @return sxp expr ++ @rtype: list ++ """ ++ sxp_vals = [] ++ for (k, v) in map_val.items(): ++ if isinstance(v, types.DictionaryType): ++ sxp_vals += [[k] + map2sxp(v)] ++ elif isinstance(v, types.ListType): ++ sxp_vals += [[k] + v] ++ else: ++ sxp_vals += [[k, v]] ++ return sxp_vals ++ ++def sxp2map( s ): ++ """ conversion of sxp to map. ++ @param s: sxp expr ++ @type s: list ++ @return: map ++ @rtype: dict ++ """ ++ sxphash = {} ++ ++ for child in sxp.children( s ): ++ if isinstance( child, types.ListType ) and len( child ) > 1: ++ if isinstance( child[1], types.ListType ) and len( child[1] ) > 1: ++ sxphash[ child[0] ] = sxp2map( child ) ++ else: ++ childs = sxp.children(child) ++ if len(childs) > 1: ++ sxphash[ child[0] ] = childs ++ else: ++ sxphash[ child[0] ] = childs[0] ++ ++ return sxphash ++ ++ +Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendAPI.py ++++ xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py +@@ -51,6 +51,7 @@ from XendDPCI import XendDPCI + from XendPSCSI import XendPSCSI, XendPSCSI_HBA + from XendDSCSI import XendDSCSI, XendDSCSI_HBA + from XendXSPolicy import XendXSPolicy, XendACMPolicy ++from xen.xend.XendCPUPool import XendCPUPool + + from XendAPIConstants import * + from xen.util.xmlrpclib2 import stringify +@@ -498,6 +499,7 @@ classes = { + 'PSCSI_HBA' : valid_object("PSCSI_HBA"), + 'DSCSI' : valid_object("DSCSI"), + 'DSCSI_HBA' : valid_object("DSCSI_HBA"), ++ 'cpu_pool' : valid_object("cpu_pool"), + } + + autoplug_classes = { +@@ -514,6 +516,7 @@ autoplug_classes = { + 'DSCSI_HBA' : XendDSCSI_HBA, + 'XSPolicy' : XendXSPolicy, + 'ACMPolicy' : XendACMPolicy, ++ 'cpu_pool' : XendCPUPool, + } + + class XendAPI(object): +@@ -914,7 +917,8 @@ class XendAPI(object): + 'API_version_minor', + 'API_version_vendor', + 'API_version_vendor_implementation', +- 'enabled'] ++ 'enabled', ++ 'resident_cpu_pools'] + + host_attr_rw = ['name_label', + 'name_description', +@@ -1014,6 +1018,8 @@ class XendAPI(object): + return xen_api_todo() + def host_get_logging(self, _, host_ref): + return xen_api_todo() ++ def host_get_resident_cpu_pools(self, _, host_ref): ++ return xen_api_success(XendCPUPool.get_all()) + + # object methods + def host_disable(self, session, host_ref): +@@ -1076,7 +1082,9 @@ class XendAPI(object): + 'PBDs': XendPBD.get_all(), + 'PPCIs': XendPPCI.get_all(), + 'PSCSIs': XendPSCSI.get_all(), +- 'PSCSI_HBAs': XendPSCSI_HBA.get_all()} ++ 'PSCSI_HBAs': XendPSCSI_HBA.get_all(), ++ 'resident_cpu_pools': XendCPUPool.get_all(), ++ } + return xen_api_success(record) + + def host_tmem_thaw(self, _, host_ref, cli_id): +@@ -1185,7 +1193,10 @@ class XendAPI(object): + 'stepping', + 'flags', + 'utilisation', +- 'features'] ++ 'features', ++ 'cpu_pool'] ++ ++ host_cpu_funcs = [('get_unassigned_cpus', 'Set(host_cpu)')] + + # attributes + def _host_cpu_get(self, ref, field): +@@ -1210,21 +1221,28 @@ class XendAPI(object): + return self._host_cpu_get(ref, 'flags') + def host_cpu_get_utilisation(self, _, ref): + return xen_api_success(XendNode.instance().get_host_cpu_load(ref)) ++ def host_cpu_get_cpu_pool(self, _, ref): ++ return xen_api_success(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) + + 
# object methods + def host_cpu_get_record(self, _, ref): + node = XendNode.instance() + record = dict([(f, node.get_host_cpu_field(ref, f)) + for f in self.host_cpu_attr_ro +- if f not in ['uuid', 'host', 'utilisation']]) ++ if f not in ['uuid', 'host', 'utilisation', 'cpu_pool']]) + record['uuid'] = ref + record['host'] = node.uuid + record['utilisation'] = node.get_host_cpu_load(ref) ++ record['cpu_pool'] = XendCPUPool.get_cpu_pool_by_cpu_ref(ref) + return xen_api_success(record) + + # class methods + def host_cpu_get_all(self, session): + return xen_api_success(XendNode.instance().get_host_cpu_refs()) ++ def host_cpu_get_unassigned_cpus(self, session): ++ return xen_api_success( ++ [ref for ref in XendNode.instance().get_host_cpu_refs() ++ if len(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) == 0]) + + + # Xen API: Class host_metrics +@@ -1284,6 +1302,7 @@ class XendAPI(object): + 'is_control_domain', + 'metrics', + 'crash_dumps', ++ 'cpu_pool', + ] + + VM_attr_rw = ['name_label', +@@ -1312,7 +1331,9 @@ class XendAPI(object): + 'platform', + 'PCI_bus', + 'other_config', +- 'security_label'] ++ 'security_label', ++ 'pool_name', ++ ] + + VM_methods = [('clone', 'VM'), + ('start', None), +@@ -1340,7 +1361,9 @@ class XendAPI(object): + ('set_memory_dynamic_min_live', None), + ('send_trigger', None), + ('migrate', None), +- ('destroy', None)] ++ ('destroy', None), ++ ('cpu_pool_migrate', None), ++ ] + + VM_funcs = [('create', 'VM'), + ('restore', None), +@@ -1540,6 +1563,17 @@ class XendAPI(object): + return xen_api_success( + xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain()) + ++ def VM_get_cpu_pool(self, session, vm_ref): ++ dom = XendDomain.instance().get_vm_by_uuid(vm_ref) ++ pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool()) ++ return xen_api_success(pool_ref) ++ ++ def VM_get_pool_name(self, session, vm_ref): ++ return self.VM_get('pool_name', session, vm_ref) ++ ++ def VM_set_pool_name(self, session, vm_ref, value): ++ return self.VM_set('pool_name', session, vm_ref, value) ++ + def VM_set_name_label(self, session, vm_ref, label): + dom = XendDomain.instance().get_vm_by_uuid(vm_ref) + dom.setName(label) +@@ -1618,7 +1652,8 @@ class XendAPI(object): + if key.startswith("cpumap"): + vcpu = int(key[6:]) + try: +- xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value) ++ cpus = map(int, value.split(",")) ++ xendom.domain_pincpu(xeninfo.getDomid(), vcpu, cpus) + except Exception, ex: + log.exception(ex) + +@@ -1835,7 +1870,9 @@ class XendAPI(object): + 'is_control_domain': xeninfo.info['is_control_domain'], + 'metrics': xeninfo.get_metrics(), + 'security_label': xeninfo.get_security_label(), +- 'crash_dumps': [] ++ 'crash_dumps': [], ++ 'pool_name': xeninfo.info.get('pool_name'), ++ 'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()), + } + return xen_api_success(record) + +@@ -1933,6 +1970,25 @@ class XendAPI(object): + xendom.domain_restore(src, bool(paused)) + return xen_api_success_void() + ++ def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref): ++ xendom = XendDomain.instance() ++ xeninfo = xendom.get_vm_by_uuid(vm_ref) ++ domid = xeninfo.getDomid() ++ pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass()) ++ if pool == None: ++ return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref]) ++ if domid is not None: ++ if domid == 0: ++ return xen_api_error(['OPERATION_NOT_ALLOWED', ++ 'could not move Domain-0']) ++ try: ++ XendCPUPool.move_domain(cpu_pool_ref, domid) ++ except Exception, ex: ++ return xen_api_error(['INTERNAL_ERROR', ++ 'could not 
move domain']) ++ self.VM_set('pool_name', session, vm_ref, pool.get_name_label()) ++ return xen_api_success_void() ++ + + # Xen API: Class VBD + # ---------------------------------------------------------------- +Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py +@@ -0,0 +1,896 @@ ++#============================================================================ ++# This library is free software; you can redistribute it and/or ++# modify it under the terms of version 2.1 of the GNU Lesser General Public ++# License as published by the Free Software Foundation. ++# ++# This library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this library; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++#============================================================================ ++# Copyright (c) 2009 Fujitsu Technology Solutions. ++#============================================================================ ++ ++""" CPU Pool support including XEN-API and Legacy API. ++""" ++ ++import types ++import threading ++import re ++import xen.lowlevel.xc ++import XendNode ++import XendDomain ++from xen.xend.XendLogging import log ++from xen.xend.XendBase import XendBase ++from xen.xend import XendAPIStore ++from xen.xend.XendConstants import XS_POOLROOT ++from xen.xend import uuid as genuuid ++from xen.xend.XendError import VmError, XendAPIError, PoolError ++from xen.xend.xenstore.xstransact import xstransact ++from xen.util.sxputils import sxp2map, map2sxp ++ ++ ++XEND_ERROR_INTERNAL = 'INTERNAL_ERROR' ++XEND_ERROR_UNKOWN_SCHED_POLICY = 'UNKOWN_SCHED_POLICY' ++XEND_ERROR_BAD_POOL_STATE = 'POOL_BAD_STATE' ++XEND_ERROR_POOL_PARAM = 'PARAMETER_ERROR' ++XEND_ERROR_INSUFFICIENT_CPUS = 'INSUFFICIENT_CPUS' ++XEND_ERROR_POOL_RECONF = 'POOL_RECONF' ++XEND_ERROR_INVALID_CPU = 'INVAILD_CPU' ++XEND_ERROR_LAST_CPU_NOT_REM = 'LAST_CPU_NOT_REMOVEABLE' ++ ++ ++XEN_SCHEDULER_TO_ID = { ++ 'credit' : xen.lowlevel.xc.XEN_SCHEDULER_CREDIT, ++ 'sedf' : xen.lowlevel.xc.XEN_SCHEDULER_SEDF, ++ } ++ ++xc = xen.lowlevel.xc.xc() ++ ++class XendCPUPool(XendBase): ++ """ CPU Pool management. 
++        @ivar pool_lock: Lock to secure modification of pool data
++        @type pool_lock: RLock
++    """
++
++    pool_lock = threading.RLock()
++
++    def getClass(cls):
++        return "cpu_pool"
++
++    def getAttrRO(cls):
++        attrRO = ['resident_on',
++                  'started_VMs',
++                  'host_CPUs',
++                  'activated',
++                 ]
++        return XendBase.getAttrRO() + attrRO
++
++    def getAttrRW(cls):
++        attrRW = ['name_label',
++                  'name_description',
++                  'auto_power_on',
++                  'ncpu',
++                  'sched_policy',
++                  'proposed_CPUs',
++                  'other_config',
++                 ]
++        return XendBase.getAttrRW() + attrRW
++
++    def getMethods(cls):
++        methods = ['destroy',
++                   'activate',
++                   'deactivate',
++                   'add_host_CPU_live',
++                   'remove_host_CPU_live',
++                   'add_to_proposed_CPUs',
++                   'remove_from_proposed_CPUs',
++                   'add_to_other_config',
++                   'remove_from_other_config',
++                  ]
++        return XendBase.getMethods() + methods
++
++    def getFuncs(cls):
++        funcs = ['create',
++                 'get_by_name_label',
++                ]
++        return XendBase.getFuncs() + funcs
++
++    getClass = classmethod(getClass)
++    getAttrRO = classmethod(getAttrRO)
++    getAttrRW = classmethod(getAttrRW)
++    getMethods = classmethod(getMethods)
++    getFuncs = classmethod(getFuncs)
++
++
++    #
++    # XenAPI function calls
++    #
++
++    def create(cls, record):
++        """ Create a new managed pool instance.
++            @param record: attributes of pool
++            @type record: dict
++            @return: uuid of created pool
++            @rtype: str
++        """
++        new_uuid = genuuid.createString()
++        XendCPUPool(record, new_uuid)
++        XendNode.instance().save_cpu_pools()
++        return new_uuid
++
++    create = classmethod(create)
++
++
++    def get_by_name_label(cls, name_label):
++        """ Query pool refs by pool name.
++            @return: list of refs of pools with the given name
++            @rtype: list of str
++        """
++        cls.pool_lock.acquire()
++        try:
++            return [ inst.get_uuid()
++                     for inst in XendAPIStore.get_all(cls.getClass())
++                     if inst.name_label == name_label
++                   ]
++        finally:
++            cls.pool_lock.release()
++
++    get_by_name_label = classmethod(get_by_name_label)
++
++
++    def get_cpu_pool_by_cpu_ref(cls, host_cpu):
++        """ Query the cpu_pool ref the given cpu belongs to.
++            @param host_cpu: ref of host_cpu to lookup
++            @type host_cpu: str
++            @return: list of cpu_pool refs (contains at most one element)
++            @rtype: list of str
++        """
++        node = XendNode.instance()
++        cpu_nr = node.get_host_cpu_field(host_cpu, 'number')
++        for pool_rec in xc.cpupool_getinfo():
++            if cpu_nr in pool_rec['cpulist']:
++                # pool found; return the ref
++                return cls.query_pool_ref(pool_rec['cpupool'])
++        return []
++
++    get_cpu_pool_by_cpu_ref = classmethod(get_cpu_pool_by_cpu_ref)
++
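++    # Illustrative lookup (hypothetical values): for a host_cpu ref whose
++    # 'number' field is 3 and a hypervisor record
++    # {'cpupool': 1, 'cpulist': [2, 3], ...}, get_cpu_pool_by_cpu_ref()
++    # returns the single-element list produced by query_pool_ref(1).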
++
++    def get_all_managed(cls):
++        """ Query all managed pools.
++            @return: uuids of all managed pools
++            @rtype: list of str
++        """
++        cls.pool_lock.acquire()
++        try:
++            managed_pools = [ inst.get_uuid()
++                              for inst in XendAPIStore.get_all(cls.getClass())
++                              if inst.is_managed() ]
++        finally:
++            cls.pool_lock.release()
++        return managed_pools
++
++    get_all_managed = classmethod(get_all_managed)
++
++
++    #
++    # XenAPI method calls
++    #
++
++    def __init__(self, record, new_uuid, managed_pool=True):
++        XendBase.__init__(self, new_uuid, record)
++        try:
++            self._managed = managed_pool
++            self.name_label = None
++
++            name = record.get('name_label', 'Pool-Unnamed')
++            self._checkName(name)
++            self.name_label = name
++            self.name_description = record.get('name_description',
++                                               self.name_label)
++            self.proposed_cpus = [ int(cpu)
++                                   for cpu in record.get('proposed_CPUs', []) ]
++            self.auto_power_on = bool(record.get('auto_power_on', False))
++            self.ncpu = int(record.get('ncpu', 1))
++            self.sched_policy = record.get('sched_policy', '')
++            self.other_config = record.get('other_config', {})
++        except Exception, ex:
++            XendBase.destroy(self)
++            raise ex
++
++
++    def get_resident_on(self):
++        """ Always return uuid of own node.
++            @return: uuid of this node
++            @rtype: str
++        """
++        return XendNode.instance().uuid
++
++    def get_started_VMs(self):
++        """ Query all VMs currently assigned to pool.
++            @return: refs of all VMs assigned to pool; if pool is not active,
++                     an empty list will be returned
++            @rtype: list of str
++        """
++        if self.get_activated():
++            # search VMs related to this pool
++            pool_id = self.query_pool_id()
++            started_VMs = [ vm.get_uuid()
++                            for vm in XendDomain.instance().list('all')
++                            if vm.get_cpu_pool() == pool_id ]
++        else:
++            # pool not active, so it couldn't have any started VMs
++            started_VMs = []
++
++        return started_VMs
++
++    def get_host_CPUs(self):
++        """ Query all cpu refs currently assigned to this pool.
++            - Read pool id of this pool from xenstore
++            - Read cpu configuration from hypervisor
++            - lookup cpu number -> cpu ref
++            @return: host_cpu refs
++            @rtype: list of str
++        """
++        if self.get_activated():
++            node = XendNode.instance()
++            pool_id = self.query_pool_id()
++            if pool_id == None:
++                raise PoolError(XEND_ERROR_INTERNAL,
++                                [self.getClass(), 'get_host_CPUs'])
++            cpus = []
++            for pool_rec in xc.cpupool_getinfo():
++                if pool_rec['cpupool'] == pool_id:
++                    cpus = pool_rec['cpulist']
++
++            # query host_cpu ref for any cpu of the pool
++            host_CPUs = [ cpu_ref
++                          for cpu_ref in node.get_host_cpu_refs()
++                          if node.get_host_cpu_field(cpu_ref, 'number')
++                              in cpus ]
++        else:
++            # pool not active, so it couldn't have any assigned cpus
++            host_CPUs = []
++
++        return host_CPUs
++
++    def get_activated(self):
++        """ Query if the pool is registered in xenstore.
++            If the pool uuid is not in xenstore, the pool is not activated.
++ @return: True, if activated ++ @rtype: bool ++ """ ++ return self.query_pool_id() != None ++ ++ def get_name_label(self): ++ return self.name_label ++ ++ def get_name_description(self): ++ return self.name_description ++ ++ def get_auto_power_on(self): ++ return self.auto_power_on ++ ++ def get_ncpu(self): ++ return self.ncpu ++ ++ def get_sched_policy(self): ++ if len(self.sched_policy) == 0: ++ # default scheduler selected ++ return XendNode.instance().get_vcpus_policy() ++ else: ++ return self.sched_policy ++ ++ def get_proposed_CPUs(self): ++ return [ str(cpu) for cpu in self.proposed_cpus ] ++ ++ def get_other_config(self): ++ return self.other_config ++ ++ def set_name_label(self, name_label): ++ self._checkName(name_label) ++ self.name_label = name_label ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_name_description(self, name_descr): ++ self.name_description = name_descr ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_auto_power_on(self, auto_power_on): ++ self.auto_power_on = bool(int(auto_power_on)) ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_ncpu(self, ncpu): ++ _ncpu = int(ncpu) ++ if _ncpu < 1: ++ raise PoolError(XEND_ERROR_POOL_PARAM, 'ncpu') ++ self.ncpu = _ncpu ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_sched_policy(self, sched_policy): ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ self.sched_policy = sched_policy ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_proposed_CPUs(self, proposed_cpus): ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ self.proposed_cpus = [ int(cpu) for cpu in proposed_cpus ] ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_other_config(self, other_config): ++ self.other_config = other_config ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def destroy(self): ++ """ In order to destroy a cpu pool, it must be deactivated """ ++ self.pool_lock.acquire() ++ try: ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ XendBase.destroy(self) ++ finally: ++ self.pool_lock.release() ++ XendNode.instance().save_cpu_pools() ++ ++ def activate(self): ++ """ Create pool in hypervisor and add cpus. 
++            Preconditions:
++            - pool not already active
++            - enough unbound cpus available
++            Actions:
++            - create pool in hypervisor
++            - select free cpus (preferring those from the proposed_CPUs
++              list) and bind them to the pool
++            - create entries in Xenstore
++        """
++        self.pool_lock.acquire()
++        try:
++            if self.get_activated():
++                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
++            if self.sched_policy != XendNode.instance().get_vcpus_policy():
++                raise PoolError(XEND_ERROR_UNKOWN_SCHED_POLICY)
++            unbound_cpus = set(self.unbound_cpus())
++            if len(unbound_cpus) < self.ncpu:
++                raise PoolError(XEND_ERROR_INSUFFICIENT_CPUS,
++                                [str(self.ncpu), str(len(unbound_cpus))])
++
++            # build list of cpu numbers to bind to pool
++            cpu_set = set(self.proposed_cpus).intersection(unbound_cpus)
++            if len(cpu_set) < self.ncpu:
++                pool_cpus = (list(cpu_set) +
++                             list(unbound_cpus.difference(cpu_set)))
++            else:
++                pool_cpus = list(cpu_set)
++            pool_cpus = pool_cpus[0:self.ncpu]
++
++            # create pool in hypervisor
++            pool_id = xc.cpupool_create(
++                sched = XEN_SCHEDULER_TO_ID.get(self.sched_policy, 0))
++
++            self.update_XS(pool_id)
++            # add cpus
++            for cpu in pool_cpus:
++                xc.cpupool_addcpu(pool_id, cpu)
++
++        finally:
++            self.pool_lock.release()
++
++    def deactivate(self):
++        """ Delete pool in hypervisor
++            Preconditions:
++            - pool is activated
++            - no running VMs in pool
++            Actions:
++            - call hypervisor for deletion
++            - remove path of pool in xenstore
++        """
++        self.pool_lock.acquire()
++        try:
++            if not self.get_activated():
++                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
++            if len(self.get_started_VMs()) != 0:
++                raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'in use')
++
++            pool_id = self.query_pool_id()
++            # remove cpus from pool
++            cpus = []
++            for pool_rec in xc.cpupool_getinfo():
++                if pool_rec['cpupool'] == pool_id:
++                    cpus = pool_rec['cpulist']
++            for cpu_number in cpus:
++                xc.cpupool_removecpu(pool_id, cpu_number)
++            xc.cpupool_destroy(pool_id)
++
++            # update XenStore
++            xs_path = XS_POOLROOT + "%s/" % pool_id
++            xstransact.Remove(xs_path)
++        finally:
++            self.pool_lock.release()
++
++    def add_host_CPU_live(self, cpu_ref):
++        """ Add cpu to pool, if it is currently not assigned to a pool.
++            @param cpu_ref: reference of host_cpu instance to add
++            @type cpu_ref: str
++        """
++        if not self.get_activated():
++            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
++        node = XendNode.instance()
++        number = node.get_host_cpu_field(cpu_ref, 'number')
++
++        self.pool_lock.acquire()
++        try:
++            pool_id = self.query_pool_id()
++            other_pool_ref = self.get_cpu_pool_by_cpu_ref(cpu_ref)
++            if len(other_pool_ref) != 0:
++                raise PoolError(XEND_ERROR_INVALID_CPU,
++                    'cpu already assigned to pool "%s"' % other_pool_ref[0])
++            xc.cpupool_addcpu(pool_id, number)
++        finally:
++            self.pool_lock.release()
++
++        if number not in self.proposed_cpus:
++            self.proposed_cpus.append(number)
++        self._update_ncpu(pool_id)
++        if self._managed:
++            XendNode.instance().save_cpu_pools()
++
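++    # Sketch of the add flow above (hypothetical numbers): for a cpu_ref
++    # with 'number' == 3 and an active pool with pool_id == 1, the call
++    # boils down to xc.cpupool_addcpu(1, 3); cpu 3 is also recorded in
++    # proposed_cpus so that a later activate() will prefer it again.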
++    def remove_host_CPU_live(self, cpu_ref):
++        """ Remove cpu from pool.
++            After a successful call, the cpu is free again.
++            Removal of the last cpu of the pool is rejected.
++            @param cpu_ref: reference of host_cpu instance to remove
++            @type cpu_ref: str
++        """
++        if not self.get_activated():
++            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
++        node = XendNode.instance()
++        number = node.get_host_cpu_field(cpu_ref, 'number')
++
++        self.pool_lock.acquire()
++        try:
++            pool_id = self.query_pool_id()
++            pool_rec = {}
++            for pool in xc.cpupool_getinfo():
++                if pool['cpupool'] == pool_id:
++                    pool_rec = pool
++                    break
++
++            if number in pool_rec['cpulist']:
++                if len(pool_rec['cpulist']) < 2 and pool_rec['n_dom'] > 0:
++                    raise PoolError(XEND_ERROR_LAST_CPU_NOT_REM,
++                                    'could not remove last cpu')
++                xc.cpupool_removecpu(pool_id, number)
++            else:
++                raise PoolError(XEND_ERROR_INVALID_CPU,
++                                'CPU not assigned to pool')
++        finally:
++            self.pool_lock.release()
++
++        if number in self.proposed_cpus:
++            self.proposed_cpus.remove(number)
++        self._update_ncpu(pool_id)
++        if self._managed:
++            XendNode.instance().save_cpu_pools()
++
++    def add_to_proposed_CPUs(self, cpu):
++        if self.get_activated():
++            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
++
++        _cpu = int(cpu)
++        if _cpu not in self.proposed_cpus:
++            self.proposed_cpus.append(_cpu)
++            self.proposed_cpus.sort()
++            if self._managed:
++                XendNode.instance().save_cpu_pools()
++
++    def remove_from_proposed_CPUs(self, cpu):
++        if self.get_activated():
++            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
++        _cpu = int(cpu)
++        if _cpu in self.proposed_cpus:
++            self.proposed_cpus.remove(_cpu)
++            if self._managed:
++                XendNode.instance().save_cpu_pools()
++
++    def add_to_other_config(self, key, value):
++        self.other_config[key] = value
++        if self._managed:
++            XendNode.instance().save_cpu_pools()
++
++    def remove_from_other_config(self, key):
++        if key in self.other_config:
++            del self.other_config[key]
++        if self._managed:
++            XendNode.instance().save_cpu_pools()
++
++
++    #
++    # Legacy RPC calls
++    #
++    def pool_new(cls, config):
++        try:
++            record = sxp2map(config)
++            if record.has_key('proposed_CPUs') and \
++               not isinstance(record['proposed_CPUs'], types.ListType):
++                record['proposed_CPUs'] = [record['proposed_CPUs']]
++            new_uuid = cls.create(record)
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++        return new_uuid
++
++    def pool_create(cls, config):
++        try:
++            record = sxp2map(config)
++            if record.has_key('proposed_CPUs') and \
++               not isinstance(record['proposed_CPUs'], types.ListType):
++                record['proposed_CPUs'] = [record['proposed_CPUs']]
++            new_uuid = genuuid.createString()
++            pool = XendCPUPool(record, new_uuid, False)
++            pool.activate()
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++
++    def pool_start(cls, poolname):
++        pool = cls.lookup_pool(poolname)
++        if not pool:
++            raise VmError('unknown pool %s' % poolname)
++        try:
++            pool.activate()
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++
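++    # pool_list() below yields one sxp expression per pool, roughly of the
++    # form (illustrative): ['<pool-uuid>', ['name_label', 'Pool-0'],
++    # ['host_CPU_numbers', 0, 1], ['started_VM_names', 'vm1'], ...]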
++    def pool_list(cls, names):
++        sxprs = []
++        try:
++            node = XendNode.instance()
++            xd = XendDomain.instance()
++            pools = cls.get_all_records()
++            for (pool_uuid, pool_vals) in pools.items():
++                if pool_vals['name_label'] in names or len(names) == 0:
++                    # convert host_cpu refs to cpu numbers
++                    cpus = [ node.get_host_cpu_field(cpu_ref, 'number')
++                             for cpu_ref in pool_vals['host_CPUs'] ]
++                    cpus.sort()
++                    pool_vals['host_CPU_numbers'] = cpus
++                    vm_names = [ xd.get_vm_by_uuid(uuid).getName()
++                                 for uuid in pool_vals['started_VMs'] ]
++                    pool_vals['started_VM_names'] = vm_names
++                    pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
++                    sxprs += [[pool_uuid] + map2sxp(pool_vals)]
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++        return sxprs
++
++    def pool_destroy(cls, poolname):
++        pool = cls.lookup_pool(poolname)
++        if not pool:
++            raise VmError('unknown pool %s' % poolname)
++        try:
++            pool.deactivate()
++            if not pool.is_managed():
++                pool.destroy()
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++
++    def pool_delete(cls, poolname):
++        pool = cls.lookup_pool(poolname)
++        if not pool:
++            raise VmError('unknown pool %s' % poolname)
++        try:
++            pool.destroy()
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++
++    def pool_cpu_add(cls, poolname, cpu):
++        pool = cls.lookup_pool(poolname)
++        if not pool:
++            raise VmError('unknown pool %s' % poolname)
++        try:
++            cpu_ref = cls._cpu_number_to_ref(int(cpu))
++            if cpu_ref:
++                pool.add_host_CPU_live(cpu_ref)
++            else:
++                raise PoolError(XEND_ERROR_INVALID_CPU,
++                                'unknown CPU')
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++
++    def pool_cpu_remove(cls, poolname, cpu):
++        pool = cls.lookup_pool(poolname)
++        if not pool:
++            raise VmError('unknown pool %s' % poolname)
++        try:
++            cpu_ref = cls._cpu_number_to_ref(int(cpu))
++            if cpu_ref:
++                pool.remove_host_CPU_live(cpu_ref)
++            else:
++                raise PoolError(XEND_ERROR_INVALID_CPU,
++                                'unknown CPU')
++        except XendAPIError, ex:
++            raise VmError(ex.get_api_error())
++
++    def pool_migrate(cls, domname, poolname):
++        dom = XendDomain.instance()
++        pool = cls.lookup_pool(poolname)
++        if not pool:
++            raise VmError('unknown pool %s' % poolname)
++        dominfo = dom.domain_lookup_nr(domname)
++        if not dominfo:
++            raise VmError('unknown domain %s' % domname)
++        domid = dominfo.getDomid()
++        if domid is not None:
++            if domid == 0:
++                raise VmError('could not move Domain-0')
++            try:
++                cls.move_domain(pool.get_uuid(), domid)
++            except Exception, ex:
++                raise VmError('could not move domain')
++        dominfo.info['pool_name'] = poolname
++        dom.managed_config_save(dominfo)
++
++    pool_new = classmethod(pool_new)
++    pool_create = classmethod(pool_create)
++    pool_start = classmethod(pool_start)
++    pool_list = classmethod(pool_list)
++    pool_destroy = classmethod(pool_destroy)
++    pool_delete = classmethod(pool_delete)
++    pool_cpu_add = classmethod(pool_cpu_add)
++    pool_cpu_remove = classmethod(pool_cpu_remove)
++    pool_migrate = classmethod(pool_migrate)
++
++
++    #
++    # methods
++    #
++
++    def is_managed(self):
++        """ Check, if pool is managed.
++            @return: True, if managed
++            @rtype: bool
++        """
++        return self._managed
++
++    def query_pool_id(self):
++        """ Get corresponding pool-id of pool instance from xenstore.
++            @return: pool id or None
++            @rtype: int
++        """
++        self.pool_lock.acquire()
++        try:
++            for pool_id in xstransact.List(XS_POOLROOT):
++                uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid')
++                if uuid == self.get_uuid():
++                    return int(pool_id)
++        finally:
++            self.pool_lock.release()
++
++        return None
++
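++    # update_XS() below is the writing counterpart of query_pool_id()
++    # above; together they maintain the per-pool xenstore records under
++    # XS_POOLROOT ("/local/pool/"):
++    #   /local/pool/<pool_id>/uuid, .../name, .../description,
++    #   /local/pool/<pool_id>/other_config/<key>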
++    def update_XS(self, pool_id):
++        """ Write (or update) data in xenstore taken from instance.
++            @param pool_id: Pool id to build path to pool data in xenstore
++            @type pool_id: int
++        """
++        self.pool_lock.acquire()
++        try:
++            xs_path = XS_POOLROOT + "%s/" % pool_id
++            xs_entries = { 'uuid' : self.get_uuid(),
++                           'name' : self.name_label,
++                           'description' : self.name_description
++                         }
++            xstransact.Mkdir(xs_path)
++            xstransact.Mkdir(xs_path, 'other_config')
++            xstransact.Write(xs_path, xs_entries)
++            xstransact.Write('%s%s' % (xs_path, 'other_config'),
++                             self.other_config)
++        finally:
++            self.pool_lock.release()
++
++    def _update_ncpu(self, pool_id):
++        for pool_rec in xc.cpupool_getinfo():
++            if pool_rec['cpupool'] == pool_id:
++                self.ncpu = len(pool_rec['cpulist'])
++
++    def _checkName(self, name):
++        """ Check if a pool name is valid. Valid names contain alphabetic
++            characters, digits, or characters in '_-.:/+'.
++            The same name cannot be used for more than one pool at the same
++            time.
++            @param name: name
++            @type name: str
++            @raise: PoolError if invalid
++        """
++        if name is None or name == '':
++            raise PoolError(XEND_ERROR_POOL_PARAM, 'Missing Pool Name')
++        if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
++            raise PoolError(XEND_ERROR_POOL_PARAM, 'Invalid Pool Name')
++
++        pool = self.lookup_pool(name)
++        if pool and pool.get_uuid() != self.get_uuid():
++            raise PoolError(XEND_ERROR_POOL_PARAM,
++                            'Pool name "%s" already exists' % name)
++
++
++    #
++    # class methods
++    #
++
++    def recreate_active_pools(cls):
++        """ Read active pool config from hypervisor and create pool instances.
++            - Query pool ids and assigned CPUs from hypervisor.
++            - Query additional information for any pool from xenstore.
++              If an entry for a pool id is missing in xenstore, it will be
++              recreated with a new uuid and generic name (this is an error case)
++            - Create an XendCPUPool instance for any pool id
++            This function has to be called after recreation of managed pools.
++        """
++        log.debug('recreate_active_pools')
++
++        for pool_rec in xc.cpupool_getinfo():
++            pool = pool_rec['cpupool']
++
++            # read pool data from xenstore
++            path = XS_POOLROOT + "%s/" % pool
++            uuid = xstransact.Read(path, 'uuid')
++            if not uuid:
++                # xenstore entry missing / invalid; create entry with new uuid
++                uuid = genuuid.createString()
++                name = "Pool-%s" % pool
++                try:
++                    inst = XendCPUPool( { 'name_label' : name }, uuid, False )
++                    inst.update_XS(pool)
++                except PoolError, ex:
++                    # log error and skip domain
++                    log.error('cannot recreate pool %s; skipping (reason: %s)' \
++                              % (name, ex))
++            else:
++                (name, descr) = xstransact.Read(path, 'name', 'description')
++                other_config = {}
++                for key in xstransact.List(path + 'other_config'):
++                    other_config[key] = xstransact.Read(
++                        path + 'other_config/%s' % key)
++
++                # check existence of pool instance
++                inst = XendAPIStore.get(uuid, cls.getClass())
++                if inst:
++                    # update attributes of existing instance
++                    inst.name_label = name
++                    inst.name_description = descr
++                    inst.other_config = other_config
++                else:
++                    # recreate instance
++                    try:
++                        inst = XendCPUPool(
++                            { 'name_label' : name,
++                              'name_description' : descr,
++                              'other_config' : other_config,
++                              'proposed_CPUs' : pool_rec['cpulist'],
++                              'ncpu' : len(pool_rec['cpulist']),
++                            },
++                            uuid, False )
++                    except PoolError, ex:
++                        # log error and skip domain
++                        log.error(
++                            'cannot recreate pool %s; skipping (reason: %s)' \
++                            % (name, ex))
++
++    recreate_active_pools = classmethod(recreate_active_pools)
++
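++    # Restart sequence (see XendNode._init_cpu_pools): managed pools are
++    # first rebuilt from the state store via recreate() below, then
++    # recreate_active_pools() above reconciles them with the pools the
++    # hypervisor already reports.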
++
++    def recreate(cls, record, current_uuid):
++        """ Recreate a pool instance during xend restart.
++            @param record: attributes of pool
++            @type record: dict
++            @param current_uuid: uuid of pool to create
++            @type current_uuid: str
++        """
++        XendCPUPool(record, current_uuid)
++
++    recreate = classmethod(recreate)
++
++
++    def autostart_pools(cls):
++        """ Start managed pools that are marked as autostart pools.
++            This function is called after recreation of managed domains
++            during xend restart.
++        """
++        cls.pool_lock.acquire()
++        try:
++            for inst in XendAPIStore.get_all(cls.getClass()):
++                if inst.is_managed() and inst.auto_power_on and \
++                   inst.query_pool_id() == None:
++                    inst.activate()
++        finally:
++            cls.pool_lock.release()
++
++    autostart_pools = classmethod(autostart_pools)
++
++
++    def move_domain(cls, pool_ref, domid):
++        cls.pool_lock.acquire()
++        try:
++            pool = XendAPIStore.get(pool_ref, cls.getClass())
++            pool_id = pool.query_pool_id()
++
++            xc.cpupool_movedomain(pool_id, domid)
++        finally:
++            cls.pool_lock.release()
++
++    move_domain = classmethod(move_domain)
++
++
++    def query_pool_ref(cls, pool_id):
++        """ Get pool ref by pool id.
++            Take the ref from xenstore.
++            @param pool_id: id of pool to look up
++            @type pool_id: int
++            @return: list containing the pool ref ([] if the id is unknown)
++            @rtype: list of str
++        """
++        uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid')
++        if uuid:
++            return [uuid]
++        else:
++            return []
++
++    query_pool_ref = classmethod(query_pool_ref)
++
++
++    def lookup_pool(cls, id_or_name):
++        """ Search XendCPUPool instance with given id_or_name.
++            @param id_or_name: pool id or pool name to search
++            @type id_or_name: [int, str]
++            @return: instance or None if not found
++            @rtype: XendCPUPool
++        """
++        pool_uuid = None
++        try:
++            pool_id = int(id_or_name)
++            # pool id given
++            pool_uuid = cls.query_pool_ref(pool_id)
++        except ValueError:
++            # pool name given
++            pool_uuid = cls.get_by_name_label(id_or_name)
++
++        if len(pool_uuid) > 0:
++            return XendAPIStore.get(pool_uuid[0], cls.getClass())
++        else:
++            return None
++
++    lookup_pool = classmethod(lookup_pool)
++
++
++    def _cpu_number_to_ref(cls, number):
++        node = XendNode.instance()
++        for cpu_ref in node.get_host_cpu_refs():
++            if node.get_host_cpu_field(cpu_ref, 'number') == number:
++                return cpu_ref
++        return None
++
++    _cpu_number_to_ref = classmethod(_cpu_number_to_ref)
++
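++    # lookup_pool() above accepts either form (hypothetical values):
++    #   lookup_pool('Pool-0')  -> resolves by name via get_by_name_label()
++    #   lookup_pool(0)         -> resolves by id via query_pool_ref(0)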
++
++    def unbound_cpus(cls):
++        """ Build list containing the numbers of all cpus not bound to a pool.
++            Info is taken from the hypervisor.
++            @return: list of cpu numbers
++            @rtype: list of int
++        """
++        return xc.cpupool_freeinfo()
++
++    unbound_cpus = classmethod(unbound_cpus)
++
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+@@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
+     'PV_bootloader': 'bootloader',
+     'PV_bootloader_args': 'bootloader_args',
+     'Description': 'description',
++    'pool_name' : 'pool_name',
+ }
+ 
+ LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
+@@ -233,6 +234,7 @@ XENAPI_CFG_TYPES = {
+     's3_integrity' : int,
+     'superpages' : int,
+     'memory_sharing': int,
++    'pool_name' : str,
+ }
+ 
+ # List of legacy configuration keys that have no equivalent in the
+@@ -278,6 +280,7 @@ LEGACY_CFG_TYPES = {
+     'bootloader': str,
+     'bootloader_args': str,
+     'description': str,
++    'pool_name': str,
+ }
+ 
+ # Values that should be stored in xenstore's /vm/ that is used
+@@ -299,6 +302,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
+     'on_xend_stop',
+     'bootloader',
+     'bootloader_args',
++    'pool_name',
+ ]
+ 
+ ##
+@@ -407,6 +411,7 @@ class XendConfig(dict):
+             'other_config': {},
+             'platform': {},
+             'target': 0,
++            'pool_name' : 'Pool-0',
+             'superpages': 0,
+             'description': '',
+         }
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConstants.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
+@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
+ 
+ XS_VMROOT = "/vm/"
+ 
++XS_POOLROOT = "/local/pool/"
++
+ NR_PCI_FUNC = 8
+ NR_PCI_DEV = 32
+ NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+@@ -60,6 +60,7 @@ from xen.xend.xenstore.xsutil import Get
+ from xen.xend.xenstore.xswatch import xswatch
+ from xen.xend.XendConstants import *
+ from xen.xend.XendAPIConstants import *
++from xen.xend.XendCPUPool import XendCPUPool
+ from xen.xend.server.DevConstants import xenbusState
+ from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
+ 
+@@ -2565,6 +2566,19 @@ class XendDomainInfo:
+         oos = self.info['platform'].get('oos', 1)
+         oos_off = 1 - int(oos)
+ 
++        # look up the id of the pool to use
++        pool_name = self.info['pool_name']
++        if len(pool_name) == 0:
++            pool_name = "Pool-0"
++
++        pool = XendCPUPool.lookup_pool(pool_name)
++
++        if pool is None:
++            raise VmError("unknown pool %s" % pool_name)
++        pool_id = pool.query_pool_id()
++        if pool_id is None:
++            raise VmError("pool %s not activated" % pool_name)
++
+         flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
+ 
+         try:
+@@ -2573,6 +2587,7 @@ class XendDomainInfo:
+                 ssidref = ssidref,
+                 handle = uuid.fromString(self.info['uuid']),
+                 flags = flags,
++                cpupool = pool_id,
+                 target = self.info.target())
+         except Exception, e:
+             # may get here if due to ACM the operation is not permitted
+@@ -3613,6 +3628,11 @@ class XendDomainInfo:
+ 
+         retval = xc.sched_credit_domain_get(self.getDomid())
+         return retval
++    def get_cpu_pool(self):
++        if self.getDomid() is None:
++            return None
++        xeninfo = dom_get(self.domid)
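++        # 'cpupool' is the per-domain field added to the records returned
++        # by xc.domain_getinfo() in the xc.c hunk at the top of this patch.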
++        return xeninfo['cpupool']
+     def get_power_state(self):
+         return XEN_API_VM_POWER_STATE[self._stateGet()]
+     def get_platform(self):
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendError.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendError.py
+@@ -18,6 +18,7 @@
+ 
+ from xmlrpclib import Fault
+ 
++import types
+ import XendClient
+ 
+ class XendInvalidDomain(Fault):
+@@ -186,6 +187,26 @@ class DirectPCIError(XendAPIError):
+     def __str__(self):
+         return 'DIRECT_PCI_ERROR: %s' % self.error
+ 
++class PoolError(XendAPIError):
++    def __init__(self, error, spec=None):
++        XendAPIError.__init__(self)
++        self.spec = []
++        if spec:
++            if isinstance(spec, types.ListType):
++                self.spec = spec
++            else:
++                self.spec = [spec]
++        self.error = error
++
++    def get_api_error(self):
++        return [self.error] + self.spec
++
++    def __str__(self):
++        if self.spec:
++            return '%s: %s' % (self.error, self.spec)
++        else:
++            return '%s' % self.error
++
+ class VDIError(XendAPIError):
+     def __init__(self, error, vdi):
+         XendAPIError.__init__(self)
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
+@@ -43,6 +43,7 @@ from XendStateStore import XendStateStor
+ from XendMonitor import XendMonitor
+ from XendPPCI import XendPPCI
+ from XendPSCSI import XendPSCSI, XendPSCSI_HBA
++from xen.xend.XendCPUPool import XendCPUPool
+ 
+ class XendNode:
+     """XendNode - Represents a Domain 0 Host."""
+@@ -159,6 +160,8 @@ class XendNode:
+ 
+         self._init_PSCSIs()
+ 
++        self._init_cpu_pools()
++
+ 
+     def _init_networks(self):
+         # Initialise networks
+@@ -357,6 +360,18 @@ class XendNode:
+         for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
+             XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
+ 
++    def _init_cpu_pools(self):
++        # Initialise cpu_pools
++        saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
++        if saved_cpu_pools:
++            for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
++                try:
++                    XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
++                except CreateUnspecifiedAttributeError:
++                    log.warn("Error recreating %s %s",
++                             XendCPUPool.getClass(), cpu_pool_uuid)
++        XendCPUPool.recreate_active_pools()
++
+ 
+     def add_network(self, interface):
+         # TODO
+@@ -577,6 +592,7 @@ class XendNode:
+         self.save_PPCIs()
+         self.save_PSCSIs()
+         self.save_PSCSI_HBAs()
++        self.save_cpu_pools()
+ 
+     def save_PIFs(self):
+         pif_records = dict([(pif_uuid, XendAPIStore.get(
+@@ -619,6 +635,12 @@ class XendNode:
+                             for pscsi_HBA_uuid in XendPSCSI_HBA.get_all()])
+         self.state_store.save_state('pscsi_HBA', pscsi_HBA_records)
+ 
++    def save_cpu_pools(self):
++        cpu_pool_records = dict([(cpu_pool_uuid, XendAPIStore.get(
++                    cpu_pool_uuid, XendCPUPool.getClass()).get_record())
++                    for cpu_pool_uuid in XendCPUPool.get_all_managed()])
++        self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
++
+     def shutdown(self):
+         return 0
+ 
+@@ -930,6 +952,7 @@ class XendNode:
+             self.format_node_to_memory(info, 'node_to_memory')
+         info['node_to_dma32_mem'] = \
+             self.format_node_to_memory(info, 'node_to_dma32_mem')
++        info['free_cpus'] = len(XendCPUPool.unbound_cpus())
+ 
+         # FIXME: These are hard-coded to be the inverse of the getXenMemory
+         # functions in image.py.  Find a cleaner way.
+@@ -949,6 +972,7 @@ class XendNode: + 'virt_caps', + 'total_memory', + 'free_memory', ++ 'free_cpus', + 'max_free_memory', + 'max_para_memory', + 'max_hvm_memory', +Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py ++++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py +@@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio + from xen.xend.XendLogging import log + from xen.xend.XendClient import XEN_API_SOCKET + from xen.xend.XendDomain import instance as xenddomain ++from xen.xend.XendCPUPool import XendCPUPool + from xen.web.SrvDir import SrvDir + + from SrvRoot import SrvRoot +@@ -147,6 +148,12 @@ class XendServers: + status.close() + status = None + ++ # auto start pools before domains are started ++ try: ++ XendCPUPool.autostart_pools() ++ except Exception, e: ++ log.exception("Failed while autostarting pools") ++ + # Reaching this point means we can auto start domains + try: + xenddomain().autostart_domains() +Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/XMLRPCServer.py ++++ xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py +@@ -33,6 +33,7 @@ from xen.xend.XendClient import XML_RPC_ + from xen.xend.XendConstants import DOM_STATE_RUNNING + from xen.xend.XendLogging import log + from xen.xend.XendError import XendInvalidDomain ++from xen.xend.XendCPUPool import XendCPUPool + + # vcpu_avail is a long and is not needed by the clients. It's far easier + # to just remove it then to try and marshal the long. +@@ -98,6 +99,10 @@ methods = ['device_create', 'device_conf + + exclude = ['domain_create', 'domain_restore'] + ++POOL_FUNCS = ['pool_create', 'pool_new', 'pool_start', 'pool_list', ++ 'pool_destroy', 'pool_delete', 'pool_cpu_add', 'pool_cpu_remove', ++ 'pool_migrate'] ++ + class XMLRPCServer: + def __init__(self, auth, use_xenapi, use_tcp = False, + ssl_key_file = None, ssl_cert_file = None, +@@ -197,6 +202,11 @@ class XMLRPCServer: + if name not in exclude: + self.server.register_function(fn, "xend.domain.%s" % name[7:]) + ++ # Functions in XendPool ++ for name in POOL_FUNCS: ++ fn = getattr(XendCPUPool, name) ++ self.server.register_function(fn, "xend.cpu_pool.%s" % name[5:]) ++ + # Functions in XendNode and XendDmesg + for type, lst, n in [(XendNode, + ['info', 'pciinfo', 'send_debug_keys', +Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.dtd ++++ xen-4.0.0-testing/tools/python/xen/xm/create.dtd +@@ -50,6 +50,7 @@ + s3_integrity CDATA #REQUIRED + vcpus_max CDATA #REQUIRED + vcpus_at_startup CDATA #REQUIRED ++ pool_name CDATA #REQUIRED + actions_after_shutdown %NORMAL_EXIT; #REQUIRED + actions_after_reboot %NORMAL_EXIT; #REQUIRED + actions_after_crash %CRASH_BEHAVIOUR; #REQUIRED +Index: xen-4.0.0-testing/tools/python/xen/xm/create.py +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py ++++ xen-4.0.0-testing/tools/python/xen/xm/create.py +@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults + fn=set_bool, default=None, + use="""Do not inject spurious page faults into this guest""") + ++gopts.var('pool', val='POOL NAME', ++ fn=set_value, 
default=None,
++          use="""CPU pool to use for the VM""")
++
+ gopts.var('pci_msitranslate', val='TRANSLATE',
+           fn=set_int, default=1,
+           use="""Global PCI MSI-INTx translation flag (0=disable;
+@@ -1147,6 +1151,8 @@ def make_config(vals):
+         config.append(['localtime', vals.localtime])
+     if vals.oos:
+         config.append(['oos', vals.oos])
++    if vals.pool:
++        config.append(['pool_name', vals.pool])
+ 
+     config_image = configure_image(vals)
+     if vals.bootloader:
+Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
++++ xen-4.0.0-testing/tools/python/xen/xm/main.py
+@@ -56,6 +56,7 @@ from xen.util.xmlrpcclient import Server
+ import xen.util.xsm.xsm as security
+ from xen.util.xsm.xsm import XSMError
+ from xen.util.acmpolicy import ACM_LABEL_UNLABELED_DISPLAY
++from xen.util.sxputils import sxp2map, map2sxp as map_to_sxp
+ from xen.util import auxbin
+ 
+ import XenAPI
+@@ -235,6 +236,23 @@ SUBCOMMAND_HELP = {
+     'tmem-freeable'  :  ('', 'Print freeable tmem (in MiB).'),
+     'tmem-shared-auth' :  ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
+ 
++    #
++    # pool commands
++    #
++    'pool-create'   :  ('<ConfigFile> [vars]',
++                        'Create a CPU pool based on a ConfigFile.'),
++    'pool-new'      :  ('<ConfigFile> [vars]',
++                        'Adds a CPU pool to Xend CPU pool management'),
++    'pool-start'    :  ('<CPU Pool>', 'Starts a Xend CPU pool'),
++    'pool-list'     :  ('[<CPU Pool>] [-l|--long] [-c|--cpus]', 'List CPU pools on host'),
++    'pool-destroy'  :  ('<CPU Pool>', 'Deactivates a CPU pool'),
++    'pool-delete'   :  ('<CPU Pool>',
++                        'Removes a CPU pool from Xend management'),
++    'pool-cpu-add'  :  ('<CPU Pool> <CPU nr>', 'Adds a CPU to a CPU pool'),
++    'pool-cpu-remove': ('<CPU Pool> <CPU nr>', 'Removes a CPU from a CPU pool'),
++    'pool-migrate'  :  ('<Domain> <CPU Pool>',
++                        'Moves a domain into a CPU pool'),
++
+     # security
+ 
+     'addlabel'      :  ('