xend-preserve-devs.patch - bnc#597770 - insserv reports a loop between xendomains and openais. Remove openais from Should-Start in xendomains script.
OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=42
311 lines, 9.0 KiB
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
 CTRL_SRCS-y += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y += xc_cpupool.c
 CTRL_SRCS-y += xc_domain.c
 CTRL_SRCS-y += xc_evtchn.c
 CTRL_SRCS-y += xc_misc.c
--- /dev/null
+++ b/tools/libxc/xc_cpupool.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
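
Illustrative usage, not part of the patch above: a minimal sketch of how a caller might list cpupools through the new xc_cpupool_getinfo() call. It assumes the old-style integer handle returned by xc_interface_open() in this version of libxc, and the buffer size of 16 pools is an arbitrary choice for the example.

#include <stdio.h>
#include "xenctrl.h"

/* Hypothetical helper, not part of the patch: print one line per cpupool. */
static void list_cpupools(void)
{
    xc_cpupoolinfo_t info[16];
    int xc_handle, n, i;

    xc_handle = xc_interface_open();
    if ( xc_handle < 0 )
        return;

    /* Fetch info for up to 16 pools, starting at pool id 0. */
    n = xc_cpupool_getinfo(xc_handle, 0, 16, info);
    for ( i = 0; i < n; i++ )
        printf("pool %u: scheduler %u, %u domains, cpumap %#llx\n",
               info[i].cpupool_id, info[i].sched_id, info[i].n_dom,
               (unsigned long long)info[i].cpumap);

    xc_interface_close(xc_handle);
}

xc_cpupool_getinfo() returns the number of entries it filled in, so the loop bound doubles as the error check: a negative return simply skips the loop.
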
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
     return ret;
 }
 
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0 ) && (errno == EAGAIN) );
+
+    return ret;
+}
+
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
     unsigned int nr_online_vcpus;
     unsigned int max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -502,6 +503,100 @@ int xc_domain_setdebugging(int xc_handle
                            unsigned int enable);
 
 /*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
+
+/*
  * EVENT CHANNEL FUNCTIONS
  */
 
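
For orientation only, also not part of the patch: a hedged sketch of how the declarations above are meant to be combined, following the semantics documented in the comments (a *ppoolid of 0 requests XEN_DOMCTL_CPUPOOL_PAR_ANY, a cpu of -1 picks the first unassigned CPU). The handle, domain id and scheduler id are assumed to be supplied by the caller.

#include <stdint.h>
#include "xenctrl.h"

/* Hypothetical helper, not part of the patch: create a pool, give it one
 * free CPU, then move an existing domain into it. */
static int setup_pool(int xc_handle, uint32_t domid, uint32_t sched_id)
{
    uint32_t poolid = 0;    /* 0 lets Xen pick the pool id (PAR_ANY). */

    if ( xc_cpupool_create(xc_handle, &poolid, sched_id) != 0 )
        return -1;

    /* cpu == -1 asks for the first CPU not yet assigned to any pool. */
    if ( xc_cpupool_addcpu(xc_handle, poolid, -1) != 0 )
    {
        /* The pool is still empty at this point, so it may be destroyed. */
        xc_cpupool_destroy(xc_handle, poolid);
        return -1;
    }

    return xc_cpupool_movedomain(xc_handle, poolid, domid);
}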