xen/cpu-pools-libxc.patch
Charles Arnold a9bb3bc2f5
- Fix bnc#466899 - numa enabled xen fails to start/create vms
  adjust_vcpuaffinity_more_cpu.patch

- Update to changeset 21022 Xen 4.0.0 RC6.

- bnc#586510 - cpupool fixes
  cpu-pools-update.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=35
2010-03-19 17:16:56 +00:00

Index: xen-4.0.0-testing/tools/libxc/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/Makefile
+++ xen-4.0.0-testing/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
 CTRL_SRCS-y += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y += xc_cpupool.c
 CTRL_SRCS-y += xc_domain.c
 CTRL_SRCS-y += xc_evtchn.c
 CTRL_SRCS-y += xc_misc.c
Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
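[Editor's illustration, not part of the patch: a minimal caller sketch for the new libxc cpupool calls above. It assumes the classic Xen 4.0 int-handle interface xc_interface_open()/xc_interface_close(); the helper name make_pool_for_domain() and its error handling are hypothetical.]

#include <stdio.h>
#include "xenctrl.h"

/* Create a cpupool with the given scheduler, give it one currently
 * unassigned CPU (cpu == -1 means "any free cpu") and move a domain into it. */
static int make_pool_for_domain(uint32_t domid, uint32_t sched_id)
{
    int xc = xc_interface_open();
    uint32_t poolid = 0;            /* 0 lets Xen pick a free pool id */

    if ( xc < 0 )
        return -1;

    if ( xc_cpupool_create(xc, &poolid, sched_id) != 0 ||
         xc_cpupool_addcpu(xc, poolid, -1) != 0 ||
         xc_cpupool_movedomain(xc, poolid, domid) != 0 )
    {
        fprintf(stderr, "cpupool setup failed\n");
        xc_interface_close(xc);
        return -1;
    }

    printf("domain %u now runs in cpupool %u\n", domid, poolid);
    xc_interface_close(xc);
    return 0;
}

[Passing 0 as the pool id maps to XEN_DOMCTL_CPUPOOL_PAR_ANY in xc_cpupool_create() above, i.e. Xen allocates a free pool id and writes it back through ppoolid.]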
Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
+++ xen-4.0.0-testing/tools/libxc/xc_domain.c
@@ -6,6 +6,7 @@
  * Copyright (c) 2003, K A Fraser.
  */
 
+#include <stdarg.h>
 #include "xc_private.h"
 #include "xg_save_restore.h"
 #include <xen/memory.h>
@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
                      uint32_t flags,
-                     uint32_t *pdomid)
+                     uint32_t *pdomid, ...)
 {
     int err;
+    va_list ap;
     DECLARE_DOMCTL;
 
     domctl.cmd = XEN_DOMCTL_createdomain;
     domctl.domain = (domid_t)*pdomid;
     domctl.u.createdomain.ssidref = ssidref;
     domctl.u.createdomain.flags = flags;
+    if ( flags & XEN_DOMCTL_CDF_pool ) {
+        va_start(ap, pdomid);
+        domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
+        va_end(ap);
+    }
     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
         return err;
@@ -220,6 +227,7 @@ int xc_domain_getinfo(int xc_handle,
     info->cpu_time = domctl.u.getdomaininfo.cpu_time;
     info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
     info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+    info->cpupool = domctl.u.getdomaininfo.cpupool;
     memcpy(info->handle, domctl.u.getdomaininfo.handle,
            sizeof(xen_domain_handle_t));
 
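[Editor's note, not part of the patch: the extra pool argument is read with va_arg() only when XEN_DOMCTL_CDF_pool is set in flags, so existing xc_domain_create() callers keep working unchanged. A hedged sketch of a caller that does request a pool; the pool id 2 is made up, and an open handle xc plus <stdio.h> are assumed.]

xen_domain_handle_t handle = { 0 };   /* illustrative all-zero UUID */
uint32_t domid = 0;                   /* 0 = let Xen choose the domid */

/* The trailing uint32_t is consumed only because XEN_DOMCTL_CDF_pool is set. */
if ( xc_domain_create(xc, 0 /* ssidref */, handle,
                      XEN_DOMCTL_CDF_pool, &domid, (uint32_t)2) != 0 )
    fprintf(stderr, "xc_domain_create into cpupool 2 failed\n");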
Index: xen-4.0.0-testing/tools/libxc/xc_private.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
+++ xen-4.0.0-testing/tools/libxc/xc_private.h
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
     return ret;
 }
 
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0 ) && (errno == EAGAIN) );
+
+    return ret;
+}
+
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
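[Editor's note, not part of the patch: do_domctl_save() retries because the cpupool domctls can transiently fail with EAGAIN when the hypervisor cannot complete the operation right away, for instance while vcpus are still being moved off a CPU that is being removed. A hypothetical bounded variant, for callers that would rather give up after a fixed number of attempts, could look like this:]

/* Hypothetical alternative, not part of the patch: same retry-on-EAGAIN
 * idea as do_domctl_save(), but capped at a fixed number of attempts. */
static inline int do_domctl_retry(int xc_handle, struct xen_domctl *domctl,
                                  int max_tries)
{
    int ret;

    do {
        ret = do_domctl(xc_handle, domctl);
    } while ( (ret < 0) && (errno == EAGAIN) && (--max_tries > 0) );

    return ret;
}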
Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
+++ xen-4.0.0-testing/tools/libxc/xenctrl.h
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
     unsigned int nr_online_vcpus;
     unsigned int max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
                      uint32_t flags,
-                     uint32_t *pdomid);
+                     uint32_t *pdomid, ...);
 
 
 /* Functions to produce a dump of a given domain
@@ -502,6 +503,100 @@ int xc_domain_setdebugging(int xc_handle
                            unsigned int enable);
 
 /*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
+
+/*
  * EVENT CHANNEL FUNCTIONS
  */
 
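[Editor's illustration, not part of the patch: a sketch of how a management tool might consume the query interfaces declared above. An open handle xc and the <stdio.h>/<inttypes.h> headers are assumed; the array size 16 is arbitrary.]

xc_cpupoolinfo_t pools[16];
uint64_t free_map = 0;
int n, i;

/* xc_cpupool_getinfo() returns the number of entries it filled in,
 * starting at pool id 0. */
n = xc_cpupool_getinfo(xc, 0, 16, pools);
for ( i = 0; i < n; i++ )
    printf("pool %" PRIu32 ": sched %" PRIu32 ", %" PRIu32 " domains, cpus 0x%016" PRIx64 "\n",
           pools[i].cpupool_id, pools[i].sched_id,
           pools[i].n_dom, pools[i].cpumap);

/* CPUs not assigned to any pool. */
if ( xc_cpupool_freeinfo(xc, &free_map) == 0 )
    printf("free cpus: 0x%016" PRIx64 "\n", free_map);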