forked from pool/xen

- bnc#556939 - Improve device map cleanup code in domUloader

- bnc#578910 - xm block-detach does not clean up xenstore
  hotplug-cleanup-fix.patch

- bnc#579361 - Windows Server 2003 cannot wake up from standby in
  SP1
  hibernate.patch

- fate#308852: XEN CPU Pools
  cpupools-core.patch
  cpupools-core-fixup.patch
  keyhandler-alternative.patch
  cpu-pools-libxc.patch
  cpu-pools-python.patch
  cpu-pools-libxen.patch
  cpu-pools-xmtest.patch
  cpu-pools-docs.patch

- bnc#558760: Disable SCSI devices when PV drivers are loaded.

- Update to changeset 20951 Xen 4.0.0 RC4 for sle11-sp1 beta5. 

- bnc#572146 - SLES11 SP1 beta 2 Xen - BUG: soft lockup - CPU#31 
  stuck for 61s! [kstop/31:4512] 
  cpuidle-hint-v3.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=31
Charles Arnold 2010-03-01 15:05:50 +00:00 committed by Git OBS Bridge
parent 6c7e8be7db
commit 514b8cf8ec
39 changed files with 11277 additions and 135 deletions


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2883,7 +2883,7 @@ class XendDomainInfo:
@@ -2902,7 +2902,7 @@ class XendDomainInfo:
self.guest_bitsize = self.image.getBitSize()
# Make sure there's enough RAM available for the domain


@@ -1,6 +1,8 @@
--- a/tools/ioemu-remote/hw/xen_blktap.c
+++ b/tools/ioemu-remote/hw/xen_blktap.c
@@ -225,6 +225,7 @@ static int open_disk(struct td_state *s,
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -227,6 +227,7 @@ static int open_disk(struct td_state *s,
BlockDriver* drv;
char* devname;
static int devnumber = 0;
@@ -8,7 +10,7 @@
int i;
DPRINTF("Opening %s as blktap%d\n", path, devnumber);
@@ -247,7 +248,7 @@ static int open_disk(struct td_state *s,
@@ -249,7 +250,7 @@ static int open_disk(struct td_state *s,
DPRINTF("%s driver specified\n", drv ? drv->format_name : "No");
/* Open the image */
@@ -17,9 +19,11 @@
fprintf(stderr, "Could not open image file %s\n", path);
return -ENOMEM;
}
--- a/tools/ioemu-remote/xenstore.c
+++ b/tools/ioemu-remote/xenstore.c
@@ -134,7 +134,8 @@ static void insert_media(void *opaque)
Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -136,7 +136,8 @@ static void insert_media(void *opaque)
else
format = &bdrv_raw;
@@ -29,7 +33,7 @@
#ifdef CONFIG_STUBDOM
{
char *buf, *backend, *params_path, *params;
@@ -398,9 +399,9 @@ void xenstore_parse_domain_config(int hv
@@ -400,9 +401,9 @@ void xenstore_parse_domain_config(int hv
{
char **e_danger = NULL;
char *buf = NULL;
@@ -41,7 +45,7 @@
unsigned int len, num, hd_index, pci_devid = 0;
BlockDriverState *bs;
BlockDriver *format;
@@ -462,7 +463,8 @@ void xenstore_parse_domain_config(int hv
@@ -464,7 +465,8 @@ void xenstore_parse_domain_config(int hv
}
for (i = 0; i < num; i++) {
@@ -51,7 +55,7 @@
/* read the backend path */
xenstore_get_backend_path(&bpath, "vbd", danger_path, hvm_domid, e_danger[i]);
if (bpath == NULL)
@@ -548,6 +550,17 @@ void xenstore_parse_domain_config(int hv
@@ -550,6 +552,17 @@ void xenstore_parse_domain_config(int hv
format = &bdrv_raw;
}
@@ -69,7 +73,7 @@
#if 0
/* Phantom VBDs are disabled because the use of paths
* from guest-controlled areas in xenstore is unsafe.
@@ -615,7 +628,7 @@ void xenstore_parse_domain_config(int hv
@@ -617,7 +630,7 @@ void xenstore_parse_domain_config(int hv
#ifdef CONFIG_STUBDOM
if (pasprintf(&danger_buf, "%s/device/vbd/%s", danger_path, e_danger[i]) == -1)
continue;
@@ -78,12 +82,12 @@
pstrcpy(bs->filename, sizeof(bs->filename), params);
}
#else
@@ -644,7 +657,7 @@ void xenstore_parse_domain_config(int hv
@@ -646,7 +659,7 @@ void xenstore_parse_domain_config(int hv
}
}
pstrcpy(bs->filename, sizeof(bs->filename), params);
- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0)
+ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0)
- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) {
+ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) {
fprintf(stderr, "qemu: could not open vbd '%s' or hard disk image '%s' (drv '%s' format '%s')\n", buf, params, drv ? drv : "?", format ? format->format_name : "0");
}
} else {
char* snapshot = get_snapshot_name(atoi(e_danger[i]));


@@ -741,7 +741,7 @@ Index: xen-4.0.0-testing/tools/blktap/lib/blktaplib.h
===================================================================
--- xen-4.0.0-testing.orig/tools/blktap/lib/blktaplib.h
+++ xen-4.0.0-testing/tools/blktap/lib/blktaplib.h
@@ -219,6 +219,7 @@ typedef struct msg_pid {
@@ -220,6 +220,7 @@ typedef struct msg_pid {
#define DISK_TYPE_RAM 3
#define DISK_TYPE_QCOW 4
#define DISK_TYPE_QCOW2 5


@@ -1,9 +1,11 @@
bug #239173
bug #242953
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -3282,7 +3282,7 @@ class XendDomainInfo:
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -3286,7 +3286,7 @@ class XendDomainInfo:
(fn, BOOTLOADER_LOOPBACK_DEVICE))
vbd = {
@@ -12,8 +14,10 @@ bug #242953
'device': BOOTLOADER_LOOPBACK_DEVICE,
}
--- a/tools/ioemu-remote/xenstore.c
+++ b/tools/ioemu-remote/xenstore.c
Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -397,9 +397,9 @@ void xenstore_parse_domain_config(int hv
{
char **e_danger = NULL;

cpu-pools-docs.patch (new file, 1484 lines): diff suppressed because it is too large

cpu-pools-libxc.patch (new file, 360 lines)

@@ -0,0 +1,360 @@
Index: xen-4.0.0-testing/tools/libxc/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/Makefile
+++ xen-4.0.0-testing/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
CTRL_SRCS-y += xc_core.c
CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y += xc_cpupool.c
CTRL_SRCS-y += xc_domain.c
CTRL_SRCS-y += xc_evtchn.c
CTRL_SRCS-y += xc_misc.c
Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id)
+{
+ int err;
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+ domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+ XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+ domctl.u.cpupool_op.sched_id = sched_id;
+ if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+ return err;
+
+ *ppoolid = domctl.u.cpupool_op.cpupool_id;
+ return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+ uint32_t first_poolid,
+ uint32_t n_max,
+ xc_cpupoolinfo_t *info)
+{
+ int err = 0;
+ int p;
+ uint32_t poolid = first_poolid;
+ uint8_t local[sizeof (info->cpumap)];
+ DECLARE_DOMCTL;
+
+ memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+ for (p = 0; p < n_max; p++)
+ {
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ break;
+ }
+ err = do_domctl_save(xc_handle, &domctl);
+ unlock_pages(local, sizeof (local));
+
+ if ( err < 0 )
+ break;
+
+ info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+ info->sched_id = domctl.u.cpupool_op.sched_id;
+ info->n_dom = domctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+ poolid = domctl.u.cpupool_op.cpupool_id + 1;
+ info++;
+ }
+
+ if ( p == 0 )
+ return err;
+
+ return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.domid = domid;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+ uint64_t *cpumap)
+{
+ int err;
+ uint8_t local[sizeof (*cpumap)];
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ return err;
+ }
+
+ err = do_domctl_save(xc_handle, &domctl);
+ unlock_pages(local, sizeof (local));
+
+ if (err < 0)
+ return err;
+
+ bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+ return 0;
+}
Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
+++ xen-4.0.0-testing/tools/libxc/xc_domain.c
@@ -6,6 +6,7 @@
* Copyright (c) 2003, K A Fraser.
*/
+#include <stdarg.h>
#include "xc_private.h"
#include "xg_save_restore.h"
#include <xen/memory.h>
@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
- uint32_t *pdomid)
+ uint32_t *pdomid, ...)
{
int err;
+ va_list ap;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_createdomain;
domctl.domain = (domid_t)*pdomid;
domctl.u.createdomain.ssidref = ssidref;
domctl.u.createdomain.flags = flags;
+ if ( flags & XEN_DOMCTL_CDF_pool ) {
+ va_start(ap, pdomid);
+ domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
+ va_end(ap);
+ }
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
return err;
@@ -206,6 +213,7 @@ int xc_domain_getinfo(int xc_handle,
info->cpu_time = domctl.u.getdomaininfo.cpu_time;
info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+ info->cpupool = domctl.u.getdomaininfo.cpupool;
memcpy(info->handle, domctl.u.getdomaininfo.handle,
sizeof(xen_domain_handle_t));
Index: xen-4.0.0-testing/tools/libxc/xc_private.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
+++ xen-4.0.0-testing/tools/libxc/xc_private.h
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
return ret;
}
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+ int ret;
+
+ do
+ {
+ ret = do_domctl(xc_handle, domctl);
+ }
+ while ( (ret < 0 ) && (errno == EAGAIN) );
+
+ return ret;
+}
+
static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
int ret = -1;
Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
+++ xen-4.0.0-testing/tools/libxc/xenctrl.h
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
unsigned int nr_online_vcpus;
unsigned int max_vcpu_id;
xen_domain_handle_t handle;
+ unsigned int cpupool;
} xc_dominfo_t;
typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
- uint32_t *pdomid);
+ uint32_t *pdomid, ...);
/* Functions to produce a dump of a given domain
@@ -500,6 +501,100 @@ int xc_domain_setdebugging(int xc_handle
unsigned int enable);
/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+ uint32_t cpupool_id;
+ uint32_t sched_id;
+ uint32_t n_dom;
+ uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+ uint32_t first_poolid,
+ uint32_t n_max,
+ xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+ uint64_t *cpumap);
+
+
+/*
* EVENT CHANNEL FUNCTIONS
*/
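
The hunks above give libxc a complete cpupool control surface: pool create/destroy, cpu add/remove, domain migration, and a free-cpu query, all routed through the new do_domctl_save() wrapper because cpupool domctls can transiently fail with EAGAIN. As a quick orientation, here is a minimal sketch (mine, not part of the patch) of how a caller might drive the new API; it assumes the pre-4.1 libxc style used throughout this series, where xc_interface_open() returns a plain integer handle, and it reuses XEN_SCHEDULER_CREDIT, the sched_id the credit scheduler registers above:

/* Sketch only: create a credit-scheduler pool, give it one free cpu,
 * and move an existing domain into it. */
#include <stdint.h>
#include <stdio.h>
#include "xenctrl.h"

static int move_domain_to_new_pool(uint32_t domid)
{
    int xc = xc_interface_open();   /* assumed pre-4.1 int-handle style */
    uint32_t poolid = 0;            /* 0 asks Xen to pick the pool id */
    uint64_t free_cpus;

    if ( xc < 0 )
        return -1;

    /* Bail out early if every cpu already belongs to a pool. */
    if ( xc_cpupool_freeinfo(xc, &free_cpus) != 0 || free_cpus == 0 )
        goto err;

    if ( xc_cpupool_create(xc, &poolid, XEN_SCHEDULER_CREDIT) != 0 )
        goto err;

    /* cpu == -1 means "first unassigned cpu", per the header comment. */
    if ( xc_cpupool_addcpu(xc, poolid, -1) != 0 ||
         xc_cpupool_movedomain(xc, poolid, domid) != 0 )
        goto err;

    printf("domain %u now runs in cpupool %u\n", domid, poolid);
    xc_interface_close(xc);
    return 0;

err:
    xc_interface_close(xc);
    return -1;
}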

cpu-pools-libxen.patch (new file, 2180 lines): diff suppressed because it is too large

cpu-pools-python.patch (new file, 2543 lines): diff suppressed because it is too large

cpu-pools-xmtest.patch (new file, 838 lines)

@@ -0,0 +1,838 @@
Index: xen-4.0.0-testing/tools/xm-test/configure.ac
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/configure.ac
+++ xen-4.0.0-testing/tools/xm-test/configure.ac
@@ -161,6 +161,7 @@ AC_CONFIG_FILES([
tests/vtpm/Makefile
tests/xapi/Makefile
tests/enforce_dom0_cpus/Makefile
+ tests/cpupool/Makefile
lib/XmTestReport/xmtest.py
lib/XmTestLib/config.py
])
Index: xen-4.0.0-testing/tools/xm-test/grouptest/cpupool
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/grouptest/cpupool
@@ -0,0 +1 @@
+cpupool
Index: xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/NetConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/lib/XmTestLib/NetConfig.py
+++ xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/NetConfig.py
@@ -56,17 +56,21 @@ def getXendNetConfig():
val = pin.get_val()
while val[0] != 'network-script':
val = pin.get_val()
-
- # split network command into script name and its parameters
- sub_val = val[1].split()
- if sub_val[0] == "network-bridge":
+
+ if val[0] != 'network-script' or len(val) < 2:
+ # entry network-script not found or no type specified
netenv = "bridge"
- elif sub_val[0] == "network-route":
- netenv = "route"
- elif sub_val[0] == "network-nat":
- netenv = "nat"
else:
- raise NetworkError("Failed to get network env from xend config")
+ # split network command into script name and its parameters
+ sub_val = val[1].split()
+ if sub_val[0] == "network-bridge":
+ netenv = "bridge"
+ elif sub_val[0] == "network-route":
+ netenv = "route"
+ elif sub_val[0] == "network-nat":
+ netenv = "nat"
+ else:
+ raise NetworkError("Failed to get network env from xend config")
configfile.close()
return netenv
Index: xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/XenDomain.py
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/lib/XmTestLib/XenDomain.py
+++ xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/XenDomain.py
@@ -181,6 +181,7 @@ class XenDomain:
if not self.isManaged:
ret, output = traceCommand("xm create %s" % self.config)
+ print self.config
else:
ret, output = traceCommand("xm new %s" % self.config)
if ret != 0:
Index: xen-4.0.0-testing/tools/xm-test/runtest.sh
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/runtest.sh
+++ xen-4.0.0-testing/tools/xm-test/runtest.sh
@@ -91,7 +91,7 @@ runnable_tests() {
echo "Error: ramdisk/initrd.img is from an old version, or is not for this "
echo "architecture ($ARCH)."
echo "You need to build a ramdisk from at least ${XM_TEST_MAJ}.${XM_TEST_MIN}"
- exit 1
+ #exit 1
fi
# See if xend is running
Index: xen-4.0.0-testing/tools/xm-test/tests/Makefile.am
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/tests/Makefile.am
+++ xen-4.0.0-testing/tools/xm-test/tests/Makefile.am
@@ -28,7 +28,8 @@ SUBDIRS = \
vcpu-pin \
vtpm \
enforce_dom0_cpus \
- save restore migrate
+ save restore migrate \
+ cpupool
EXTRA_DIST = $(SUBDIRS) Makefile.am.template
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/01_cpupool_basic_pos.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/01_cpupool_basic_pos.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+
+
+#
+# Check output of xm info. It must include field 'free_cpus'
+# The value must be between 0 - nr_cpus
+#
+free_cpus = getInfo("free_cpus")
+if free_cpus == "":
+ FAIL("Missing 'free_cpus' entry in xm info output")
+if int(free_cpus) not in range(int(getInfo("nr_cpus")) + 1):
+ FAIL("Wrong value of 'free_cpus' (%s)" % int(free_cpus))
+
+
+#
+# Check output of xm list -l. It must contain the key 'pool_name'
+# If XM_USES_API is set, output must also contain 'cpu_pool'.
+#
+status, output = traceCommand("xm list -l Domain-0")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("pool_name Pool-0", output):
+ FAIL("Missing or wrong attribute 'pool_name' in output of 'xm list -l'")
+if os.getenv("XM_USES_API"):
+ if not re.search("cpu_pool (.+)", output):
+ FAIL("Missing or wrong attribute 'cpu_pool' in output of 'xm list -l'")
+
+#
+# Test pool selection option of xm list.
+#
+status, output = traceCommand("xm list --pool=Pool-0")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("Domain-0 +0 +", output):
+ FAIL("Missing 'Domain-0' in Pool-0")
+
+status, output = traceCommand("xm list --pool=Dummy-Pool")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if len(output.splitlines()) != 1:
+ FAIL("Wrong pool selection; output must be empty")
+
+
+#
+# Create a Domain without pool specification.
+# Default pool is Pool-0
+#
+name = "TestDomPool-1"
+domain = XmTestDomain(name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+if not isDomainRunning(name):
+ FAIL("Couldn't start domain without pool specification")
+
+status, output = traceCommand("xm list -l %s" % name)
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("pool_name Pool-0", output):
+ FAIL("Missing or wrong attribute 'pool_name' in output of 'xm list -l %s'" % name)
+
+destroyAllDomUs()
+
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/02_cpupool_manage_pos.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/02_cpupool_manage_pos.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Description:
+# Verify commands pool-new and pool-delete.
+#
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+checkRequirements()
+
+#
+# Check output of xm pool-list (of Pool-0)
+#
+status, output = traceCommand("xm pool-list Pool-0")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+lines = output.splitlines()
+if len(lines) != 2:
+ FAIL("Wrong output of xm pool-list Pool-0 (%s)" % lines)
+if not re.search("Pool-0 +[0-9]+ +credit +y +[0-9]", lines[1]):
+ FAIL("Wrong output of xm pool-list Pool-0 (%s)" % lines)
+
+#
+# Check output of xm pool-list -l (of Pool-0)
+#
+status, output = traceCommand("xm pool-list Pool-0 -l")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("name_label Pool-0", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'name_label'")
+if not re.search("started_VMs 00000000-0000-0000-0000-000000000000", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'started_VMs'")
+if not re.search("started_VM_names Domain-0", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'started_VMi_names'")
+
+
+#
+# Create a pool from pool1.cfg
+#
+cmd = "xm pool-new pool1.cfg name=Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("Pool-1 +1 +credit", output):
+ FAIL("Missing or wrong pool definition for 'Pool-1'")
+
+
+#
+# check persistence of pool; restart xend
+#
+restartXend()
+
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("Pool-1 +1 +credit", output):
+ FAIL("Missing or wrong pool definition for 'Pool-1'")
+
+
+#
+# Delete pool
+#
+deletePool("Pool-1")
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if re.search("Pool-1 +1 +credit", output):
+ FAIL("'Pool-1' not deleted")
+
+
+#
+# create / start / check / destroy / delete a managed pool
+#
+cmd = "xm pool-new pool1.cfg"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-start Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+restartXend()
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+destroyPool("Pool-1")
+deletePool("Pool-1")
+
+cmd = "xm pool-list Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if re.search("Pool-1 +1 +credit", output):
+ FAIL("'Pool-1' not deleted")
+
+
+#
+# create / check / destroy a unmanaged pool
+#
+cmd = "xm pool-create pool1.cfg"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+restartXend()
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+destroyPool("Pool-1", True)
+
+cmd = "xm pool-list"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if re.search("Pool-1", output):
+ FAIL("'Pool-1' not deleted")
+
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/03_cpupool_domain.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/03_cpupool_domain.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+
+checkRequirements()
+
+#
+# create Pool-1 with 1 CPU and start a VM
+#
+createStdPool()
+name = "TestDomPool-1"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-1'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+cmd = "xm list --pool=Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search(name, output):
+ FAIL("%s; missing '%s' in Pool-1" % (cmd,name))
+
+domain.stop()
+waitForDomain(name)
+destroyPool("Pool-1", True)
+
+
+
+#
+# create Pool-1 with 1 CPU, add a second CPU
+# start a VM (with vpcu=3) add a third CPU
+# remove 2 CPUs from pool
+# create Pool-1 with 1 CPU and start a VM
+#
+pool_names = ['Pool-1', 'Pool-2']
+createStdPool({'name' : pool_names[0], 'cpus' : '"1"'})
+name = "TestDomPool-1"
+cmd = "xm pool-cpu-add Pool-1 2"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+domain = XmTestDomain(extraConfig={ 'pool' : 'Pool-1'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+cmd = "xm pool-cpu-add Pool-1 3"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-cpu-remove Pool-1 2"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+cmd = "xm pool-cpu-remove Pool-1 3"
+
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+
+createStdPool({'name' : pool_names[1]})
+name2 = "TestDomPool-2"
+domain2 = XmTestDomain(extraConfig={ 'pool' : 'Pool-2'}, name=name2)
+try:
+ domain2.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+domain2.stop()
+domain.stop()
+
+waitForDomain(name)
+waitForDomain(name2)
+
+for pool in pool_names:
+ destroyPool(pool, True)
+
+
+
+#
+# Create 2 pools with 1 cpu per pool.
+# Create three domains in each pool, with 1,2,3 VCPUs
+# Switch a thrid cpu between the pools.
+#
+pool_names = ['Pool-1', 'Pool-2']
+domains = {}
+cpu=3
+
+for pool in pool_names:
+ createStdPool({'name' : pool})
+ for dom_nr in range(3):
+ name = "TestDom%s-%s" % (pool, dom_nr)
+ domains[name] = XmTestDomain(extraConfig={'pool' : pool},
+ name=name)
+ try:
+ domains[name].start(noConsole=True)
+ except DomainError, ex:
+ FAIL(str(ex))
+
+cmd_add_1 = "xm pool-cpu-add Pool-1 %s" % cpu
+cmd_rem_1 = "xm pool-cpu-remove Pool-1 %s" % cpu
+cmd_add_2 = "xm pool-cpu-add Pool-2 %s" % cpu
+cmd_rem_2 = "xm pool-cpu-remove Pool-2 %s" % cpu
+
+for i in range(25):
+ traceCommand(cmd_add_1)
+ traceCommand(cmd_rem_1)
+ traceCommand(cmd_add_2)
+ traceCommand(cmd_rem_2)
+
+destroyAllDomUs()
+for pool in pool_names:
+ destroyPool(pool, True)
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/04_cpupool_migrate.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/04_cpupool_migrate.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+
+
+#
+# Check requirements of test case
+# - min 2 free cpus (not assigned to a pool)
+#
+if int(getInfo("free_cpus")) < 2:
+ SKIP("Need at least 2 free cpus")
+
+
+
+#
+# Create 2 pools with one cpu per pool.
+#
+createStdPool({'name' : 'Pool-1'})
+createStdPool({'name' : 'Pool-2'})
+
+
+
+#
+# Create a domain with vcpus=1 in Pool-0.
+# Migrate it to one of the created pools afterwards to the other pool
+#
+name = "TestDomPool-1"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-0'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+if not domInPool(name, 'Pool-0'):
+ FAIL("missing '%s' in Pool-0" % name)
+
+if not migrateToPool(name, 'Pool-1'):
+ FAIL("missing '%s' in Pool-1" % name)
+if not migrateToPool(name, 'Pool-2'):
+ FAIL("missing '%s' in Pool-2" % name)
+
+
+
+#
+# Create a domain in Pool-0.
+# Migrate it to one of the created pools afterwards to the other pool
+#
+name = "TestDomPool-2"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-0'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+if not domInPool(name, 'Pool-0'):
+ FAIL("missing '%s' in Pool-0" % name)
+
+if not migrateToPool(name, 'Pool-1'):
+ FAIL("missing '%s' in Pool-1" % name)
+if not migrateToPool(name, 'Pool-2'):
+ FAIL("missing '%s' in Pool-2" % name)
+
+
+
+#
+# Migrate other domains between pools
+#
+for cnt in range(10):
+ for pool in ['Pool-0', 'Pool-1', 'Pool-2']:
+ for domain in getRunningDomains():
+ if domain != 'Domain-0':
+ if not migrateToPool(domain, pool):
+ FAIL("missing '%s' in %s" % (domain, pool))
+
+
+#
+# Cleanup
+#
+cleanupPoolsDomains()
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/Makefile.am
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/Makefile.am
@@ -0,0 +1,22 @@
+SUBDIRS =
+
+TESTS = 01_cpupool_basic_pos.test \
+ 02_cpupool_manage_pos.test \
+ 03_cpupool_domain.test \
+ 04_cpupool_migrate.test
+
+EXTRA_DIST = $(TESTS)
+
+TESTS_ENVIRONMENT=@TENV@
+
+%.test: %.py
+ cp $< $@
+ chmod +x $@
+
+clean-local: am_config_clean-local
+
+am_config_clean-local:
+ rm -f *test
+ rm -f *log
+ rm -f *~
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/pool1.cfg
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/pool1.cfg
@@ -0,0 +1 @@
+name="Pool-1"
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/pools.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/pools.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+
+
+from XmTestLib import *
+
+def checkRequirements():
+ # - min 4 cpus
+ # - only Pool-0 defined
+ nr_cpus = int(getInfo("nr_cpus"))
+ if nr_cpus < 4:
+ SKIP("Need at least 4 cpus for pool tests")
+ if len(getPoolList()) > 1:
+ SKIP("More than one pool already defined")
+
+ # reduce Pool-0 to CPU-0
+ traceCommand("xm pool-cpu-add Pool-0 0")
+ for i in range(1, nr_cpus):
+ traceCommand("xm pool-cpu-remove Pool-0 %s" % i)
+
+def createStdPool(add_param=None):
+ cmd = "xm pool-create pool1.cfg "
+ if add_param:
+ for k,v in add_param.items():
+ cmd += "%s=%s " % (k,v)
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+
+def deletePool(name):
+ cmd = "xm pool-delete %s" % name
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+
+def destroyPool(name, delete_on_xenapi=False):
+ cmd = "xm pool-destroy %s" % name
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ if os.getenv("XM_USES_API") and delete_on_xenapi:
+ deletePool(name)
+
+def getPoolList():
+ status, output = traceCommand("xm pool-list")
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ lines = output.splitlines()
+ pools = []
+ for l in lines[1:]:
+ elms = l.split(" ", 1)
+ pools.append(elms[0]);
+ return pools
+
+def domInPool(dom, pool):
+ cmd = "xm list --pool=%s" % pool
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ return re.search(dom, output) != None
+
+def migrateToPool(dom, pool):
+ status, output = traceCommand("xm pool-migrate %s %s" % (dom, pool))
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ return domInPool(dom, pool)
+
+def cleanupPoolsDomains():
+ destroyAllDomUs()
+ for pool in getPoolList():
+ if pool != 'Pool-0':
+ destroyPool(pool, True)
+
+def waitForDomain(name):
+ for i in range(10):
+ if not isDomainRunning(name):
+ break
+ time.sleep(1)
+
Index: xen-4.0.0-testing/tools/xm-test/tests/xapi/20_xapi-cpu_pool_basic.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/xapi/20_xapi-cpu_pool_basic.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+
+# Basic Pool creation tests
+
+from XmTestLib import xapi
+from XmTestLib import *
+
+
+session = xapi.connect()
+host_ref = session.xenapi.host.get_all()[0]
+pools = session.xenapi.host.get_resident_cpu_pools(host_ref)
+if len(pools) != 1:
+ SKIP("Only Pool-0 have to be created for this test")
+
+
+# check extension of host object
+host_recs = session.xenapi.host.get_all_records()
+host_rec = host_recs[host_recs.keys()[0]]
+if len(host_recs.keys()) != 1 or not host_rec.has_key("resident_cpu_pools") or \
+ len(host_rec["resident_cpu_pools"]) != 1:
+ FAIL("Missing or wrong field 'resident_cpu_pools' in host record")
+
+
+# check extension of host_cpu object
+host_cpu_recs = session.xenapi.host_cpu.get_all_records()
+assigned_cpus = [ cpu for cpu in host_cpu_recs.values() if len(cpu["cpu_pool"]) > 0 ]
+unassigned_cpus = session.xenapi.host_cpu.get_unassigned_cpus()
+if len(host_cpu_recs) - len(assigned_cpus) != len(unassigned_cpus):
+ FAIL("Wrong host_cpu count values; CPUS total: %d, CPUS ass: %d, CPUS unass: %d" % (
+ len(host_cpu_recs), len(assigned_cpus), len(unassigned_cpus)))
+
+for cpu_rec in host_cpu_recs.values():
+ cpu_pool = session.xenapi.host_cpu.get_cpu_pool(cpu_rec['uuid'])
+ if cpu_pool != cpu_rec['cpu_pool']:
+ FAIL("Inconsistency of cpu_pool ref between host_cpu record (%s) "
+ "and get_cpu_pool (%s)" % (cpu_rec['cpu_pool'], cpu_pool))
+
+
+# create / modify / remove managed cpu pools
+pool1_cfg = { 'name_label' : 'Pool-1',
+ 'name_description' : 'new pool',
+ 'auto_power_on' : False,
+ 'ncpu' : '3',
+ 'sched_policy' : 'credit',
+ 'proposed_CPUs' : ['1','2'],
+ 'other_config' : { 'xmtest' : True },
+ }
+pool1 = session.xenapi.cpu_pool.create(pool1_cfg)
+pool1_rec = session.xenapi.cpu_pool.get_record(pool1)
+for k in pool1_cfg.keys():
+ if pool1_rec[k] != pool1_cfg[k]:
+ FAIL("Create error Pool-1 (create config %s, current config: %s, key: %s)" % (
+ pool1_cfg, pool1_rec, k))
+
+pool_all = session.xenapi.cpu_pool.get_all()
+if len(pool_all) != 2:
+ FAIL("cpu_pool.get_all() returns '%d', expected '2'" % len(pool_all))
+
+pool_all = session.xenapi.cpu_pool.get_all_records()
+if len(pool_all) != 2:
+ FAIL("cpu_pool.get_all_records() returns '%d', expected '2'" % len(pool_all))
+
+if pool1 != session.xenapi.cpu_pool.get_by_name_label(pool1_cfg['name_label'])[0]:
+ FAIL("cpu_pool.get_by_name_label() returns wrong value")
+
+if pool1 != session.xenapi.cpu_pool.get_by_uuid(pool1):
+ FAIL("cpu_pool.get_by_uuid() returns wrong value")
+
+if session.xenapi.cpu_pool.get_activated(pool1):
+ FAIL("cpu_pool.get_activated() returns 'true' instead of 'false'")
+
+if pool1_cfg['auto_power_on'] != session.xenapi.cpu_pool.get_auto_power_on(pool1):
+ FAIL("cpu_pool.get_auto_power_on() returns wrong value")
+
+if len(session.xenapi.cpu_pool.get_host_CPUs(pool1)) != 0:
+ FAIL("cpu_pool.get_host_CPUs has to return an empty list")
+
+if pool1_cfg['name_label'] != session.xenapi.cpu_pool.get_name_label(pool1):
+ FAIL("cpu_pool.get_name_label() returns wrong value")
+
+if pool1_cfg['name_description'] != session.xenapi.cpu_pool.get_name_description(pool1):
+ FAIL("cpu_pool.get_name_description() returns wrong value")
+
+if pool1_cfg['ncpu'] != session.xenapi.cpu_pool.get_ncpu(pool1):
+ FAIL("cpu_pool.get_ncpu() returns wrong value")
+
+cfg_len = len(pool1_cfg['proposed_CPUs'])
+api_len = len(session.xenapi.cpu_pool.get_proposed_CPUs(pool1))
+if cfg_len != api_len:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; cfg_cnt: %s, api_cnt:%s" % (cfg_len, api_len))
+
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if pool1_cfg['other_config']['xmtest'] != other_config.get('xmtest'):
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+if session.xenapi.cpu_pool.get_resident_on(pool1) != session.xenapi.host.get_all()[0]:
+ FAIL("cpu_pool.get_resident_on() returns wrong value")
+
+if pool1_cfg['sched_policy'] != session.xenapi.cpu_pool.get_sched_policy(pool1):
+ FAIL("cpu_pool.get_sched_policy() returns wrong value")
+
+if len(session.xenapi.cpu_pool.get_started_VMs(pool1)) != 0:
+ FAIL("cpu_pool.get_started_VMs() returns wrong value")
+
+if pool1 != session.xenapi.cpu_pool.get_uuid(pool1):
+ FAIL("cpu_pool.get_uuid() returns wrong value")
+
+session.xenapi.cpu_pool.set_auto_power_on(pool1, True)
+if not session.xenapi.cpu_pool.get_auto_power_on(pool1):
+ FAIL("cpu_pool.get_auto_power_on() returns wrong value")
+
+session.xenapi.cpu_pool.set_proposed_CPUs(pool1, [4])
+if '4' not in session.xenapi.cpu_pool.get_proposed_CPUs(pool1):
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; (set_proposed_CPUs)")
+
+session.xenapi.cpu_pool.add_to_proposed_CPUs(pool1, 5)
+val = session.xenapi.cpu_pool.get_proposed_CPUs(pool1)
+if '5' not in val:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; %s not in %s" % ('5',val))
+
+session.xenapi.cpu_pool.remove_from_proposed_CPUs(pool1, 5)
+val = session.xenapi.cpu_pool.get_proposed_CPUs(pool1)
+if '5' in val:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; %s in %s" % ('5',val))
+
+session.xenapi.cpu_pool.set_name_label(pool1, 'New-Pool-1')
+if 'New-Pool-1' != session.xenapi.cpu_pool.get_name_label(pool1):
+ FAIL("cpu_pool.get_name_label() returns wrong value")
+
+session.xenapi.cpu_pool.set_ncpu(pool1, 4)
+if '4' != session.xenapi.cpu_pool.get_ncpu(pool1):
+ FAIL("cpu_pool.get_ncpu() returns wrong value")
+
+session.xenapi.cpu_pool.set_other_config(pool1, {'test' : 'ok'})
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('test') != 'ok':
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.add_to_other_config(pool1, 'new_entry', 'added')
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('new_entry') != 'added':
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.remove_from_other_config(pool1, 'new_entry')
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('new_entry') != None:
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.set_sched_policy(pool1, 'credit')
+if 'credit' != session.xenapi.cpu_pool.get_sched_policy(pool1):
+ FAIL("cpu_pool.get_sched_policy() returns wrong value")
+
+session.xenapi.cpu_pool.destroy(pool1)
+if pool1 in session.xenapi.cpu_pool.get_all():
+ FAIL("cpu_pool.destroy() has not removed pool")
+
Index: xen-4.0.0-testing/tools/xm-test/tests/xapi/Makefile.am
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/tests/xapi/Makefile.am
+++ xen-4.0.0-testing/tools/xm-test/tests/xapi/Makefile.am
@@ -3,7 +3,8 @@ SUBDIRS =
TESTS = 01_xapi-vm_basic.test \
02_xapi-vbd_basic.test \
03_xapi-network_pos.test \
- 04_xapi-data_uri_handling.test
+ 04_xapi-data_uri_handling.test \
+ 20_xapi-cpu_pool_basic.test
XFAIL_TESTS =

cpupools-core-fixup.patch (new file, 127 lines)

@@ -0,0 +1,127 @@
- fix tasklet_schedule_cpu() when invoked from the tasklet's handler
- properly balance (un-)pausing in continue_hypercall_on_cpu() code
paths
- bump domctl interface version (due to the addition of the "cpupool"
member to struct xen_domctl_getdomaininfo)
- move and rename csched_priv to make sure eventual backported
upstream patches using the variable get correctly adjusted (i.e.
missing adjustments get detected at build time)
Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -1573,6 +1573,7 @@ int continue_hypercall_on_cpu(int cpu, v
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
+ vcpu_pause_nosync(v);
}
else
{
@@ -1583,7 +1584,6 @@ int continue_hypercall_on_cpu(int cpu, v
info->func = func;
info->data = data;
- vcpu_pause_nosync(v);
tasklet_schedule_cpu(&info->tasklet, cpu);
raise_softirq(SCHEDULE_SOFTIRQ);
Index: xen-4.0.0-testing/xen/common/sched_credit.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_credit.c
+++ xen-4.0.0-testing/xen/common/sched_credit.c
@@ -176,7 +176,6 @@ struct csched_private {
/*
* Global variables
*/
-static struct csched_private csched_priv;
static struct csched_private *csched_priv0 = NULL;
static void csched_tick(void *_cpu);
@@ -1524,11 +1523,13 @@ static void csched_tick_resume(struct sc
}
}
+static struct csched_private _csched_priv;
+
struct scheduler sched_credit_def = {
.name = "SMP Credit Scheduler",
.opt_name = "credit",
.sched_id = XEN_SCHEDULER_CREDIT,
- .sched_data = &csched_priv,
+ .sched_data = &_csched_priv,
.init_domain = csched_dom_init,
.destroy_domain = csched_dom_destroy,
Index: xen-4.0.0-testing/xen/common/softirq.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/softirq.c
+++ xen-4.0.0-testing/xen/common/softirq.c
@@ -104,12 +104,15 @@ static void tasklet_schedule_list(struct
{
BUG_ON(!list_empty(&t->list));
list_add_tail(&t->list, tlist);
+ t->scheduled_on = NR_CPUS;
}
t->is_scheduled = 1;
if ( cpu == smp_processor_id() )
raise_softirq(TASKLET_SOFTIRQ);
- else
+ else if ( !t->is_running )
cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ else
+ t->scheduled_on = cpu;
}
spin_unlock_irqrestore(&tasklet_lock, flags);
@@ -156,7 +159,15 @@ static void tasklet_action(void)
if ( t->is_scheduled )
{
BUG_ON(t->is_dead || !list_empty(&t->list));
- list_add_tail(&t->list, tlist);
+ if ( t->scheduled_on >= NR_CPUS )
+ list_add_tail(&t->list, tlist);
+ else
+ {
+ unsigned int cpu = t->scheduled_on;
+
+ list_add_tail(&t->list, &per_cpu(tasklet_list_pcpu, cpu));
+ cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ }
}
/*
Index: xen-4.0.0-testing/xen/include/public/domctl.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/public/domctl.h
+++ xen-4.0.0-testing/xen/include/public/domctl.h
@@ -35,7 +35,7 @@
#include "xen.h"
#include "grant_table.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007
struct xenctl_cpumap {
XEN_GUEST_HANDLE_64(uint8) bitmap;
Index: xen-4.0.0-testing/xen/include/xen/softirq.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/xen/softirq.h
+++ xen-4.0.0-testing/xen/include/xen/softirq.h
@@ -50,12 +50,14 @@ struct tasklet
bool_t is_scheduled;
bool_t is_running;
bool_t is_dead;
+ unsigned int scheduled_on;
void (*func)(unsigned long);
unsigned long data;
};
#define DECLARE_TASKLET(name, func, data) \
- struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, func, data }
+ struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, NR_CPUS, \
+ func, data }
void tasklet_schedule(struct tasklet *t);
void tasklet_schedule_cpu(struct tasklet *t, int cpu);
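
The first fixup item deserves a gloss: the new scheduled_on field lets a tasklet be re-scheduled onto another cpu from inside its own handler; while is_running is set, tasklet_schedule_cpu() only records the target cpu, and tasklet_action() re-queues the tasklet there once the handler returns. A minimal sketch of the pattern this enables (my illustration, not code from the patch):

/* Sketch only: a tasklet whose handler migrates the tasklet to the
 * cpu named in its data word.  Without the fix above, a cross-cpu
 * re-schedule issued from inside the handler was not reliably
 * honoured; with it, the target cpu is parked in scheduled_on and
 * applied when this run completes. */
static void bounce_fn(unsigned long target_cpu);
static DECLARE_TASKLET(bounce_tasklet, bounce_fn, 1);

static void bounce_fn(unsigned long target_cpu)
{
    /* ... do this cpu's share of the work, then hop to the target ... */
    tasklet_schedule_cpu(&bounce_tasklet, (int)target_cpu);
}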

cpupools-core.patch (new file, 3267 lines): diff suppressed because it is too large


@@ -235,6 +235,13 @@ class Wholedisk:
os.system("lvchange -ay '%s' > /dev/null 2>&1" % (self.vdev))
self.mapped += 1
def partitionsdeactivated(self):
"Return True if partition mappings have been removed, False otherwise"
for part in self.partitions:
if os.access(part.pdev, os.F_OK):
return False
return True
def deactivatepartitions(self):
"""Remove device-mapper mappings and loop mapping.
@@ -246,9 +253,14 @@ class Wholedisk:
self.mapped -= 1
if not self.mapped:
if self.pcount:
verbose_print("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
fd = os.popen("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
fd.close()
retries = 10
while retries and not self.partitionsdeactivated():
verbose_print("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
os.system("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
time.sleep(0.1)
retries -= 1
if retries == 0:
error("unable to remove partition mappings with kpartx -d")
if self.pcount and self.lvm:
verbose_print("lvchange -an '%s'" % (self.vdev))
ret = os.system("lvchange -an '%s' > /dev/null 2>&1" % (self.vdev)) >> 8


@@ -1,5 +1,7 @@
--- 2010-01-06.orig/xen/arch/ia64/linux-xen/smp.c 2009-05-27 13:54:05.000000000 +0200
+++ 2010-01-06/xen/arch/ia64/linux-xen/smp.c 2010-01-06 11:22:12.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/ia64/linux-xen/smp.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/ia64/linux-xen/smp.c
+++ xen-4.0.0-testing/xen/arch/ia64/linux-xen/smp.c
@@ -189,7 +189,7 @@ handle_IPI (int irq, void *dev_id, struc
* At this point the structure may be gone unless
* wait is true.
@@ -9,8 +11,10 @@
/* Notify the sending CPU that the task is done. */
mb();
--- 2010-01-06.orig/xen/arch/x86/smp.c 2009-08-19 17:01:49.000000000 +0200
+++ 2010-01-06/xen/arch/x86/smp.c 2010-01-06 11:22:12.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/smp.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/smp.c
+++ xen-4.0.0-testing/xen/arch/x86/smp.c
@@ -394,7 +394,7 @@ static void __smp_call_function_interrup
if ( call_data.wait )
@@ -29,9 +33,11 @@
}
irq_exit();
--- 2010-01-06.orig/xen/common/keyhandler.c 2009-12-16 09:14:13.000000000 +0100
+++ 2010-01-06/xen/common/keyhandler.c 2010-01-06 11:22:12.000000000 +0100
@@ -72,14 +72,25 @@ static struct keyhandler show_handlers_k
Index: xen-4.0.0-testing/xen/common/keyhandler.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/keyhandler.c
+++ xen-4.0.0-testing/xen/common/keyhandler.c
@@ -71,14 +71,25 @@ static struct keyhandler show_handlers_k
.desc = "show this message"
};
@@ -61,7 +67,7 @@
}
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
@@ -93,14 +104,12 @@ static void dump_registers(unsigned char
@@ -92,14 +103,12 @@ static void dump_registers(unsigned char
printk("'%c' pressed -> dumping registers\n", key);
/* Get local execution state out immediately, in case we get stuck. */
@@ -77,8 +83,10 @@
on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
}
--- 2010-01-06.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2009-05-20 08:46:00.000000000 +0200
+++ 2010-01-06/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2010-01-06 11:22:12.000000000 +0100
Index: xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h
+++ xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
@@ -278,7 +278,7 @@ struct switch_stack {
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)

hibernate.patch (new file, 28 lines)

@@ -0,0 +1,28 @@
Index: xen-4.0.0-testing/tools/firmware/hvmloader/acpi/dsdt.asl
===================================================================
--- xen-4.0.0-testing.orig/tools/firmware/hvmloader/acpi/dsdt.asl
+++ xen-4.0.0-testing/tools/firmware/hvmloader/acpi/dsdt.asl
@@ -30,21 +30,9 @@ DefinitionBlock ("DSDT.aml", "DSDT", 2,
/*
* S3 (suspend-to-ram), S4 (suspend-to-disc) and S5 (power-off) type codes:
* must match piix4 emulation.
+ * Turn off support for s3 and s4 sleep states to deal with SVVP tests.
+ * This is what MSFT does on HyperV.
*/
- Name (\_S3, Package (0x04)
- {
- 0x05, /* PM1a_CNT.SLP_TYP */
- 0x05, /* PM1b_CNT.SLP_TYP */
- 0x0, /* reserved */
- 0x0 /* reserved */
- })
- Name (\_S4, Package (0x04)
- {
- 0x06, /* PM1a_CNT.SLP_TYP */
- 0x06, /* PM1b_CNT.SLP_TYP */
- 0x00, /* reserved */
- 0x00 /* reserved */
- })
Name (\_S5, Package (0x04)
{
0x07, /* PM1a_CNT.SLP_TYP */


@@ -2,14 +2,14 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -914,16 +914,16 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -917,16 +917,16 @@ static PyObject *pyxc_hvm_build(XcObject
#endif
int i;
char *image;
- int memsize, target=-1, vcpus = 1, acpi = 0, apic = 1;
+ int memsize, target=-1, vcpus = 1, acpi = 0, apic = 1, extid = 0;
PyObject *vcpu_avail_handle = NULL;
uint8_t vcpu_avail[HVM_MAX_VCPUS/8];
uint8_t vcpu_avail[(HVM_MAX_VCPUS + 7)/8];
static char *kwd_list[] = { "domid",
- "memsize", "image", "target", "vcpus",
@@ -24,7 +24,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
return NULL;
memset(vcpu_avail, 0, sizeof(vcpu_avail));
@@ -975,6 +975,7 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -978,6 +978,7 @@ static PyObject *pyxc_hvm_build(XcObject
va_hvm->checksum -= sum;
munmap(va_map, XC_PAGE_SIZE);
#endif
@@ -36,7 +36,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -150,6 +150,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
@@ -151,6 +151,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
'nographic': int,
'nomigrate': int,
'pae' : int,
@@ -87,7 +87,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
gopts.var('acpi', val='ACPI',
fn=set_int, default=1,
use="Disable or enable ACPI of HVM domain.")
@@ -1077,7 +1081,7 @@ def configure_hvm(config_image, vals):
@@ -1081,7 +1085,7 @@ def configure_hvm(config_image, vals):
'timer_mode',
'usb', 'usbdevice',
'vcpus', 'vnc', 'vncconsole', 'vncdisplay', 'vnclisten',


@@ -151,14 +151,14 @@ Index: xen-4.0.0-testing/xen/include/public/arch-x86/hvm/save.h
+ uint32_t long_mode;
+ uint32_t ext_id;
+};
+DECLARE_HVM_SAVE_TYPE(HYPERV_DOM, 15, struct hvm_hyperv_dom);
+DECLARE_HVM_SAVE_TYPE(HYPERV_DOM, 16, struct hvm_hyperv_dom);
+
+struct hvm_hyperv_cpu {
+ uint64_t control_msr;
+ uint64_t version_msr;
+ uint64_t pad[27]; //KYS: sles10 sp2 compatibility
+};
+DECLARE_HVM_SAVE_TYPE(HYPERV_CPU, 16, struct hvm_hyperv_cpu);
+DECLARE_HVM_SAVE_TYPE(HYPERV_CPU, 17, struct hvm_hyperv_cpu);
+
/*
* Largest type-code in use


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -247,8 +247,11 @@ static int open_disk(struct td_state *s,
@@ -249,8 +249,11 @@ static int open_disk(struct td_state *s,
drv = blktap_drivers[i].drv;
DPRINTF("%s driver specified\n", drv ? drv->format_name : "No");


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -360,6 +360,15 @@ static void qemu_send_responses(void* op
@@ -362,6 +362,15 @@ static void qemu_send_responses(void* op
}
/**
@@ -18,7 +18,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
* Callback function for the IO message pipe. Reads requests from the ring
* and processes them (call qemu read/write functions).
*
@@ -378,6 +387,7 @@ static void handle_blktap_iomsg(void* pr
@@ -380,6 +389,7 @@ static void handle_blktap_iomsg(void* pr
blkif_t *blkif = s->blkif;
tapdev_info_t *info = s->ring_info;
int page_size = getpagesize();
@@ -26,7 +26,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
struct aiocb_info *aiocb_info;
@@ -410,7 +420,7 @@ static void handle_blktap_iomsg(void* pr
@@ -412,7 +422,7 @@ static void handle_blktap_iomsg(void* pr
/* Don't allow writes on readonly devices */
if ((s->flags & TD_RDONLY) &&
@@ -35,7 +35,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
blkif->pending_list[idx].status = BLKIF_RSP_ERROR;
goto send_response;
}
@@ -431,7 +441,7 @@ static void handle_blktap_iomsg(void* pr
@@ -433,7 +443,7 @@ static void handle_blktap_iomsg(void* pr
DPRINTF("Sector request failed:\n");
DPRINTF("%s request, idx [%d,%d] size [%llu], "
"sector [%llu,%llu]\n",
@@ -44,7 +44,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
"WRITE" : "READ"),
idx,i,
(long long unsigned)
@@ -444,8 +454,14 @@ static void handle_blktap_iomsg(void* pr
@@ -446,8 +456,14 @@ static void handle_blktap_iomsg(void* pr
blkif->pending_list[idx].secs_pending += nsects;
@@ -60,7 +67,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
case BLKIF_OP_WRITE:
aiocb_info = malloc(sizeof(*aiocb_info));
@@ -465,6 +481,10 @@ static void handle_blktap_iomsg(void* pr
@@ -467,6 +483,10 @@ static void handle_blktap_iomsg(void* pr
DPRINTF("ERROR: bdrv_write() == NULL\n");
goto send_response;
}


@@ -19,7 +19,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -218,9 +218,10 @@ static int map_new_dev(struct td_state *
@@ -220,9 +220,10 @@ static int map_new_dev(struct td_state *
return -1;
}
@@ -31,7 +31,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
char* devname;
static int devnumber = 0;
int i;
@@ -230,7 +231,22 @@ static int open_disk(struct td_state *s,
@@ -232,7 +233,22 @@ static int open_disk(struct td_state *s,
bs = bdrv_new(devname);
free(devname);
@@ -55,7 +55,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
fprintf(stderr, "Could not open image file %s\n", path);
return -ENOMEM;
}
@@ -521,7 +537,7 @@ static void handle_blktap_ctrlmsg(void*
@@ -527,7 +543,7 @@ static void handle_blktap_ctrlmsg(void*
s = state_init();
/*Open file*/


@@ -16,7 +16,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -256,6 +256,12 @@ static int open_disk(struct td_state *s,
@@ -258,6 +258,12 @@ static int open_disk(struct td_state *s,
s->size = bs->total_sectors;
s->sector_size = 512;

ioemu-disable-scsi.patch Normal file
View File

@@ -0,0 +1,80 @@
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_platform.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_platform.c 2010-02-18 14:08:08.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_platform.c 2010-02-18 14:28:59.000000000 -0700
@@ -359,6 +359,8 @@ static void platform_ioport_write(void *
case 4:
fprintf(logfile, "Disconnect IDE hard disk...\n");
ide_unplug_harddisks();
+ fprintf(logfile, "Disconnect SCSI hard disk...\n");
+ pci_unplug_scsi();
fprintf(logfile, "Disconnect netifs...\n");
pci_unplug_netifs();
fprintf(logfile, "Shutdown taps...\n");
Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/qemu-xen.h 2010-02-18 14:08:08.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h 2010-02-18 14:44:28.000000000 -0700
@@ -57,6 +57,7 @@ void unset_vram_mapping(void *opaque);
#endif
void pci_unplug_netifs(void);
+void pci_unplug_scsi(void);
void destroy_hvm_domain(void);
void unregister_iomem(target_phys_addr_t start);
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/pci.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/pci.c 2010-02-18 14:49:36.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/pci.c 2010-02-18 14:53:47.000000000 -0700
@@ -892,6 +892,50 @@ void pci_unplug_netifs(void)
}
}
+void pci_unplug_scsi(void)
+{
+ PCIBus *bus;
+ PCIDevice *dev;
+ PCIIORegion *region;
+ int x;
+ int i;
+
+ /* We only support one PCI bus */
+ for (bus = first_bus; bus; bus = NULL) {
+ for (x = 0; x < 256; x++) {
+ dev = bus->devices[x];
+ if (dev &&
+ dev->config[0xa] == 0 &&
+ dev->config[0xb] == 1
+#ifdef CONFIG_PASSTHROUGH
+ && test_pci_devfn(x) != 1
+#endif
+ ) {
+ /* Found a scsi disk. Remove it from the bus. Note that
+ we don't free it here, since there could still be
+ references to it floating around. There are only
+ ever one or two structures leaked, and it's not
+ worth finding them all. */
+ bus->devices[x] = NULL;
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ region = &dev->io_regions[i];
+ if (region->addr == (uint32_t)-1 ||
+ region->size == 0)
+ continue;
+ fprintf(logfile, "region type %d at [%x,%x).\n",
+ region->type, region->addr,
+ region->addr+region->size);
+ if (region->type == PCI_ADDRESS_SPACE_IO) {
+ isa_unassign_ioport(region->addr, region->size);
+ } else if (region->type == PCI_ADDRESS_SPACE_MEM) {
+ unregister_iomem(region->addr);
+ }
+ }
+ }
+ }
+ }
+}
+
typedef struct {
PCIDevice dev;
PCIBus *bus;

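pci_unplug_scsi() above selects devices by their PCI class code: config-space byte 0x0b holds the base class (0x01 = mass-storage controller) and byte 0x0a the sub-class (0x00 = SCSI bus controller), which is exactly the config[0xa]/config[0xb] test in the loop. A minimal standalone version of that check, assuming a raw 256-byte config-space buffer:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_CLASS_SUB  0x0a  /* sub-class byte of PCI config space */
    #define PCI_CLASS_BASE 0x0b  /* base-class byte of PCI config space */

    /* Returns 1 when the config space describes a SCSI storage
       controller (base class 0x01, sub-class 0x00). */
    static int is_scsi_controller(const uint8_t *config)
    {
        return config[PCI_CLASS_SUB] == 0x00 &&
               config[PCI_CLASS_BASE] == 0x01;
    }

    int main(void)
    {
        uint8_t config[256] = {0};
        config[PCI_CLASS_SUB]  = 0x00;
        config[PCI_CLASS_BASE] = 0x01;
        printf("SCSI controller: %s\n",
               is_scsi_controller(config) ? "yes" : "no");
        return 0;
    }
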
View File

@@ -0,0 +1,86 @@
Index: xen-4.0.0-testing/xen/common/keyhandler.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/keyhandler.c
+++ xen-4.0.0-testing/xen/common/keyhandler.c
@@ -19,6 +19,7 @@
static struct keyhandler *key_table[256];
static unsigned char keypress_key;
+static bool_t alt_key_handling;
char keyhandler_scratch[1024];
@@ -115,6 +116,26 @@ static struct keyhandler dump_registers_
.desc = "dump registers"
};
+static DECLARE_TASKLET(dump_dom0_tasklet, NULL, 0);
+
+static void dump_dom0_action(unsigned long arg)
+{
+ struct vcpu *v = (void *)arg;
+
+ for ( ; ; ) {
+ vcpu_show_execution_state(v);
+ v = v->next_in_list;
+ if ( !v )
+ break;
+ if ( softirq_pending(smp_processor_id()) )
+ {
+ dump_dom0_tasklet.data = (unsigned long)v;
+ tasklet_schedule_cpu(&dump_dom0_tasklet, v->processor);
+ break;
+ }
+ }
+}
+
static void dump_dom0_registers(unsigned char key)
{
struct vcpu *v;
@@ -125,7 +146,17 @@ static void dump_dom0_registers(unsigned
printk("'%c' pressed -> dumping Dom0's registers\n", key);
for_each_vcpu ( dom0, v )
+ {
+ if ( alt_key_handling && softirq_pending(smp_processor_id()) )
+ {
+ tasklet_kill(&dump_dom0_tasklet);
+ tasklet_init(&dump_dom0_tasklet, dump_dom0_action,
+ (unsigned long)v);
+ tasklet_schedule_cpu(&dump_dom0_tasklet, v->processor);
+ return;
+ }
vcpu_show_execution_state(v);
+ }
}
static struct keyhandler dump_dom0_registers_keyhandler = {
@@ -425,8 +456,28 @@ static struct keyhandler do_debug_key_ke
.desc = "trap to xendbg"
};
+static void do_toggle_alt_key(unsigned char key, struct cpu_user_regs *regs)
+{
+ alt_key_handling = !alt_key_handling;
+ printk("'%c' pressed -> using %s key handling\n", key,
+ alt_key_handling ? "alternative" : "normal");
+}
+
+static struct keyhandler toggle_alt_keyhandler = {
+ .irq_callback = 1,
+ .u.irq_fn = do_toggle_alt_key,
+ .desc = "toggle alternative key handling"
+};
+
void __init initialize_keytable(void)
{
+ if ( num_present_cpus() > 16 )
+ {
+ alt_key_handling = 1;
+ printk(XENLOG_INFO "Defaulting to alternative key handling; "
+ "send 'A' to switch to normal mode.\n");
+ }
+ register_keyhandler('A', &toggle_alt_keyhandler);
register_keyhandler('d', &dump_registers_keyhandler);
register_keyhandler('h', &show_handlers_keyhandler);
register_keyhandler('q', &dump_domains_keyhandler);

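The alternative key handling above avoids the soft-lockup case by dumping vCPU state in batches: when softirq work is pending on the current CPU, the tasklet re-schedules itself on the next vCPU's processor instead of finishing the whole list in one uninterrupted pass. A toy model of that control flow in ordinary user-space C, with softirq_pending() and the tasklet machinery stood in by stubs:

    #include <stdio.h>

    #define NR_VCPUS 4

    /* Stand-in for softirq_pending(): pretend other work arrives
       after two dumps so the loop is forced to defer. */
    static int work_pending(int dumped) { return dumped >= 2; }

    static void dump_vcpu(int v) { printf("dumping vcpu %d\n", v); }

    int main(void)
    {
        int v = 0, dumped = 0;
        while (v < NR_VCPUS) {
            dump_vcpu(v++);
            dumped++;
            if (v < NR_VCPUS && work_pending(dumped)) {
                /* In the patch this is the point where the tasklet is
                   re-scheduled on the next vcpu's processor rather
                   than monopolising the current CPU. */
                printf("deferring vcpus %d..%d to a tasklet\n",
                       v, NR_VCPUS - 1);
                break;
            }
        }
        return 0;
    }
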
View File

@@ -55,7 +55,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
}
@@ -3100,7 +3109,7 @@ int shadow_enable(struct domain *d, u32
@@ -3102,7 +3111,7 @@ int shadow_enable(struct domain *d, u32
{
unsigned int r;
shadow_lock(d);

View File

@@ -1,6 +1,8 @@
--- a/tools/ioemu-remote/xenstore.c
+++ b/tools/ioemu-remote/xenstore.c
@@ -868,6 +868,18 @@ static void xenstore_process_dm_command_
Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -912,6 +912,18 @@ static void xenstore_process_dm_command_
}
snapshot_name = xs_read(xsh, XBT_NULL, path, &len);
@@ -19,8 +21,10 @@
} else if (!strncmp(command, "continue", len)) {
fprintf(logfile, "dm-command: continue after state save\n");
xen_pause_requested = 0;
--- a/tools/ioemu-remote/savevm.c
+++ b/tools/ioemu-remote/savevm.c
Index: xen-4.0.0-testing/tools/ioemu-remote/savevm.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/savevm.c
+++ xen-4.0.0-testing/tools/ioemu-remote/savevm.c
@@ -1096,6 +1096,35 @@ the_end:
return ret;
}
@@ -57,8 +61,10 @@
#ifndef CONFIG_DM
void do_savevm(const char *name)
--- a/tools/ioemu-remote/qemu-xen.h
+++ b/tools/ioemu-remote/qemu-xen.h
Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/qemu-xen.h
+++ xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
@@ -42,6 +42,7 @@ enum {
/* xen-vl-extra.c */

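The xenstore_process_dm_command_ hunks extend qemu-dm's control channel, which reads a length-delimited command string from xenstore and dispatches on it ("save", "continue", and with this series a snapshot variant). A reduced sketch of that dispatch, using only the two commands visible in the context lines above:

    #include <stdio.h>
    #include <string.h>

    /* Dispatch one device-model command, mirroring the strncmp()
       chain in xenstore_process_dm_command_(). */
    static void handle_dm_command(const char *command, size_t len)
    {
        if (!strncmp(command, "save", len))
            printf("dm-command: pause and save state\n");
        else if (!strncmp(command, "continue", len))
            printf("dm-command: continue after state save\n");
        else
            printf("dm-command: unknown command\n");
    }

    int main(void)
    {
        handle_dm_command("continue", strlen("continue"));
        return 0;
    }
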
View File

@@ -40,7 +40,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
char buf[MSG_SIZE];
+#ifndef QEMU_TOOL
+ char *snapshot;
+#endif
+
length = read(read_fd, buf, MSG_SIZE);
@@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -102,6 +102,8 @@ int xenstore_watch_new_callback(const ch
@@ -103,6 +103,8 @@ int xenstore_watch_new_callback(const ch
}
@@ -100,7 +100,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
static int pasprintf(char **buf, const char *fmt, ...)
{
va_list ap;
@@ -641,8 +643,33 @@ void xenstore_parse_domain_config(int hv
@@ -644,8 +646,33 @@ void xenstore_parse_domain_config(int hv
}
}
pstrcpy(bs->filename, sizeof(bs->filename), params);
@@ -135,7 +135,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
}
#endif
@@ -776,6 +803,23 @@ int xenstore_parse_disable_pf_config ()
@@ -779,6 +806,23 @@ int xenstore_parse_disable_pf_config ()
return disable_pf;
}

View File

@@ -1,5 +1,7 @@
--- a/tools/ioemu-remote/savevm.c
+++ b/tools/ioemu-remote/savevm.c
Index: xen-4.0.0-testing/tools/ioemu-remote/savevm.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/savevm.c
+++ xen-4.0.0-testing/tools/ioemu-remote/savevm.c
@@ -28,6 +28,7 @@
#include "sysemu.h"
#include "qemu-timer.h"
@@ -95,8 +97,10 @@
#ifndef CONFIG_DM
void do_savevm(const char *name)
--- a/tools/ioemu-remote/i386-dm/helper2.c
+++ b/tools/ioemu-remote/i386-dm/helper2.c
Index: xen-4.0.0-testing/tools/ioemu-remote/i386-dm/helper2.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/i386-dm/helper2.c
+++ xen-4.0.0-testing/tools/ioemu-remote/i386-dm/helper2.c
@@ -109,6 +109,9 @@ int send_vcpu = 0;
//the evtchn port for polling the notification,
evtchn_port_t *ioreq_local_port;
@@ -163,8 +167,10 @@
/* Wait to be allowed to continue */
while (xen_pause_requested) {
--- a/tools/ioemu-remote/qemu-xen.h
+++ b/tools/ioemu-remote/qemu-xen.h
Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/qemu-xen.h
+++ xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
@@ -34,6 +34,15 @@ void qemu_invalidate_map_cache(void)
#define mapcache_lock() ((void)0)
#define mapcache_unlock() ((void)0)
@@ -189,8 +195,10 @@
void xenstore_check_new_media_present(int timeout);
void xenstore_write_vncport(int vnc_display);
void xenstore_read_vncpasswd(int domid, char *pwbuf, size_t pwbuflen);
--- a/tools/ioemu-remote/xenstore.c
+++ b/tools/ioemu-remote/xenstore.c
Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -17,6 +17,7 @@
#include "exec-all.h"

View File

@@ -325,10 +325,10 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -233,6 +233,7 @@ XENAPI_CFG_TYPES = {
's3_integrity' : int,
@@ -235,6 +235,7 @@ XENAPI_CFG_TYPES = {
'superpages' : int,
'memory_sharing': int,
'pool_name' : str,
+ 'snapshotname': str,
}
@@ -537,7 +537,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -122,6 +122,14 @@ SUBCOMMAND_HELP = {
@@ -123,6 +123,14 @@ SUBCOMMAND_HELP = {
'Restore a domain from a saved state.'),
'save' : ('[-c|-f] <Domain> <CheckpointFile>',
'Save a domain state to restore later.'),
@@ -552,7 +552,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
'shutdown' : ('<Domain> [-waRH]', 'Shutdown a domain.'),
'top' : ('', 'Monitor a host and the domains in real time.'),
'unpause' : ('<Domain>', 'Unpause a paused domain.'),
@@ -316,6 +324,9 @@ SUBCOMMAND_OPTIONS = {
@@ -335,6 +343,9 @@ SUBCOMMAND_OPTIONS = {
('-c', '--checkpoint', 'Leave domain running after creating snapshot'),
('-f', '--force', 'Force to overwrite exist file'),
),
@@ -562,7 +562,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
'restore': (
('-p', '--paused', 'Do not unpause domain after restoring it'),
),
@@ -362,6 +373,10 @@ common_commands = [
@@ -385,6 +396,10 @@ common_commands = [
"restore",
"resume",
"save",
@@ -573,7 +573,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
"shell",
"shutdown",
"start",
@@ -395,6 +410,10 @@ domain_commands = [
@@ -418,6 +433,10 @@ domain_commands = [
"restore",
"resume",
"save",
@@ -584,7 +584,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
"shutdown",
"start",
"suspend",
@@ -815,6 +834,62 @@ def xm_event_monitor(args):
@@ -850,6 +869,62 @@ def xm_event_monitor(args):
#
#########################################################################
@@ -647,7 +647,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
def xm_save(args):
arg_check(args, "save", 2, 4)
@@ -3467,6 +3542,10 @@ commands = {
@@ -3694,6 +3769,10 @@ commands = {
"restore": xm_restore,
"resume": xm_resume,
"save": xm_save,

View File

@@ -27,8 +27,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
+int connected_disks = 0;
fd_list_entry_t *fd_start = NULL;
static void handle_blktap_iomsg(void* private);
@@ -541,6 +542,7 @@ static void handle_blktap_ctrlmsg(void*
extern char* get_snapshot_name(int devid);
@@ -547,6 +548,7 @@ static void handle_blktap_ctrlmsg(void*
/* Allocate the disk structs */
s = state_init();
@@ -36,7 +36,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
/*Open file*/
if (s == NULL || open_disk(s, path, msg->drivertype, msg->readonly)) {
@@ -591,7 +593,8 @@ static void handle_blktap_ctrlmsg(void*
@@ -629,7 +631,8 @@ static void handle_blktap_ctrlmsg(void*
case CTLMSG_CLOSE:
s = get_state(msg->cookie);
if (s) unmap_disk(s);

View File

@@ -1,7 +1,9 @@
--- 2010-01-06.orig/xen/arch/x86/platform_hypercall.c 2009-12-14 08:34:19.000000000 +0100
+++ 2010-01-06/xen/arch/x86/platform_hypercall.c 2010-01-06 11:19:05.000000000 +0100
@@ -21,7 +21,7 @@
#include <xen/acpi.h>
Index: xen-4.0.0-testing/xen/arch/x86/platform_hypercall.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/platform_hypercall.c
+++ xen-4.0.0-testing/xen/arch/x86/platform_hypercall.c
@@ -22,7 +22,7 @@
#include <xen/sched-if.h>
#include <asm/current.h>
#include <public/platform.h>
-#include <acpi/cpufreq/processor_perf.h>
@@ -9,7 +11,7 @@
#include <asm/edd.h>
#include <asm/mtrr.h>
#include "cpu/mtrr/mtrr.h"
@@ -62,6 +62,7 @@ static long cpu_down_helper(void *data)
@@ -63,6 +63,7 @@ static long cpu_down_helper(void *hdl, v
ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
{
ret_t ret = 0;
@@ -17,7 +19,7 @@
struct xen_platform_op curop, *op = &curop;
if ( !IS_PRIV(current->domain) )
@@ -474,6 +475,24 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
@@ -475,6 +476,24 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
op->u.mem_add.epfn,
op->u.mem_add.pxm);
break;
@@ -42,8 +44,10 @@
default:
ret = -ENOSYS;
break;
--- 2010-01-06.orig/xen/include/public/platform.h 2009-12-14 08:34:19.000000000 +0100
+++ 2010-01-06/xen/include/public/platform.h 2010-01-06 11:22:06.000000000 +0100
Index: xen-4.0.0-testing/xen/include/public/platform.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/public/platform.h
+++ xen-4.0.0-testing/xen/include/public/platform.h
@@ -355,6 +355,14 @@ struct xenpf_mem_hotadd
uint32_t flags;
};

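This hunk adds a new case to do_platform_op(), the single entry point that decodes the cmd field of a xen_platform_op and routes it to a handler, falling through to -ENOSYS for unknown operations. The shape of that dispatch as a standalone sketch (XENPF_example is a made-up opcode for illustration, not the one the patch defines):

    #include <stdio.h>

    enum { XENPF_example = 1000 };  /* hypothetical opcode */

    struct platform_op { int cmd; };

    static long do_platform_op_sketch(const struct platform_op *op)
    {
        long ret = 0;
        switch (op->cmd) {
        case XENPF_example:
            /* a new platform op slots in as one more case here */
            break;
        default:
            ret = -38; /* -ENOSYS: operation not implemented */
            break;
        }
        return ret;
    }

    int main(void)
    {
        struct platform_op op = { 0 };
        printf("unknown op -> %ld\n", do_platform_op_sketch(&op));
        return 0;
    }
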
View File

@@ -1,5 +1,7 @@
--- 2010-01-06.orig/xen/arch/x86/mm.c 2010-01-05 13:29:13.000000000 +0100
+++ 2010-01-06/xen/arch/x86/mm.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/mm.c
+++ xen-4.0.0-testing/xen/arch/x86/mm.c
@@ -154,6 +154,8 @@ unsigned long __read_mostly pdx_group_va
int opt_allow_hugepage;
boolean_param("allowhugepage", opt_allow_hugepage);
@@ -9,9 +11,11 @@
#define l1_disallow_mask(d) \
((d != dom_io) && \
(rangeset_is_empty((d)->iomem_caps) && \
--- 2010-01-06.orig/xen/arch/x86/traps.c 2009-12-17 12:20:22.000000000 +0100
+++ 2010-01-06/xen/arch/x86/traps.c 2010-01-06 11:22:26.000000000 +0100
@@ -1349,6 +1349,7 @@ asmlinkage void do_early_page_fault(stru
Index: xen-4.0.0-testing/xen/arch/x86/traps.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/traps.c
+++ xen-4.0.0-testing/xen/arch/x86/traps.c
@@ -1352,6 +1352,7 @@ asmlinkage void do_early_page_fault(stru
unsigned long *stk = (unsigned long *)regs;
printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
regs->cs, _p(regs->eip), _p(cr2), regs->error_code);
@@ -19,8 +23,10 @@
printk("Stack dump: ");
while ( ((long)stk & ((PAGE_SIZE - 1) & ~(BYTES_PER_LONG - 1))) != 0 )
printk("%p ", _p(*stk++));
--- 2010-01-06.orig/xen/arch/x86/x86_32/mm.c 2009-10-29 12:24:48.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_32/mm.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_32/mm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_32/mm.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_32/mm.c
@@ -122,6 +122,8 @@ void __init paging_init(void)
#undef CNT
#undef MFN
@@ -30,8 +36,10 @@
/* Create page tables for ioremap()/map_domain_page_global(). */
for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
{
--- 2010-01-06.orig/xen/arch/x86/x86_32/traps.c 2009-12-02 10:02:49.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_32/traps.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_32/traps.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_32/traps.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_32/traps.c
@@ -161,7 +161,8 @@ void show_page_walk(unsigned long addr)
l3t += (cr3 & 0xFE0UL) >> 3;
l3e = l3t[l3_table_offset(addr)];
@@ -62,8 +70,10 @@
printk(" L1[0x%03lx] = %"PRIpte" %08lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
unmap_domain_page(l1t);
--- 2010-01-06.orig/xen/arch/x86/x86_64/mm.c 2010-01-05 13:29:13.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_64/mm.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_64/mm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_64/mm.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_64/mm.c
@@ -725,6 +725,8 @@ void __init paging_init(void)
#undef CNT
#undef MFN
@@ -73,8 +83,10 @@
/* Create user-accessible L2 directory to map the MPT for compat guests. */
BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) !=
l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
--- 2010-01-06.orig/xen/arch/x86/x86_64/traps.c 2009-12-02 10:02:49.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_64/traps.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_64/traps.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_64/traps.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_64/traps.c
@@ -176,7 +176,8 @@ void show_page_walk(unsigned long addr)
l4t = mfn_to_virt(mfn);
l4e = l4t[l4_table_offset(addr)];
@@ -115,8 +127,10 @@
printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
}
--- 2010-01-06.orig/xen/include/asm-x86/mm.h 2009-12-17 12:20:22.000000000 +0100
+++ 2010-01-06/xen/include/asm-x86/mm.h 2010-01-06 11:23:39.000000000 +0100
Index: xen-4.0.0-testing/xen/include/asm-x86/mm.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/mm.h
+++ xen-4.0.0-testing/xen/include/asm-x86/mm.h
@@ -443,6 +443,8 @@ TYPE_SAFE(unsigned long,mfn);
#define SHARED_M2P_ENTRY (~0UL - 1UL)
#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY)

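Several of these hunks touch show_page_walk(), which prints one entry per paging level (L4 down to L1); each printed index is a 9-bit slice of the virtual address above the 12-bit page offset. A standalone sketch of that index arithmetic for x86-64 long mode, with an arbitrary example address:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PT_BITS     9  /* 512 entries per table level on x86-64 */

    /* Index of addr into the page table at the given level
       (3 = L4 ... 0 = L1). */
    static unsigned pt_index(unsigned long long addr, int level)
    {
        return (unsigned)((addr >> (PAGE_SHIFT + PT_BITS * level)) & 0x1ff);
    }

    int main(void)
    {
        unsigned long long addr = 0xffff8300bf5fe000ULL; /* example only */
        printf("L4[0x%03x] L3[0x%03x] L2[0x%03x] L1[0x%03x]\n",
               pt_index(addr, 3), pt_index(addr, 2),
               pt_index(addr, 1), pt_index(addr, 0));
        return 0;
    }
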
View File

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fadb3f78dfaf163464c6fcfed57a1f76a6d7cc2f65771bc9886800afdbb528bb
size 23224505
oid sha256:88c2cad04e93a909e405bee6f4c3dff2c6b12ea2485b6e7f1db4813cb74f2f38
size 23155997

View File

@@ -9,8 +9,8 @@ Index: xen-4.0.0-testing/Config.mk
-CONFIG_QEMU ?= $(QEMU_REMOTE)
+CONFIG_QEMU ?= ioemu-remote
QEMU_TAG ?= 575ed1016f6fba1c6a6cd32a828cb468bdee96bb
# Mon Feb 1 16:33:52 2010 +0000
QEMU_TAG ?= e5d14857cd67490bf956d97c8888c0be95ed3f78
# Thu Feb 18 15:36:29 2010 +0000
@@ -163,9 +163,9 @@ CONFIG_OCAML_XENSTORED ?= n
# Optional components
XENSTAT_XENTOP ?= y

View File

@@ -62,7 +62,7 @@ Index: xen-4.0.0-testing/tools/python/Makefile
+ --prefix="/usr" --root="$(DESTDIR)" --force
install-dtd: all
$(INSTALL_DIR) $(DESTDIR)$(SHAREDIR)
$(INSTALL_DIR) $(DESTDIR)$(SHAREDIR)/xen
Index: xen-4.0.0-testing/tools/xenstore/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/xenstore/Makefile

View File

@@ -123,7 +123,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
import xen.util.xsm.xsm as security
from xen.util import xsconstants
from xen.util import mkdir
@@ -2337,6 +2337,10 @@ class XendDomainInfo:
@@ -2342,6 +2342,10 @@ class XendDomainInfo:
deviceClass, config = self.info['devices'].get(dev_uuid)
self._waitForDevice(deviceClass, config['devid'])
@@ -134,7 +134,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
def _waitForDevice_destroy(self, deviceClass, devid, backpath):
return self.getDeviceController(deviceClass).waitForDevice_destroy(
devid, backpath)
@@ -3226,7 +3230,7 @@ class XendDomainInfo:
@@ -3230,7 +3234,7 @@ class XendDomainInfo:
devtype = devinfo[0]
disk = devinfo[1]['uname']
@@ -143,7 +143,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
# If this is a drbd volume, check if we need to activate it
if disk.find(":") != -1:
@@ -3237,8 +3241,17 @@ class XendDomainInfo:
@@ -3241,8 +3245,17 @@ class XendDomainInfo:
if state == 'Secondary':
os.system('/sbin/drbdadm primary ' + diskname)
@@ -163,7 +163,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
if mounted:
# This is a file, not a device. pygrub can cope with a
# file if it's raw, but if it's QCOW or other such formats
@@ -3254,7 +3267,8 @@ class XendDomainInfo:
@@ -3258,7 +3271,8 @@ class XendDomainInfo:
from xen.xend import XendDomain
dom0 = XendDomain.instance().privilegedDomain()
@@ -173,7 +173,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
fn = BOOTLOADER_LOOPBACK_DEVICE
try:
@@ -3264,8 +3278,10 @@ class XendDomainInfo:
@@ -3268,8 +3282,10 @@ class XendDomainInfo:
if mounted:
log.info("Unmounting %s from %s." %
(fn, BOOTLOADER_LOOPBACK_DEVICE))

View File

@@ -1,3 +1,51 @@
-------------------------------------------------------------------
Mon Feb 22 08:26:01 MST 2010 - jfehlig@novell.com
- bnc#556939 - Improve device map cleanup code in domUloader
-------------------------------------------------------------------
Sun Feb 21 21:34:23 MST 2010 - jfehlig@novell.com
- bnc#578910 - xm block-detach does not clean up xenstore
hotplug-cleanup-fix.patch
-------------------------------------------------------------------
Fri Feb 19 11:56:57 MST 2010 - carnold@novell.com
- bnc#579361 - Windows Server 2003 cannot wake up from standby in
sp1
hibernate.patch
-------------------------------------------------------------------
Fri Feb 19 09:49:56 MST 2010 - carnold@novell.com
- fate#308852: XEN CPU Pools
cpupools-core.patch
cpupools-core-fixup.patch
keyhandler-alternative.patch
cpu-pools-libxc.patch
cpu-pools-python.patch
cpu-pools-libxen.patch
cpu-pools-xmtest.patch
cpu-pools-docs.patch
-------------------------------------------------------------------
Thu Feb 18 15:18:59 MST 2010 - ksrinivasan@novell.com
- bnc#558760: Disable scsi devices when PV drivers are loaded.
-------------------------------------------------------------------
Tue Feb 16 08:35:08 MST 2010 - carnold@novell.com
- Update to changeset 20951 Xen 4.0.0 RC4 for sle11-sp1 beta5.
-------------------------------------------------------------------
Mon Feb 8 08:08:01 MST 2010 - carnold@novell.com
- bnc#572146 - SLES11 SP1 beta 2 Xen - BUG: soft lockup - CPU#31
stuck for 61s! [kstop/31:4512]
cpuidle-hint-v3.patch
-------------------------------------------------------------------
Fri Feb 5 08:16:39 MST 2010 - carnold@novell.com

View File

@@ -1,7 +1,7 @@
#
# spec file for package xen (Version 4.0.0_20900_01)
# spec file for package xen (Version 4.0.0_20978_01)
#
# Copyright (c) 2009 SUSE LINUX Products GmbH, Nuernberg, Germany.
# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -22,9 +22,9 @@ Name: xen
ExclusiveArch: %ix86 x86_64
%define xvers 4.0
%define xvermaj 4
%define changeset 20900
%define changeset 20978
%define xen_build_dir xen-4.0.0-testing
%define with_kmp 1
%define with_kmp 0
BuildRequires: LibVNCServer-devel SDL-devel automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
%if %suse_version >= 1030
BuildRequires: texlive texlive-latex
@@ -37,7 +37,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit
%if %{?with_kmp}0
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_20900_01
Version: 4.0.0_20978_01
Release: 1
License: GPL v2 only
Group: System/Kernel
@@ -90,7 +90,6 @@ Patch312: serial-split.patch
Patch313: xen-xm-top-needs-root.diff
Patch314: xen-max-free-mem.diff
Patch315: xen-disable-libxl.diff
Patch316: xen-disable-xenpaging.diff
Patch320: block-losetup-retry.diff
Patch321: block-flags.diff
Patch322: bridge-opensuse.patch
@@ -125,6 +124,14 @@ Patch359: qemu-console-retry.patch
Patch360: checkpoint-rename.patch
Patch361: xm-save-check-file.patch
Patch362: xm-create-xflag.patch
Patch363: cpupools-core.patch
Patch364: cpupools-core-fixup.patch
Patch365: keyhandler-alternative.patch
Patch366: cpu-pools-libxc.patch
Patch367: cpu-pools-python.patch
Patch368: cpu-pools-libxen.patch
Patch369: cpu-pools-xmtest.patch
Patch370: cpu-pools-docs.patch
# Patches for snapshot support
Patch400: snapshot-ioemu-save.patch
Patch401: snapshot-ioemu-restore.patch
@@ -147,6 +154,7 @@ Patch425: ioemu-bdrv-open-CACHE_WB.patch
Patch426: xen-ioemu-hvm-pv-support.diff
Patch427: qemu-dm-segfault.patch
Patch428: shadow.patch
Patch429: hibernate.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
# Hypervisor and PV driver Patches
@@ -165,6 +173,7 @@ Patch511: pv-driver-build.patch
Patch512: supported_module.diff
Patch513: magic_ioport_compat.patch
Patch650: disable_emulated_device.diff
Patch651: ioemu-disable-scsi.patch
# novell_shim patches
Patch700: hv_tools.patch
Patch701: hv_xen_base.patch
@@ -520,7 +529,6 @@ Authors:
%patch313 -p1
%patch314 -p1
%patch315 -p1
%patch316 -p1
#%patch320 -p1
#%patch321 -p1
%patch322 -p1
@@ -554,8 +562,16 @@ Authors:
%patch360 -p1
%patch361 -p1
%patch362 -p1
%patch363 -p1
%patch364 -p1
%patch365 -p1
%patch366 -p1
%patch367 -p1
%patch368 -p1
%patch369 -p1
%patch370 -p1
%patch400 -p1
#%patch401 -p1
%patch401 -p1
%patch402 -p1
%patch403 -p1
%patch410 -p1
@@ -573,6 +589,7 @@ Authors:
%patch426 -p1
%patch427 -p1
%patch428 -p1
%patch429 -p1
%patch450 -p1
%patch500 -p1
%patch501 -p1
@@ -589,6 +606,7 @@ Authors:
%patch512 -p1
%patch513 -p1
%patch650 -p1
%patch651 -p1
%patch700 -p1
%patch701 -p1
%patch702 -p1
@@ -762,8 +780,13 @@ rm -f $RPM_BUILD_ROOT/usr/sbin/netfix
rm -f $RPM_BUILD_ROOT/%{_libdir}/python%{pyver}/site-packages/*.egg-info
rm -rf $RPM_BUILD_ROOT/html
rm -rf $RPM_BUILD_ROOT/usr/share/doc/xen/README.*
rm -f $RPM_BUILD_ROOT/usr/share/create.dtd
rm -f $RPM_BUILD_ROOT/usr/share/xen/create.dtd
rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
rm -f $RPM_BUILD_ROOT/%{_bindir}/qemu-img-xen
rm -f $RPM_BUILD_ROOT/%{_bindir}/qemu-nbd-xen
# FATE feature for remus rejected
rm -f $RPM_BUILD_ROOT/%{_bindir}/remus
rm -rf $RPM_BUILD_ROOT/%{_libdir}/python%{pyver}/site-packages/xen/remus
# This is necessary because of the build of libconfig for libxl
#rm -rf $RPM_BUILD_ROOT/$RPM_BUILD_ROOT
@@ -798,11 +821,11 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
/usr/bin/xenstore*
/usr/bin/xentrace*
/usr/bin/pygrub
/usr/bin/qemu-img-xen
/usr/bin/qemu-nbd-xen
#/usr/bin/qemu-img-xen
#/usr/bin/qemu-nbd-xen
/usr/bin/tapdisk-ioemu
/usr/bin/gdbserver-xen
/usr/bin/remus
#/usr/bin/remus
/usr/sbin/blktapctrl
/usr/sbin/flask-loadpolicy
/usr/sbin/flask-getenforce
@@ -954,6 +977,18 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
%post tools
%{fillup_and_insserv -y -n xend xend}
%{fillup_and_insserv -y -n xendomains xendomains}
if [ -f /usr/bin/qemu-img ]; then
if [ -f /usr/bin/qemu-img-xen ]; then
rm /usr/bin/qemu-img-xen
fi
ln -s /usr/bin/qemu-img /usr/bin/qemu-img-xen
fi
if [ -f /usr/bin/qemu-nbd ]; then
if [ -f /usr/bin/qemu-nbd-xen ]; then
rm /usr/bin/qemu-nbd-xen
fi
ln -s /usr/bin/qemu-nbd /usr/bin/qemu-nbd-xen
fi
%preun tools
%{stop_on_removal xendomains xend}
@@ -961,6 +996,12 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
%postun tools
%{restart_on_update xend}
%{insserv_cleanup}
if [ -f /usr/bin/qemu-img-xen ]; then
rm /usr/bin/qemu-img-xen
fi
if [ -f /usr/bin/qemu-nbd-xen ]; then
rm /usr/bin/qemu-nbd-xen
fi
%post libs
/sbin/ldconfig

View File

@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -3916,6 +3916,14 @@ class XendDomainInfo:
@@ -3920,6 +3920,14 @@ class XendDomainInfo:
if not config.has_key('backend'):
config['backend'] = "00000000-0000-0000-0000-000000000000"

View File

@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2308,7 +2308,7 @@ class XendDomainInfo:
@@ -2313,7 +2313,7 @@ class XendDomainInfo:
# To prohibit directory traversal
based_name = os.path.basename(self.info['name_label'])

View File

@@ -83,7 +83,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
from xen.util.blkif import parse_uname
import xen.util.xsm.xsm as security
from xen.util import xsconstants
@@ -465,6 +466,7 @@ class XendDomainInfo:
@@ -466,6 +467,7 @@ class XendDomainInfo:
if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
try:
@@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
XendTask.log_progress(0, 30, self._constructDomain)
XendTask.log_progress(31, 60, self._initDomain)
@@ -2953,6 +2955,11 @@ class XendDomainInfo:
@@ -2972,6 +2974,11 @@ class XendDomainInfo:
self._stateSet(DOM_STATE_HALTED)
self.domid = None # Do not push into _stateSet()!
@@ -103,7 +103,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
finally:
self.refresh_shutdown_lock.release()
@@ -4454,6 +4461,74 @@ class XendDomainInfo:
@@ -4478,6 +4485,74 @@ class XendDomainInfo:
def has_device(self, dev_class, dev_uuid):
return (dev_uuid in self.info['%s_refs' % dev_class.lower()])