Accepting request 33724 from Virtualization

Copy from Virtualization/xen based on submit request 33724 from user charlesa

OBS-URL: https://build.opensuse.org/request/show/33724
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/xen?expand=0&rev=84
This commit is contained in:
OBS User autobuild 2010-03-02 00:46:56 +00:00 committed by Git OBS Bridge
commit 7418cbbf9e
56 changed files with 11851 additions and 299 deletions

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2863,7 +2863,7 @@ class XendDomainInfo:
@@ -2902,7 +2902,7 @@ class XendDomainInfo:
self.guest_bitsize = self.image.getBitSize()
# Make sure there's enough RAM available for the domain

View File

@ -31,6 +31,7 @@ optional packages are also installed:
vm-install (Optional, to install VMs)
python-gtk (Optional, to install VMs graphically)
virt-manager (Optional, to manage VMs graphically)
virt-viewer (Optional, to view VMs outside virt-manager)
tightvnc (Optional, to view VMs outside virt-manager)
Additional packages:
@ -328,7 +329,7 @@ documentation for workarounds.
Networking
----------
Your virtual machines become much more useful if your can reach them via the
Your virtual machines become much more useful if you can reach them via the
network. Starting with openSUSE11.1 and SLE11, networking in domain 0 is
configured and managed via YaST. The yast2-networking module can be used
to create and manage bridged networks. During initial installation, a bridged

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -225,6 +225,7 @@ static int open_disk(struct td_state *s,
@@ -227,6 +227,7 @@ static int open_disk(struct td_state *s,
BlockDriver* drv;
char* devname;
static int devnumber = 0;
@ -10,7 +10,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
int i;
DPRINTF("Opening %s as blktap%d\n", path, devnumber);
@@ -247,7 +248,7 @@ static int open_disk(struct td_state *s,
@@ -249,7 +250,7 @@ static int open_disk(struct td_state *s,
DPRINTF("%s driver specified\n", drv ? drv->format_name : "No");
/* Open the image */
@ -23,7 +23,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -133,7 +133,8 @@ static void insert_media(void *opaque)
@@ -136,7 +136,8 @@ static void insert_media(void *opaque)
else
format = &bdrv_raw;
@ -33,7 +33,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
#ifdef CONFIG_STUBDOM
{
char *buf, *backend, *params_path, *params;
@@ -397,9 +398,9 @@ void xenstore_parse_domain_config(int hv
@@ -400,9 +401,9 @@ void xenstore_parse_domain_config(int hv
{
char **e_danger = NULL;
char *buf = NULL;
@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
unsigned int len, num, hd_index, pci_devid = 0;
BlockDriverState *bs;
BlockDriver *format;
@@ -461,7 +462,8 @@ void xenstore_parse_domain_config(int hv
@@ -464,7 +465,8 @@ void xenstore_parse_domain_config(int hv
}
for (i = 0; i < num; i++) {
@ -55,8 +55,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
/* read the backend path */
xenstore_get_backend_path(&bpath, "vbd", danger_path, hvm_domid, e_danger[i]);
if (bpath == NULL)
@@ -560,6 +562,17 @@ void xenstore_parse_domain_config(int hv
}
@@ -550,6 +552,17 @@ void xenstore_parse_domain_config(int hv
format = &bdrv_raw;
}
+ /* read the mode of the device */
@ -73,7 +73,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
#if 0
/* Phantom VBDs are disabled because the use of paths
* from guest-controlled areas in xenstore is unsafe.
@@ -612,7 +625,7 @@ void xenstore_parse_domain_config(int hv
@@ -617,7 +630,7 @@ void xenstore_parse_domain_config(int hv
#ifdef CONFIG_STUBDOM
if (pasprintf(&danger_buf, "%s/device/vbd/%s", danger_path, e_danger[i]) == -1)
continue;
@ -82,12 +82,12 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
pstrcpy(bs->filename, sizeof(bs->filename), params);
}
#else
@@ -641,7 +654,7 @@ void xenstore_parse_domain_config(int hv
@@ -646,7 +659,7 @@ void xenstore_parse_domain_config(int hv
}
}
pstrcpy(bs->filename, sizeof(bs->filename), params);
- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0)
+ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0)
- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) {
+ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) {
fprintf(stderr, "qemu: could not open vbd '%s' or hard disk image '%s' (drv '%s' format '%s')\n", buf, params, drv ? drv : "?", format ? format->format_name : "0");
}
} else {
char* snapshot = get_snapshot_name(atoi(e_danger[i]));

View File

@ -741,7 +741,7 @@ Index: xen-4.0.0-testing/tools/blktap/lib/blktaplib.h
===================================================================
--- xen-4.0.0-testing.orig/tools/blktap/lib/blktaplib.h
+++ xen-4.0.0-testing/tools/blktap/lib/blktaplib.h
@@ -219,6 +219,7 @@ typedef struct msg_pid {
@@ -220,6 +220,7 @@ typedef struct msg_pid {
#define DISK_TYPE_RAM 3
#define DISK_TYPE_QCOW 4
#define DISK_TYPE_QCOW2 5

View File

@ -5,7 +5,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -3262,7 +3262,7 @@ class XendDomainInfo:
@@ -3286,7 +3286,7 @@ class XendDomainInfo:
(fn, BOOTLOADER_LOOPBACK_DEVICE))
vbd = {
@ -18,7 +18,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -396,9 +396,9 @@ void xenstore_parse_domain_config(int hv
@@ -397,9 +397,9 @@ void xenstore_parse_domain_config(int hv
{
char **e_danger = NULL;
char *buf = NULL;
@ -30,7 +30,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
unsigned int len, num, hd_index, pci_devid = 0;
BlockDriverState *bs;
BlockDriver *format;
@@ -438,6 +438,14 @@ void xenstore_parse_domain_config(int hv
@@ -439,6 +439,14 @@ void xenstore_parse_domain_config(int hv
e_danger[i]);
if (bpath == NULL)
continue;
@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
/* read the name of the device */
if (pasprintf(&buf, "%s/dev", bpath) == -1)
continue;
@@ -712,6 +720,7 @@ void xenstore_parse_domain_config(int hv
@@ -715,6 +723,7 @@ void xenstore_parse_domain_config(int hv
free(danger_type);
free(params);
free(dev);

View File

@ -1,7 +1,5 @@
Index: xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py
+++ b/tools/python/xen/xend/server/HalDaemon.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+# -*- mode: python; -*-
@ -246,10 +244,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py
+ print 'Falling off end'
+
+
Index: xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py
+++ b/tools/python/xen/xend/server/Hald.py
@@ -0,0 +1,125 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@ -376,10 +372,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py
+ watcher.run()
+ time.sleep(10)
+ watcher.shutdown()
Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py
+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py
+++ b/tools/python/xen/xend/server/SrvServer.py
@@ -56,6 +56,7 @@ from xen.web.SrvDir import SrvDir
from SrvRoot import SrvRoot
@ -397,15 +391,22 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
def create():
root = SrvDir()
Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -513,6 +513,19 @@ void xenstore_parse_domain_config(int hv
params = newparams;
format = &bdrv_raw;
}
+ /* if cdrom pyhsical put a watch on media-present */
--- a/tools/ioemu-remote/xenstore.c
+++ b/tools/ioemu-remote/xenstore.c
@@ -18,6 +18,7 @@
#include "exec-all.h"
#include "sysemu.h"
+#include "console.h"
#include "hw.h"
#include "pci.h"
#include "qemu-timer.h"
@@ -548,6 +549,21 @@ void xenstore_parse_domain_config(int hv
#endif
bs = bdrv_new(dev);
+
+ /* if cdrom physical put a watch on media-present */
+ if (bdrv_get_type_hint(bs) == BDRV_TYPE_CDROM) {
+ if (drv && !strcmp(drv, "phy")) {
+ if (pasprintf(&buf, "%s/media-present", bpath) != -1) {
@ -418,14 +419,15 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
+ }
+ }
+ }
#if 0
/* Phantom VBDs are disabled because the use of paths
@@ -938,6 +951,50 @@ void xenstore_record_dm_state(const char
+
/* check if it is a cdrom */
if (danger_type && !strcmp(danger_type, "cdrom")) {
bdrv_set_type_hint(bs, BDRV_TYPE_CDROM);
@@ -938,6 +954,50 @@ void xenstore_record_dm_state(const char
xenstore_record_dm("state", state);
}
+void xenstore_process_media_change_event(char **vec)
+static void xenstore_process_media_change_event(char **vec)
+{
+ char *media_present = NULL;
+ unsigned int len;
@ -472,7 +474,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
void xenstore_process_event(void *opaque)
{
char **vec, *offset, *bpath = NULL, *buf = NULL, *drv = NULL, *image = NULL;
@@ -968,6 +1025,11 @@ void xenstore_process_event(void *opaque
@@ -968,6 +1028,11 @@ void xenstore_process_event(void *opaque
xenstore_watch_callbacks[i].cb(vec[XS_WATCH_TOKEN],
xenstore_watch_callbacks[i].opaque);

1484
cpu-pools-docs.patch Normal file

File diff suppressed because it is too large Load Diff

360
cpu-pools-libxc.patch Normal file
View File

@ -0,0 +1,360 @@
Index: xen-4.0.0-testing/tools/libxc/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/Makefile
+++ xen-4.0.0-testing/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
CTRL_SRCS-y += xc_core.c
CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y += xc_cpupool.c
CTRL_SRCS-y += xc_domain.c
CTRL_SRCS-y += xc_evtchn.c
CTRL_SRCS-y += xc_misc.c
Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id)
+{
+ int err;
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+ domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+ XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+ domctl.u.cpupool_op.sched_id = sched_id;
+ if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+ return err;
+
+ *ppoolid = domctl.u.cpupool_op.cpupool_id;
+ return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+ uint32_t first_poolid,
+ uint32_t n_max,
+ xc_cpupoolinfo_t *info)
+{
+ int err = 0;
+ int p;
+ uint32_t poolid = first_poolid;
+ uint8_t local[sizeof (info->cpumap)];
+ DECLARE_DOMCTL;
+
+ memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+ for (p = 0; p < n_max; p++)
+ {
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ break;
+ }
+ err = do_domctl_save(xc_handle, &domctl);
+ unlock_pages(local, sizeof (local));
+
+ if ( err < 0 )
+ break;
+
+ info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+ info->sched_id = domctl.u.cpupool_op.sched_id;
+ info->n_dom = domctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+ poolid = domctl.u.cpupool_op.cpupool_id + 1;
+ info++;
+ }
+
+ if ( p == 0 )
+ return err;
+
+ return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.domid = domid;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+ uint64_t *cpumap)
+{
+ int err;
+ uint8_t local[sizeof (*cpumap)];
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ return err;
+ }
+
+ err = do_domctl_save(xc_handle, &domctl);
+ unlock_pages(local, sizeof (local));
+
+ if (err < 0)
+ return err;
+
+ bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+ return 0;
+}
Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
+++ xen-4.0.0-testing/tools/libxc/xc_domain.c
@@ -6,6 +6,7 @@
* Copyright (c) 2003, K A Fraser.
*/
+#include <stdarg.h>
#include "xc_private.h"
#include "xg_save_restore.h"
#include <xen/memory.h>
@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
- uint32_t *pdomid)
+ uint32_t *pdomid, ...)
{
int err;
+ va_list ap;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_createdomain;
domctl.domain = (domid_t)*pdomid;
domctl.u.createdomain.ssidref = ssidref;
domctl.u.createdomain.flags = flags;
+ if ( flags & XEN_DOMCTL_CDF_pool ) {
+ va_start(ap, pdomid);
+ domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
+ va_end(ap);
+ }
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
return err;
@@ -206,6 +213,7 @@ int xc_domain_getinfo(int xc_handle,
info->cpu_time = domctl.u.getdomaininfo.cpu_time;
info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+ info->cpupool = domctl.u.getdomaininfo.cpupool;
memcpy(info->handle, domctl.u.getdomaininfo.handle,
sizeof(xen_domain_handle_t));
Index: xen-4.0.0-testing/tools/libxc/xc_private.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
+++ xen-4.0.0-testing/tools/libxc/xc_private.h
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
return ret;
}
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+ int ret;
+
+ do
+ {
+ ret = do_domctl(xc_handle, domctl);
+ }
+ while ( (ret < 0 ) && (errno == EAGAIN) );
+
+ return ret;
+}
+
static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
int ret = -1;
Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
+++ xen-4.0.0-testing/tools/libxc/xenctrl.h
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
unsigned int nr_online_vcpus;
unsigned int max_vcpu_id;
xen_domain_handle_t handle;
+ unsigned int cpupool;
} xc_dominfo_t;
typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
- uint32_t *pdomid);
+ uint32_t *pdomid, ...);
/* Functions to produce a dump of a given domain
@@ -500,6 +501,100 @@ int xc_domain_setdebugging(int xc_handle
unsigned int enable);
/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+ uint32_t cpupool_id;
+ uint32_t sched_id;
+ uint32_t n_dom;
+ uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+ uint32_t first_poolid,
+ uint32_t n_max,
+ xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+ uint64_t *cpumap);
+
+
+/*
* EVENT CHANNEL FUNCTIONS
*/

2180
cpu-pools-libxen.patch Normal file

File diff suppressed because it is too large Load Diff

2543
cpu-pools-python.patch Normal file

File diff suppressed because it is too large Load Diff

838
cpu-pools-xmtest.patch Normal file
View File

@ -0,0 +1,838 @@
Index: xen-4.0.0-testing/tools/xm-test/configure.ac
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/configure.ac
+++ xen-4.0.0-testing/tools/xm-test/configure.ac
@@ -161,6 +161,7 @@ AC_CONFIG_FILES([
tests/vtpm/Makefile
tests/xapi/Makefile
tests/enforce_dom0_cpus/Makefile
+ tests/cpupool/Makefile
lib/XmTestReport/xmtest.py
lib/XmTestLib/config.py
])
Index: xen-4.0.0-testing/tools/xm-test/grouptest/cpupool
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/grouptest/cpupool
@@ -0,0 +1 @@
+cpupool
Index: xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/NetConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/lib/XmTestLib/NetConfig.py
+++ xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/NetConfig.py
@@ -56,17 +56,21 @@ def getXendNetConfig():
val = pin.get_val()
while val[0] != 'network-script':
val = pin.get_val()
-
- # split network command into script name and its parameters
- sub_val = val[1].split()
- if sub_val[0] == "network-bridge":
+
+ if val[0] != 'network-script' or len(val) < 2:
+ # entry network-script not found or no type specified
netenv = "bridge"
- elif sub_val[0] == "network-route":
- netenv = "route"
- elif sub_val[0] == "network-nat":
- netenv = "nat"
else:
- raise NetworkError("Failed to get network env from xend config")
+ # split network command into script name and its parameters
+ sub_val = val[1].split()
+ if sub_val[0] == "network-bridge":
+ netenv = "bridge"
+ elif sub_val[0] == "network-route":
+ netenv = "route"
+ elif sub_val[0] == "network-nat":
+ netenv = "nat"
+ else:
+ raise NetworkError("Failed to get network env from xend config")
configfile.close()
return netenv
Index: xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/XenDomain.py
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/lib/XmTestLib/XenDomain.py
+++ xen-4.0.0-testing/tools/xm-test/lib/XmTestLib/XenDomain.py
@@ -181,6 +181,7 @@ class XenDomain:
if not self.isManaged:
ret, output = traceCommand("xm create %s" % self.config)
+ print self.config
else:
ret, output = traceCommand("xm new %s" % self.config)
if ret != 0:
Index: xen-4.0.0-testing/tools/xm-test/runtest.sh
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/runtest.sh
+++ xen-4.0.0-testing/tools/xm-test/runtest.sh
@@ -91,7 +91,7 @@ runnable_tests() {
echo "Error: ramdisk/initrd.img is from an old version, or is not for this "
echo "architecture ($ARCH)."
echo "You need to build a ramdisk from at least ${XM_TEST_MAJ}.${XM_TEST_MIN}"
- exit 1
+ #exit 1
fi
# See if xend is running
Index: xen-4.0.0-testing/tools/xm-test/tests/Makefile.am
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/tests/Makefile.am
+++ xen-4.0.0-testing/tools/xm-test/tests/Makefile.am
@@ -28,7 +28,8 @@ SUBDIRS = \
vcpu-pin \
vtpm \
enforce_dom0_cpus \
- save restore migrate
+ save restore migrate \
+ cpupool
EXTRA_DIST = $(SUBDIRS) Makefile.am.template
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/01_cpupool_basic_pos.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/01_cpupool_basic_pos.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+
+
+#
+# Check output of xm info. It must include field 'free_cpus'
+# The value must be between 0 - nr_cpus
+#
+free_cpus = getInfo("free_cpus")
+if free_cpus == "":
+ FAIL("Missing 'free_cpus' entry in xm info output")
+if int(free_cpus) not in range(int(getInfo("nr_cpus")) + 1):
+ FAIL("Wrong value of 'free_cpus' (%s)" % int(free_cpus))
+
+
+#
+# Check output of xm list -l. It must contain the key 'pool_name'
+# If XM_USES_API is set, output must also contain 'cpu_pool'.
+#
+status, output = traceCommand("xm list -l Domain-0")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("pool_name Pool-0", output):
+ FAIL("Missing or wrong attribute 'pool_name' in output of 'xm list -l'")
+if os.getenv("XM_USES_API"):
+ if not re.search("cpu_pool (.+)", output):
+ FAIL("Missing or wrong attribute 'cpu_pool' in output of 'xm list -l'")
+
+#
+# Test pool selection option of xm list.
+#
+status, output = traceCommand("xm list --pool=Pool-0")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("Domain-0 +0 +", output):
+ FAIL("Missing 'Domain-0' in Pool-0")
+
+status, output = traceCommand("xm list --pool=Dummy-Pool")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if len(output.splitlines()) != 1:
+ FAIL("Wrong pool selection; output must be empty")
+
+
+#
+# Create a Domain without pool specification.
+# Default pool is Pool-0
+#
+name = "TestDomPool-1"
+domain = XmTestDomain(name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+if not isDomainRunning(name):
+ FAIL("Couldn't start domain without pool specification")
+
+status, output = traceCommand("xm list -l %s" % name)
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("pool_name Pool-0", output):
+ FAIL("Missing or wrong attribute 'pool_name' in output of 'xm list -l %s'" % name)
+
+destroyAllDomUs()
+
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/02_cpupool_manage_pos.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/02_cpupool_manage_pos.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Description:
+# Verify commands pool-new and pool-delete.
+#
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+checkRequirements()
+
+#
+# Check output of xm pool-list (of Pool-0)
+#
+status, output = traceCommand("xm pool-list Pool-0")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+lines = output.splitlines()
+if len(lines) != 2:
+ FAIL("Wrong output of xm pool-list Pool-0 (%s)" % lines)
+if not re.search("Pool-0 +[0-9]+ +credit +y +[0-9]", lines[1]):
+ FAIL("Wrong output of xm pool-list Pool-0 (%s)" % lines)
+
+#
+# Check output of xm pool-list -l (of Pool-0)
+#
+status, output = traceCommand("xm pool-list Pool-0 -l")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("name_label Pool-0", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'name_label'")
+if not re.search("started_VMs 00000000-0000-0000-0000-000000000000", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'started_VMs'")
+if not re.search("started_VM_names Domain-0", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'started_VMi_names'")
+
+
+#
+# Create a pool from pool1.cfg
+#
+cmd = "xm pool-new pool1.cfg name=Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("Pool-1 +1 +credit", output):
+ FAIL("Missing or wrong pool definition for 'Pool-1'")
+
+
+#
+# check persistence of pool; restart xend
+#
+restartXend()
+
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("Pool-1 +1 +credit", output):
+ FAIL("Missing or wrong pool definition for 'Pool-1'")
+
+
+#
+# Delete pool
+#
+deletePool("Pool-1")
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if re.search("Pool-1 +1 +credit", output):
+ FAIL("'Pool-1' not deleted")
+
+
+#
+# create / start / check / destroy / delete a managed pool
+#
+cmd = "xm pool-new pool1.cfg"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-start Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+restartXend()
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+destroyPool("Pool-1")
+deletePool("Pool-1")
+
+cmd = "xm pool-list Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if re.search("Pool-1 +1 +credit", output):
+ FAIL("'Pool-1' not deleted")
+
+
+#
+# create / check / destroy a unmanaged pool
+#
+cmd = "xm pool-create pool1.cfg"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+restartXend()
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+destroyPool("Pool-1", True)
+
+cmd = "xm pool-list"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if re.search("Pool-1", output):
+ FAIL("'Pool-1' not deleted")
+
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/03_cpupool_domain.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/03_cpupool_domain.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+
+checkRequirements()
+
+#
+# create Pool-1 with 1 CPU and start a VM
+#
+createStdPool()
+name = "TestDomPool-1"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-1'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+cmd = "xm list --pool=Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search(name, output):
+ FAIL("%s; missing '%s' in Pool-1" % (cmd,name))
+
+domain.stop()
+waitForDomain(name)
+destroyPool("Pool-1", True)
+
+
+
+#
+# create Pool-1 with 1 CPU, add a second CPU
+# start a VM (with vpcu=3) add a third CPU
+# remove 2 CPUs from pool
+# create Pool-1 with 1 CPU and start a VM
+#
+pool_names = ['Pool-1', 'Pool-2']
+createStdPool({'name' : pool_names[0], 'cpus' : '"1"'})
+name = "TestDomPool-1"
+cmd = "xm pool-cpu-add Pool-1 2"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+domain = XmTestDomain(extraConfig={ 'pool' : 'Pool-1'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+cmd = "xm pool-cpu-add Pool-1 3"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-cpu-remove Pool-1 2"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+cmd = "xm pool-cpu-remove Pool-1 3"
+
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+
+createStdPool({'name' : pool_names[1]})
+name2 = "TestDomPool-2"
+domain2 = XmTestDomain(extraConfig={ 'pool' : 'Pool-2'}, name=name2)
+try:
+ domain2.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+
+domain2.stop()
+domain.stop()
+
+waitForDomain(name)
+waitForDomain(name2)
+
+for pool in pool_names:
+ destroyPool(pool, True)
+
+
+
+#
+# Create 2 pools with 1 cpu per pool.
+# Create three domains in each pool, with 1,2,3 VCPUs
+# Switch a third cpu between the pools.
+#
+pool_names = ['Pool-1', 'Pool-2']
+domains = {}
+cpu=3
+
+for pool in pool_names:
+ createStdPool({'name' : pool})
+ for dom_nr in range(3):
+ name = "TestDom%s-%s" % (pool, dom_nr)
+ domains[name] = XmTestDomain(extraConfig={'pool' : pool},
+ name=name)
+ try:
+ domains[name].start(noConsole=True)
+ except DomainError, ex:
+ FAIL(str(ex))
+
+cmd_add_1 = "xm pool-cpu-add Pool-1 %s" % cpu
+cmd_rem_1 = "xm pool-cpu-remove Pool-1 %s" % cpu
+cmd_add_2 = "xm pool-cpu-add Pool-2 %s" % cpu
+cmd_rem_2 = "xm pool-cpu-remove Pool-2 %s" % cpu
+
+for i in range(25):
+ traceCommand(cmd_add_1)
+ traceCommand(cmd_rem_1)
+ traceCommand(cmd_add_2)
+ traceCommand(cmd_rem_2)
+
+destroyAllDomUs()
+for pool in pool_names:
+ destroyPool(pool, True)
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/04_cpupool_migrate.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/04_cpupool_migrate.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+
+
+#
+# Check requirements of test case
+# - min 2 free cpus (not assigned to a pool)
+#
+if int(getInfo("free_cpus")) < 2:
+ SKIP("Need at least 2 free cpus")
+
+
+
+#
+# Create 2 pools with one cpu per pool.
+#
+createStdPool({'name' : 'Pool-1'})
+createStdPool({'name' : 'Pool-2'})
+
+
+
+#
+# Create a domain with vcpus=1 in Pool-0.
+# Migrate it to one of the created pools, afterwards to the other pool
+#
+name = "TestDomPool-1"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-0'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+if not domInPool(name, 'Pool-0'):
+ FAIL("missing '%s' in Pool-0" % name)
+
+if not migrateToPool(name, 'Pool-1'):
+ FAIL("missing '%s' in Pool-1" % name)
+if not migrateToPool(name, 'Pool-2'):
+ FAIL("missing '%s' in Pool-2" % name)
+
+
+
+#
+# Create a domain in Pool-0.
+# Migrate it to one of the created pools, afterwards to the other pool
+#
+name = "TestDomPool-2"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-0'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(e))
+if not domInPool(name, 'Pool-0'):
+ FAIL("missing '%s' in Pool-0" % name)
+
+if not migrateToPool(name, 'Pool-1'):
+ FAIL("missing '%s' in Pool-1" % name)
+if not migrateToPool(name, 'Pool-2'):
+ FAIL("missing '%s' in Pool-2" % name)
+
+
+
+#
+# Migrate other domains between pools
+#
+for cnt in range(10):
+ for pool in ['Pool-0', 'Pool-1', 'Pool-2']:
+ for domain in getRunningDomains():
+ if domain != 'Domain-0':
+ if not migrateToPool(domain, pool):
+ FAIL("missing '%s' in %s" % (domain, pool))
+
+
+#
+# Cleanup
+#
+cleanupPoolsDomains()
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/Makefile.am
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/Makefile.am
@@ -0,0 +1,22 @@
+SUBDIRS =
+
+TESTS = 01_cpupool_basic_pos.test \
+ 02_cpupool_manage_pos.test \
+ 03_cpupool_domain.test \
+ 04_cpupool_migrate.test
+
+EXTRA_DIST = $(TESTS)
+
+TESTS_ENVIRONMENT=@TENV@
+
+%.test: %.py
+ cp $< $@
+ chmod +x $@
+
+clean-local: am_config_clean-local
+
+am_config_clean-local:
+ rm -f *test
+ rm -f *log
+ rm -f *~
+
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/pool1.cfg
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/pool1.cfg
@@ -0,0 +1 @@
+name="Pool-1"
Index: xen-4.0.0-testing/tools/xm-test/tests/cpupool/pools.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/cpupool/pools.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+
+
+from XmTestLib import *
+
+def checkRequirements():
+ # - min 4 cpus
+ # - only Pool-0 defined
+ nr_cpus = int(getInfo("nr_cpus"))
+ if nr_cpus < 4:
+ SKIP("Need at least 4 cpus for pool tests")
+ if len(getPoolList()) > 1:
+ SKIP("More than one pool already defined")
+
+ # reduce Pool-0 to CPU-0
+ traceCommand("xm pool-cpu-add Pool-0 0")
+ for i in range(1, nr_cpus):
+ traceCommand("xm pool-cpu-remove Pool-0 %s" % i)
+
+def createStdPool(add_param=None):
+ cmd = "xm pool-create pool1.cfg "
+ if add_param:
+ for k,v in add_param.items():
+ cmd += "%s=%s " % (k,v)
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+
+def deletePool(name):
+ cmd = "xm pool-delete %s" % name
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+
+def destroyPool(name, delete_on_xenapi=False):
+ cmd = "xm pool-destroy %s" % name
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ if os.getenv("XM_USES_API") and delete_on_xenapi:
+ deletePool(name)
+
+def getPoolList():
+ status, output = traceCommand("xm pool-list")
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ lines = output.splitlines()
+ pools = []
+ for l in lines[1:]:
+ elms = l.split(" ", 1)
+ pools.append(elms[0]);
+ return pools
+
+def domInPool(dom, pool):
+ cmd = "xm list --pool=%s" % pool
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ return re.search(dom, output) != None
+
+def migrateToPool(dom, pool):
+ status, output = traceCommand("xm pool-migrate %s %s" % (dom, pool))
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ return domInPool(dom, pool)
+
+def cleanupPoolsDomains():
+ destroyAllDomUs()
+ for pool in getPoolList():
+ if pool != 'Pool-0':
+ destroyPool(pool, True)
+
+def waitForDomain(name):
+ for i in range(10):
+ if not isDomainRunning(name):
+ break
+ time.sleep(1)
+
Index: xen-4.0.0-testing/tools/xm-test/tests/xapi/20_xapi-cpu_pool_basic.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/xm-test/tests/xapi/20_xapi-cpu_pool_basic.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+
+# Basic Pool creation tests
+
+from XmTestLib import xapi
+from XmTestLib import *
+
+
+session = xapi.connect()
+host_ref = session.xenapi.host.get_all()[0]
+pools = session.xenapi.host.get_resident_cpu_pools(host_ref)
+if len(pools) != 1:
+ SKIP("Only Pool-0 have to be created for this test")
+
+
+# check extension of host object
+host_recs = session.xenapi.host.get_all_records()
+host_rec = host_recs[host_recs.keys()[0]]
+if len(host_recs.keys()) != 1 or not host_rec.has_key("resident_cpu_pools") or \
+ len(host_rec["resident_cpu_pools"]) != 1:
+ FAIL("Missing or wrong field 'resident_cpu_pools' in host record")
+
+
+# check extension of host_cpu object
+host_cpu_recs = session.xenapi.host_cpu.get_all_records()
+assigned_cpus = [ cpu for cpu in host_cpu_recs.values() if len(cpu["cpu_pool"]) > 0 ]
+unassigned_cpus = session.xenapi.host_cpu.get_unassigned_cpus()
+if len(host_cpu_recs) - len(assigned_cpus) != len(unassigned_cpus):
+ FAIL("Wrong host_cpu count values; CPUS total: %d, CPUS ass: %d, CPUS unass: %d" % (
+ len(host_cpu_recs), len(assigned_cpus), len(unassigned_cpus)))
+
+for cpu_rec in host_cpu_recs.values():
+ cpu_pool = session.xenapi.host_cpu.get_cpu_pool(cpu_rec['uuid'])
+ if cpu_pool != cpu_rec['cpu_pool']:
+ FAIL("Inconsistency of cpu_pool ref between host_cpu record (%s) "
+ "and get_cpu_pool (%s)" % (cpu_rec['cpu_pool'], cpu_pool))
+
+
+# create / modify / remove managed cpu pools
+pool1_cfg = { 'name_label' : 'Pool-1',
+ 'name_description' : 'new pool',
+ 'auto_power_on' : False,
+ 'ncpu' : '3',
+ 'sched_policy' : 'credit',
+ 'proposed_CPUs' : ['1','2'],
+ 'other_config' : { 'xmtest' : True },
+ }
+pool1 = session.xenapi.cpu_pool.create(pool1_cfg)
+pool1_rec = session.xenapi.cpu_pool.get_record(pool1)
+for k in pool1_cfg.keys():
+ if pool1_rec[k] != pool1_cfg[k]:
+ FAIL("Create error Pool-1 (create config %s, current config: %s, key: %s)" % (
+ pool1_cfg, pool1_rec, k))
+
+pool_all = session.xenapi.cpu_pool.get_all()
+if len(pool_all) != 2:
+ FAIL("cpu_pool.get_all() returns '%d', expected '2'" % len(pool_all))
+
+pool_all = session.xenapi.cpu_pool.get_all_records()
+if len(pool_all) != 2:
+ FAIL("cpu_pool.get_all_records() returns '%d', expected '2'" % len(pool_all))
+
+if pool1 != session.xenapi.cpu_pool.get_by_name_label(pool1_cfg['name_label'])[0]:
+ FAIL("cpu_pool.get_by_name_label() returns wrong value")
+
+if pool1 != session.xenapi.cpu_pool.get_by_uuid(pool1):
+ FAIL("cpu_pool.get_by_uuid() returns wrong value")
+
+if session.xenapi.cpu_pool.get_activated(pool1):
+ FAIL("cpu_pool.get_activated() returns 'true' instead of 'false'")
+
+if pool1_cfg['auto_power_on'] != session.xenapi.cpu_pool.get_auto_power_on(pool1):
+ FAIL("cpu_pool.get_auto_power_on() returns wrong value")
+
+if len(session.xenapi.cpu_pool.get_host_CPUs(pool1)) != 0:
+ FAIL("cpu_pool.get_host_CPUs has to return an empty list")
+
+if pool1_cfg['name_label'] != session.xenapi.cpu_pool.get_name_label(pool1):
+ FAIL("cpu_pool.get_name_label() returns wrong value")
+
+if pool1_cfg['name_description'] != session.xenapi.cpu_pool.get_name_description(pool1):
+ FAIL("cpu_pool.get_name_description() returns wrong value")
+
+if pool1_cfg['ncpu'] != session.xenapi.cpu_pool.get_ncpu(pool1):
+ FAIL("cpu_pool.get_ncpu() returns wrong value")
+
+cfg_len = len(pool1_cfg['proposed_CPUs'])
+api_len = len(session.xenapi.cpu_pool.get_proposed_CPUs(pool1))
+if cfg_len != api_len:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; cfg_cnt: %s, api_cnt:%s" % (cfg_len, api_len))
+
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if pool1_cfg['other_config']['xmtest'] != other_config.get('xmtest'):
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+if session.xenapi.cpu_pool.get_resident_on(pool1) != session.xenapi.host.get_all()[0]:
+ FAIL("cpu_pool.get_resident_on() returns wrong value")
+
+if pool1_cfg['sched_policy'] != session.xenapi.cpu_pool.get_sched_policy(pool1):
+ FAIL("cpu_pool.get_sched_policy() returns wrong value")
+
+if len(session.xenapi.cpu_pool.get_started_VMs(pool1)) != 0:
+ FAIL("cpu_pool.get_started_VMs() returns wrong value")
+
+if pool1 != session.xenapi.cpu_pool.get_uuid(pool1):
+ FAIL("cpu_pool.get_uuid() returns wrong value")
+
+session.xenapi.cpu_pool.set_auto_power_on(pool1, True)
+if not session.xenapi.cpu_pool.get_auto_power_on(pool1):
+ FAIL("cpu_pool.get_auto_power_on() returns wrong value")
+
+session.xenapi.cpu_pool.set_proposed_CPUs(pool1, [4])
+if '4' not in session.xenapi.cpu_pool.get_proposed_CPUs(pool1):
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; (set_proposed_CPUs)")
+
+session.xenapi.cpu_pool.add_to_proposed_CPUs(pool1, 5)
+val = session.xenapi.cpu_pool.get_proposed_CPUs(pool1)
+if '5' not in val:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; %s not in %s" % ('5',val))
+
+session.xenapi.cpu_pool.remove_from_proposed_CPUs(pool1, 5)
+val = session.xenapi.cpu_pool.get_proposed_CPUs(pool1)
+if '5' in val:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; %s in %s" % ('5',val))
+
+session.xenapi.cpu_pool.set_name_label(pool1, 'New-Pool-1')
+if 'New-Pool-1' != session.xenapi.cpu_pool.get_name_label(pool1):
+ FAIL("cpu_pool.get_name_label() returns wrong value")
+
+session.xenapi.cpu_pool.set_ncpu(pool1, 4)
+if '4' != session.xenapi.cpu_pool.get_ncpu(pool1):
+ FAIL("cpu_pool.get_ncpu() returns wrong value")
+
+session.xenapi.cpu_pool.set_other_config(pool1, {'test' : 'ok'})
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('test') != 'ok':
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.add_to_other_config(pool1, 'new_entry', 'added')
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('new_entry') != 'added':
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.remove_from_other_config(pool1, 'new_entry')
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('new_entry') != None:
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.set_sched_policy(pool1, 'credit')
+if 'credit' != session.xenapi.cpu_pool.get_sched_policy(pool1):
+ FAIL("cpu_pool.get_sched_policy() returns wrong value")
+
+session.xenapi.cpu_pool.destroy(pool1)
+if pool1 in session.xenapi.cpu_pool.get_all():
+ FAIL("cpu_pool.destroy() has not removed pool")
+
Index: xen-4.0.0-testing/tools/xm-test/tests/xapi/Makefile.am
===================================================================
--- xen-4.0.0-testing.orig/tools/xm-test/tests/xapi/Makefile.am
+++ xen-4.0.0-testing/tools/xm-test/tests/xapi/Makefile.am
@@ -3,7 +3,8 @@ SUBDIRS =
TESTS = 01_xapi-vm_basic.test \
02_xapi-vbd_basic.test \
03_xapi-network_pos.test \
- 04_xapi-data_uri_handling.test
+ 04_xapi-data_uri_handling.test \
+ 20_xapi-cpu_pool_basic.test
XFAIL_TESTS =

127
cpupools-core-fixup.patch Normal file
View File

@ -0,0 +1,127 @@
- fix tasklet_schedule_cpu() when invoked from the tasklet's handler
- properly balance (un-)pausing in continue_hypercall_on_cpu() code
paths
- bump domctl interface version (due to the addition of the "cpupool"
member to struct xen_domctl_getdomaininfo)
- move and rename csched_priv to make sure eventual backported
upstream patches using the variable get correctly adjusted (i.e.
missing adjustments get detected at build time)
Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -1573,6 +1573,7 @@ int continue_hypercall_on_cpu(int cpu, v
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
+ vcpu_pause_nosync(v);
}
else
{
@@ -1583,7 +1584,6 @@ int continue_hypercall_on_cpu(int cpu, v
info->func = func;
info->data = data;
- vcpu_pause_nosync(v);
tasklet_schedule_cpu(&info->tasklet, cpu);
raise_softirq(SCHEDULE_SOFTIRQ);
Index: xen-4.0.0-testing/xen/common/sched_credit.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_credit.c
+++ xen-4.0.0-testing/xen/common/sched_credit.c
@@ -176,7 +176,6 @@ struct csched_private {
/*
* Global variables
*/
-static struct csched_private csched_priv;
static struct csched_private *csched_priv0 = NULL;
static void csched_tick(void *_cpu);
@@ -1524,11 +1523,13 @@ static void csched_tick_resume(struct sc
}
}
+static struct csched_private _csched_priv;
+
struct scheduler sched_credit_def = {
.name = "SMP Credit Scheduler",
.opt_name = "credit",
.sched_id = XEN_SCHEDULER_CREDIT,
- .sched_data = &csched_priv,
+ .sched_data = &_csched_priv,
.init_domain = csched_dom_init,
.destroy_domain = csched_dom_destroy,
Index: xen-4.0.0-testing/xen/common/softirq.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/softirq.c
+++ xen-4.0.0-testing/xen/common/softirq.c
@@ -104,12 +104,15 @@ static void tasklet_schedule_list(struct
{
BUG_ON(!list_empty(&t->list));
list_add_tail(&t->list, tlist);
+ t->scheduled_on = NR_CPUS;
}
t->is_scheduled = 1;
if ( cpu == smp_processor_id() )
raise_softirq(TASKLET_SOFTIRQ);
- else
+ else if ( !t->is_running )
cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ else
+ t->scheduled_on = cpu;
}
spin_unlock_irqrestore(&tasklet_lock, flags);
@@ -156,7 +159,15 @@ static void tasklet_action(void)
if ( t->is_scheduled )
{
BUG_ON(t->is_dead || !list_empty(&t->list));
- list_add_tail(&t->list, tlist);
+ if ( t->scheduled_on >= NR_CPUS )
+ list_add_tail(&t->list, tlist);
+ else
+ {
+ unsigned int cpu = t->scheduled_on;
+
+ list_add_tail(&t->list, &per_cpu(tasklet_list_pcpu, cpu));
+ cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ }
}
/*
Index: xen-4.0.0-testing/xen/include/public/domctl.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/public/domctl.h
+++ xen-4.0.0-testing/xen/include/public/domctl.h
@@ -35,7 +35,7 @@
#include "xen.h"
#include "grant_table.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007
struct xenctl_cpumap {
XEN_GUEST_HANDLE_64(uint8) bitmap;
Index: xen-4.0.0-testing/xen/include/xen/softirq.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/xen/softirq.h
+++ xen-4.0.0-testing/xen/include/xen/softirq.h
@@ -50,12 +50,14 @@ struct tasklet
bool_t is_scheduled;
bool_t is_running;
bool_t is_dead;
+ unsigned int scheduled_on;
void (*func)(unsigned long);
unsigned long data;
};
#define DECLARE_TASKLET(name, func, data) \
- struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, func, data }
+ struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, NR_CPUS, \
+ func, data }
void tasklet_schedule(struct tasklet *t);
void tasklet_schedule_cpu(struct tasklet *t, int cpu);

3267
cpupools-core.patch Normal file

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.
===================================================================
--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
+++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
@@ -363,6 +363,11 @@ static int __devinit platform_pci_init(s
@@ -401,6 +401,11 @@ static int __devinit platform_pci_init(s
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;

View File

@ -235,6 +235,13 @@ class Wholedisk:
os.system("lvchange -ay '%s' > /dev/null 2>&1" % (self.vdev))
self.mapped += 1
def partitionsdeactivated(self):
"Return True if partition mappings have been removed, False otherwise"
for part in self.partitions:
if os.access(part.pdev, os.F_OK):
return False
return True
def deactivatepartitions(self):
"""Remove device-mapper mappings and loop mapping.
@ -246,9 +253,14 @@ class Wholedisk:
self.mapped -= 1
if not self.mapped:
if self.pcount:
retries = 10
while retries and not self.partitionsdeactivated():
verbose_print("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
fd = os.popen("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
fd.close()
os.system("kpartx %s -d '%s'" % (kpartx_args, self.physdev()))
time.sleep(0.1)
retries -= 1
if retries == 0:
error("unable to remove partition mappings with kpartx -d")
if self.pcount and self.lvm:
verbose_print("lvchange -an '%s'" % (self.vdev))
ret = os.system("lvchange -an '%s' > /dev/null 2>&1" % (self.vdev)) >> 8

View File

@ -1,5 +1,7 @@
--- 2010-01-06.orig/xen/arch/ia64/linux-xen/smp.c 2009-05-27 13:54:05.000000000 +0200
+++ 2010-01-06/xen/arch/ia64/linux-xen/smp.c 2010-01-06 11:22:12.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/ia64/linux-xen/smp.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/ia64/linux-xen/smp.c
+++ xen-4.0.0-testing/xen/arch/ia64/linux-xen/smp.c
@@ -189,7 +189,7 @@ handle_IPI (int irq, void *dev_id, struc
* At this point the structure may be gone unless
* wait is true.
@ -9,8 +11,10 @@
/* Notify the sending CPU that the task is done. */
mb();
--- 2010-01-06.orig/xen/arch/x86/smp.c 2009-08-19 17:01:49.000000000 +0200
+++ 2010-01-06/xen/arch/x86/smp.c 2010-01-06 11:22:12.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/smp.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/smp.c
+++ xen-4.0.0-testing/xen/arch/x86/smp.c
@@ -394,7 +394,7 @@ static void __smp_call_function_interrup
if ( call_data.wait )
@ -29,9 +33,11 @@
}
irq_exit();
--- 2010-01-06.orig/xen/common/keyhandler.c 2009-12-16 09:14:13.000000000 +0100
+++ 2010-01-06/xen/common/keyhandler.c 2010-01-06 11:22:12.000000000 +0100
@@ -72,14 +72,25 @@ static struct keyhandler show_handlers_k
Index: xen-4.0.0-testing/xen/common/keyhandler.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/keyhandler.c
+++ xen-4.0.0-testing/xen/common/keyhandler.c
@@ -71,14 +71,25 @@ static struct keyhandler show_handlers_k
.desc = "show this message"
};
@ -61,7 +67,7 @@
}
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
@@ -93,14 +104,12 @@ static void dump_registers(unsigned char
@@ -92,14 +103,12 @@ static void dump_registers(unsigned char
printk("'%c' pressed -> dumping registers\n", key);
/* Get local execution state out immediately, in case we get stuck. */
@ -77,8 +83,10 @@
on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
}
--- 2010-01-06.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2009-05-20 08:46:00.000000000 +0200
+++ 2010-01-06/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2010-01-06 11:22:12.000000000 +0100
Index: xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h
+++ xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
@@ -278,7 +278,7 @@ struct switch_stack {
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)

28
hibernate.patch Normal file
View File

@ -0,0 +1,28 @@
Index: xen-4.0.0-testing/tools/firmware/hvmloader/acpi/dsdt.asl
===================================================================
--- xen-4.0.0-testing.orig/tools/firmware/hvmloader/acpi/dsdt.asl
+++ xen-4.0.0-testing/tools/firmware/hvmloader/acpi/dsdt.asl
@@ -30,21 +30,9 @@ DefinitionBlock ("DSDT.aml", "DSDT", 2,
/*
* S3 (suspend-to-ram), S4 (suspend-to-disc) and S5 (power-off) type codes:
* must match piix4 emulation.
+ * Turn off support for s3 and s4 sleep states to deal with SVVP tests.
+ * This is what MSFT does on HyperV.
*/
- Name (\_S3, Package (0x04)
- {
- 0x05, /* PM1a_CNT.SLP_TYP */
- 0x05, /* PM1b_CNT.SLP_TYP */
- 0x0, /* reserved */
- 0x0 /* reserved */
- })
- Name (\_S4, Package (0x04)
- {
- 0x06, /* PM1a_CNT.SLP_TYP */
- 0x06, /* PM1b_CNT.SLP_TYP */
- 0x00, /* reserved */
- 0x00 /* reserved */
- })
Name (\_S5, Package (0x04)
{
0x07, /* PM1a_CNT.SLP_TYP */

View File

@ -2,28 +2,29 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -914,15 +914,15 @@ static PyObject *pyxc_hvm_build(XcObject
int i;
@@ -917,16 +917,16 @@ static PyObject *pyxc_hvm_build(XcObject
#endif
int i;
char *image;
- int memsize, target=-1, vcpus = 1, acpi = 0, apic = 1;
+ int memsize, target=-1, vcpus = 1, acpi = 0, apic = 1, extid = 0;
uint64_t vcpu_avail = 1;
PyObject *vcpu_avail_handle = NULL;
uint8_t vcpu_avail[(HVM_MAX_VCPUS + 7)/8];
static char *kwd_list[] = { "domid",
- "memsize", "image", "target", "vcpus",
- "vcpu_avail", "acpi", "apic", NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iilii", kwd_list,
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iiOii", kwd_list,
+ "memsize", "image", "target", "vcpus",
+ "vcpu_avail", "extid", "acpi", "apic", NULL };
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iiliii", kwd_list,
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iiOiii", kwd_list,
&dom, &memsize, &image, &target, &vcpus,
- &vcpu_avail, &acpi, &apic) )
+ &vcpu_avail, &extid, &acpi, &apic) )
- &vcpu_avail_handle, &acpi, &apic) )
+ &vcpu_avail_handle, &extid, &acpi, &apic) )
return NULL;
if ( target == -1 )
@@ -950,6 +950,7 @@ static PyObject *pyxc_hvm_build(XcObject
memset(vcpu_avail, 0, sizeof(vcpu_avail));
@@ -978,6 +978,7 @@ static PyObject *pyxc_hvm_build(XcObject
va_hvm->checksum -= sum;
munmap(va_map, XC_PAGE_SIZE);
#endif
@ -35,7 +36,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -150,6 +150,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
@@ -151,6 +151,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
'nographic': int,
'nomigrate': int,
'pae' : int,
@ -47,7 +48,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py
+++ xen-4.0.0-testing/tools/python/xen/xend/image.py
@@ -841,6 +841,7 @@ class HVMImageHandler(ImageHandler):
@@ -839,6 +839,7 @@ class HVMImageHandler(ImageHandler):
self.apic = int(vmConfig['platform'].get('apic', 0))
self.acpi = int(vmConfig['platform'].get('acpi', 0))
@ -55,7 +56,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
self.guest_os_type = vmConfig['platform'].get('guest_os_type')
self.memory_sharing = int(vmConfig['memory_sharing'])
xc.dom_set_memshr(self.vm.getDomid(), self.memory_sharing)
@@ -966,6 +967,7 @@ class HVMImageHandler(ImageHandler):
@@ -964,6 +965,7 @@ class HVMImageHandler(ImageHandler):
log.debug("target = %d", mem_mb)
log.debug("vcpus = %d", self.vm.getVCpuCount())
log.debug("vcpu_avail = %li", self.vm.getVCpuAvail())
@ -63,7 +64,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
log.debug("acpi = %d", self.acpi)
log.debug("apic = %d", self.apic)
@@ -975,6 +977,7 @@ class HVMImageHandler(ImageHandler):
@@ -973,6 +975,7 @@ class HVMImageHandler(ImageHandler):
target = mem_mb,
vcpus = self.vm.getVCpuCount(),
vcpu_avail = self.vm.getVCpuAvail(),
@ -86,7 +87,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
gopts.var('acpi', val='ACPI',
fn=set_int, default=1,
use="Disable or enable ACPI of HVM domain.")
@@ -1077,7 +1081,7 @@ def configure_hvm(config_image, vals):
@@ -1081,7 +1085,7 @@ def configure_hvm(config_image, vals):
'timer_mode',
'usb', 'usbdevice',
'vcpus', 'vnc', 'vncconsole', 'vncdisplay', 'vnclisten',

View File

@ -151,14 +151,14 @@ Index: xen-4.0.0-testing/xen/include/public/arch-x86/hvm/save.h
+ uint32_t long_mode;
+ uint32_t ext_id;
+};
+DECLARE_HVM_SAVE_TYPE(HYPERV_DOM, 15, struct hvm_hyperv_dom);
+DECLARE_HVM_SAVE_TYPE(HYPERV_DOM, 16, struct hvm_hyperv_dom);
+
+struct hvm_hyperv_cpu {
+ uint64_t control_msr;
+ uint64_t version_msr;
+ uint64_t pad[27]; //KYS: sles10 sp2 compatibility
+};
+DECLARE_HVM_SAVE_TYPE(HYPERV_CPU, 16, struct hvm_hyperv_cpu);
+DECLARE_HVM_SAVE_TYPE(HYPERV_CPU, 17, struct hvm_hyperv_cpu);
+
/*
* Largest type-code in use

View File

@ -1,8 +1,5 @@
%patch
Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm_extensions.h
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm_extensions.h
+++ b/xen/include/asm-x86/hvm/hvm_extensions.h
@@ -0,0 +1,183 @@
+/****************************************************************************
+ |
@ -187,17 +184,13 @@ Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm_extensions.h
+int hyperx_initialize(struct domain *d);
+
+#endif
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/Makefile
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/Makefile
+++ b/xen/arch/x86/hvm/hyperv/Makefile
@@ -0,0 +1,2 @@
+obj-y += hv_intercept.o
+obj-y += hv_hypercall.o
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
+++ b/xen/arch/x86/hvm/hyperv/hv_errno.h
@@ -0,0 +1,62 @@
+/****************************************************************************
+ |
@ -261,10 +254,8 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
+#define HV_STATUS_NO_MEMORY_256PAGES 0x0103
+#define HV_STATUS_NO_MEMORY_1024PAGES 0x0104
+#endif
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
+++ b/xen/arch/x86/hvm/hyperv/hv_hypercall.c
@@ -0,0 +1,153 @@
+/****************************************************************************
+ |
@ -419,10 +410,8 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
+ return;
+ }
+}
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
+++ b/xen/arch/x86/hvm/hyperv/hv_hypercall.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+ |
@ -470,10 +459,8 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
+#define HV_NOTIFY_LONG_SPIN_WAIT 0x0008
+
+#endif /* HV_HYPERCALL_H */
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+++ b/xen/arch/x86/hvm/hyperv/hv_intercept.c
@@ -0,0 +1,1009 @@
+/****************************************************************************
+ |
@ -647,7 +634,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+ printk("Hyperv dom create failed\n");
+ return (1);
+ }
+ for (i=0; i < MAX_VIRT_CPUS; i++)
+ for (i = 0; i < d->max_vcpus; i++)
+ {
+ if (d->vcpu[i] != NULL)
+ {
@ -723,7 +710,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+static inline u32
+hv_get_max_vcpus_supported(void)
+{
+ return (MAX_VIRT_CPUS);
+ return HVM_MAX_VCPUS;
+}
+
+
@ -929,7 +916,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+ ASSERT(curp != NULL);
+#ifdef HV_STATS
+ printk("DUMP STATS\n");
+ for (i=0; i < MAX_VIRT_CPUS; i++)
+ for (i = 0; i < d->max_vcpus; i++)
+ if (d->vcpu[i] != NULL)
+ hv_print_stats(curp, i);
+#endif
@ -1484,11 +1471,9 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
+ hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ return (1);
+}
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
@@ -0,0 +1,285 @@
+++ b/xen/arch/x86/hvm/hyperv/hv_shim.h
@@ -0,0 +1,286 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
@ -1528,6 +1513,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
+#include <asm/domain.h>
+#include <asm/shadow.h>
+#include <public/xen.h>
+#include <public/hvm/hvm_info_table.h>
+
+#include "hv_hypercall.h"
+
@ -1704,7 +1690,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
+ * Each VCPU here corresponds to the vcpu in the underlying hypervisor;
+ * they share the same ID.
+ */
+ hv_vcpu_t vcpu_state[MAX_VIRT_CPUS];
+ hv_vcpu_t vcpu_state[HVM_MAX_VCPUS];
+} hv_partition_t;
+
+

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -247,8 +247,11 @@ static int open_disk(struct td_state *s,
@@ -249,8 +249,11 @@ static int open_disk(struct td_state *s,
drv = blktap_drivers[i].drv;
DPRINTF("%s driver specified\n", drv ? drv->format_name : "No");

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -360,6 +360,15 @@ static void qemu_send_responses(void* op
@@ -362,6 +362,15 @@ static void qemu_send_responses(void* op
}
/**
@ -18,7 +18,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
* Callback function for the IO message pipe. Reads requests from the ring
* and processes them (call qemu read/write functions).
*
@@ -378,6 +387,7 @@ static void handle_blktap_iomsg(void* pr
@@ -380,6 +389,7 @@ static void handle_blktap_iomsg(void* pr
blkif_t *blkif = s->blkif;
tapdev_info_t *info = s->ring_info;
int page_size = getpagesize();
@ -26,7 +26,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
struct aiocb_info *aiocb_info;
@@ -410,7 +420,7 @@ static void handle_blktap_iomsg(void* pr
@@ -412,7 +422,7 @@ static void handle_blktap_iomsg(void* pr
/* Don't allow writes on readonly devices */
if ((s->flags & TD_RDONLY) &&
@ -35,7 +35,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
blkif->pending_list[idx].status = BLKIF_RSP_ERROR;
goto send_response;
}
@@ -431,7 +441,7 @@ static void handle_blktap_iomsg(void* pr
@@ -433,7 +443,7 @@ static void handle_blktap_iomsg(void* pr
DPRINTF("Sector request failed:\n");
DPRINTF("%s request, idx [%d,%d] size [%llu], "
"sector [%llu,%llu]\n",
@ -44,7 +44,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
"WRITE" : "READ"),
idx,i,
(long long unsigned)
@@ -444,8 +454,14 @@ static void handle_blktap_iomsg(void* pr
@@ -446,8 +456,14 @@ static void handle_blktap_iomsg(void* pr
blkif->pending_list[idx].secs_pending += nsects;
@ -60,7 +60,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
case BLKIF_OP_WRITE:
aiocb_info = malloc(sizeof(*aiocb_info));
@@ -465,6 +481,10 @@ static void handle_blktap_iomsg(void* pr
@@ -467,6 +483,10 @@ static void handle_blktap_iomsg(void* pr
DPRINTF("ERROR: bdrv_write() == NULL\n");
goto send_response;
}

View File

@ -19,7 +19,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -218,9 +218,10 @@ static int map_new_dev(struct td_state *
@@ -220,9 +220,10 @@ static int map_new_dev(struct td_state *
return -1;
}
@ -31,7 +31,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
char* devname;
static int devnumber = 0;
int i;
@@ -230,7 +231,22 @@ static int open_disk(struct td_state *s,
@@ -232,7 +233,22 @@ static int open_disk(struct td_state *s,
bs = bdrv_new(devname);
free(devname);
@ -55,7 +55,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
fprintf(stderr, "Could not open image file %s\n", path);
return -ENOMEM;
}
@@ -521,7 +537,7 @@ static void handle_blktap_ctrlmsg(void*
@@ -527,7 +543,7 @@ static void handle_blktap_ctrlmsg(void*
s = state_init();
/*Open file*/

View File

@ -16,7 +16,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
@@ -256,6 +256,12 @@ static int open_disk(struct td_state *s,
@@ -258,6 +258,12 @@ static int open_disk(struct td_state *s,
s->size = bs->total_sectors;
s->sector_size = 512;

80
ioemu-disable-scsi.patch Normal file
View File

@ -0,0 +1,80 @@
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_platform.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_platform.c 2010-02-18 14:08:08.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_platform.c 2010-02-18 14:28:59.000000000 -0700
@@ -359,6 +359,8 @@ static void platform_ioport_write(void *
case 4:
fprintf(logfile, "Disconnect IDE hard disk...\n");
ide_unplug_harddisks();
+ fprintf(logfile, "Disconnect SCSI hard disk...\n");
+ pci_unplug_scsi();
fprintf(logfile, "Disconnect netifs...\n");
pci_unplug_netifs();
fprintf(logfile, "Shutdown taps...\n");
Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/qemu-xen.h 2010-02-18 14:08:08.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h 2010-02-18 14:44:28.000000000 -0700
@@ -57,6 +57,7 @@ void unset_vram_mapping(void *opaque);
#endif
void pci_unplug_netifs(void);
+void pci_unplug_scsi(void);
void destroy_hvm_domain(void);
void unregister_iomem(target_phys_addr_t start);
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/pci.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/pci.c 2010-02-18 14:49:36.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/pci.c 2010-02-18 14:53:47.000000000 -0700
@@ -892,6 +892,50 @@ void pci_unplug_netifs(void)
}
}
+void pci_unplug_scsi(void)
+{
+ PCIBus *bus;
+ PCIDevice *dev;
+ PCIIORegion *region;
+ int x;
+ int i;
+
+ /* We only support one PCI bus */
+ for (bus = first_bus; bus; bus = NULL) {
+ for (x = 0; x < 256; x++) {
+ dev = bus->devices[x];
+ if (dev &&
+ dev->config[0xa] == 0 &&
+ dev->config[0xb] == 1
+#ifdef CONFIG_PASSTHROUGH
+ && test_pci_devfn(x) != 1
+#endif
+ ) {
+ /* Found a scsi disk. Remove it from the bus. Note that
+ we don't free it here, since there could still be
+ references to it floating around. There are only
+ ever one or two structures leaked, and it's not
+ worth finding them all. */
+ bus->devices[x] = NULL;
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ region = &dev->io_regions[i];
+ if (region->addr == (uint32_t)-1 ||
+ region->size == 0)
+ continue;
+ fprintf(logfile, "region type %d at [%x,%x).\n",
+ region->type, region->addr,
+ region->addr+region->size);
+ if (region->type == PCI_ADDRESS_SPACE_IO) {
+ isa_unassign_ioport(region->addr, region->size);
+ } else if (region->type == PCI_ADDRESS_SPACE_MEM) {
+ unregister_iomem(region->addr);
+ }
+ }
+ }
+ }
+ }
+}
+
typedef struct {
PCIDevice dev;
PCIBus *bus;

View File

@ -0,0 +1,86 @@
Index: xen-4.0.0-testing/xen/common/keyhandler.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/keyhandler.c
+++ xen-4.0.0-testing/xen/common/keyhandler.c
@@ -19,6 +19,7 @@
static struct keyhandler *key_table[256];
static unsigned char keypress_key;
+static bool_t alt_key_handling;
char keyhandler_scratch[1024];
@@ -115,6 +116,26 @@ static struct keyhandler dump_registers_
.desc = "dump registers"
};
+static DECLARE_TASKLET(dump_dom0_tasklet, NULL, 0);
+
+static void dump_dom0_action(unsigned long arg)
+{
+ struct vcpu *v = (void *)arg;
+
+ for ( ; ; ) {
+ vcpu_show_execution_state(v);
+ v = v->next_in_list;
+ if ( !v )
+ break;
+ if ( softirq_pending(smp_processor_id()) )
+ {
+ dump_dom0_tasklet.data = (unsigned long)v;
+ tasklet_schedule_cpu(&dump_dom0_tasklet, v->processor);
+ break;
+ }
+ }
+}
+
static void dump_dom0_registers(unsigned char key)
{
struct vcpu *v;
@@ -125,7 +146,17 @@ static void dump_dom0_registers(unsigned
printk("'%c' pressed -> dumping Dom0's registers\n", key);
for_each_vcpu ( dom0, v )
+ {
+ if ( alt_key_handling && softirq_pending(smp_processor_id()) )
+ {
+ tasklet_kill(&dump_dom0_tasklet);
+ tasklet_init(&dump_dom0_tasklet, dump_dom0_action,
+ (unsigned long)v);
+ tasklet_schedule_cpu(&dump_dom0_tasklet, v->processor);
+ return;
+ }
vcpu_show_execution_state(v);
+ }
}
static struct keyhandler dump_dom0_registers_keyhandler = {
@@ -425,8 +456,28 @@ static struct keyhandler do_debug_key_ke
.desc = "trap to xendbg"
};
+static void do_toggle_alt_key(unsigned char key, struct cpu_user_regs *regs)
+{
+ alt_key_handling = !alt_key_handling;
+ printk("'%c' pressed -> using %s key handling\n", key,
+ alt_key_handling ? "alternative" : "normal");
+}
+
+static struct keyhandler toggle_alt_keyhandler = {
+ .irq_callback = 1,
+ .u.irq_fn = do_toggle_alt_key,
+ .desc = "toggle alternative key handling"
+};
+
void __init initialize_keytable(void)
{
+ if ( num_present_cpus() > 16 )
+ {
+ alt_key_handling = 1;
+ printk(XENLOG_INFO "Defaulting to alternative key handling; "
+ "send 'A' to switch to normal mode.\n");
+ }
+ register_keyhandler('A', &toggle_alt_keyhandler);
register_keyhandler('d', &dump_registers_keyhandler);
register_keyhandler('h', &show_handlers_keyhandler);
register_keyhandler('q', &dump_domains_keyhandler);

View File

@ -1,3 +1,3 @@
%defattr (-,root,root)
/lib/modules/%2-%1
/etc/modprobe.d/xen_pvdrivers
/etc/modprobe.d/xen_pvdrivers.conf

View File

@ -6,11 +6,11 @@ Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.
===================================================================
--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
+++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
@@ -289,7 +289,10 @@ static int check_platform_magic(struct d
@@ -320,7 +320,10 @@ static int check_platform_magic(struct d
if (magic != XEN_IOPORT_MAGIC_VAL) {
dev_err(dev, "invalid magic %#x", magic);
- return -ENODEV;
err = "unrecognised magic value";
- goto no_dev;
+ /*
+ * Older backend; just return 0 to be compatible.
+ */

View File

@ -1,5 +1,7 @@
--- 2009-11-09.orig/unmodified_drivers/linux-2.6/platform-pci/evtchn.c 2008-10-14 19:44:11.000000000 +0200
+++ 2009-11-09/unmodified_drivers/linux-2.6/platform-pci/evtchn.c 2009-11-24 17:38:08.000000000 +0100
Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/evtchn.c
===================================================================
--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/evtchn.c
+++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/evtchn.c
@@ -40,7 +40,9 @@
#include <xen/platform-compat.h>
#endif
@ -10,9 +12,11 @@
#define is_valid_evtchn(x) ((x) != 0)
#define evtchn_from_irq(x) (irq_evtchn[irq].evtchn)
--- 2009-11-09.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c 2009-02-24 20:09:53.000000000 +0100
+++ 2009-11-09/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c 2009-11-24 17:45:02.000000000 +0100
@@ -70,7 +70,6 @@ static uint64_t callback_via;
Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
===================================================================
--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
+++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
@@ -77,7 +77,6 @@ static uint64_t callback_via;
static int __devinit init_xen_info(void)
{
struct xen_add_to_physmap xatp;
@ -20,7 +24,7 @@
#ifdef __ia64__
xencomm_initialize();
@@ -78,6 +77,7 @@ static int __devinit init_xen_info(void)
@@ -85,6 +84,7 @@ static int __devinit init_xen_info(void)
setup_xen_features();
@ -28,7 +32,7 @@
shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT;
xatp.domid = DOMID_SELF;
xatp.idx = 0;
@@ -90,6 +90,11 @@ static int __devinit init_xen_info(void)
@@ -97,6 +97,11 @@ static int __devinit init_xen_info(void)
ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE);
if (shared_info_area == NULL)
panic("can't map shared info\n");
@ -40,8 +44,10 @@
return 0;
}
--- 2009-11-09.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h 2008-10-14 19:44:11.000000000 +0200
+++ 2009-11-09/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h 2009-11-24 17:40:08.000000000 +0100
Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h
===================================================================
--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h
+++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h
@@ -27,6 +27,11 @@
unsigned long alloc_xen_mmio(unsigned long len);
void platform_pci_resume(void);

25
qemu-console-retry.patch Normal file
View File

@ -0,0 +1,25 @@
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_console.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_console.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_console.c
@@ -182,6 +182,7 @@ static int con_init(struct XenDevice *xe
{
struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
char *type, *dom;
+ int retries = 0;
/* setup */
dom = xs_get_domain_path(xenstore, con->xendev.dom);
@@ -191,7 +192,11 @@ static int con_init(struct XenDevice *xe
snprintf(con->console, sizeof(con->console), "%s/device/console/%d", dom, xendev->dev);
free(dom);
- type = xenstore_read_str(con->console, "type");
+ while (!(type = xenstore_read_str(con->console, "type")) && retries < 5) {
+ usleep(250000);
+ retries++;
+ }
+
if (!type || 0 != strcmp(type, "ioemu")) {
xen_be_printf(xendev, 1, "not for me (type=%s)\n", type);
if (type)

66
shadow.patch Normal file
View File

@ -0,0 +1,66 @@
In domain_create, previously we reserve 1M memory for domain creation (as
described in xend comment), and these memory SHOULD NOT related with vcpu
number. And later, shadow_mem_control() will modify the shadow size to 256
pages per vcpu (also plus some other values related with guest memory size...).
Therefore the C/S 20389 which modifies 1M to 4M to fit more vcpu number is
wrong. I'm sorry for that.
Following is the reason why currently 1M doesn't work for big number vcpus,
as we mentioned, it caused Xen crash.
Each time when sh_set_allocation() is called, it checks whether
shadow_min_acceptable_pages() has been allocated, if not, it will allocate
them. That is to say, it is 128 pages per vcpu. But before we define
d->max_vcpu, guest vcpu hasn't been initialized, so
shadow_min_acceptable_pages() always returns 0. Therefore we only allocated 1M
shadow memory for domain_create, and didn't satisfy 128 pages per vcpu for
alloc_vcpu().
As we know, vcpu allocation is done in the hypercall of
XEN_DOMCTL_max_vcpus. However, at this point we haven't called
shadow_mem_control() and are still using the pre-allocated 1M shadow memory to
allocate so many vcpus. So it should be a BUG. Therefore when vcpu number
increases, 1M is not enough and causes Xen crash. C/S 20389 exposes this issue.
So I think the right process should be, after d->max_vcpu is set and before
alloc_vcpu(), we should call sh_set_allocation() to satisfy 128 pages per vcpu.
The following patch does this work. Is it work for you? Thanks!
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Index: xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/mm/shadow/common.c
+++ xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
@@ -41,6 +41,9 @@
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
+static unsigned int sh_set_allocation(struct domain *d,
+ unsigned int pages,
+ int *preempted);
/* Set up the shadow-specific parts of a domain struct at start of day.
* Called for every domain from arch_domain_create() */
void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
@@ -82,6 +85,12 @@ void shadow_vcpu_init(struct vcpu *v)
}
#endif
+ if ( !is_idle_domain(v->domain) )
+ {
+ shadow_lock(v->domain);
+ sh_set_allocation(v->domain, 128, NULL);
+ shadow_unlock(v->domain);
+ }
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
}
@@ -3102,7 +3111,7 @@ int shadow_enable(struct domain *d, u32
{
unsigned int r;
shadow_lock(d);
- r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
+ r = sh_set_allocation(d, 256, NULL); /* Use at least 1MB */
if ( r != 0 )
{
sh_set_allocation(d, 0, NULL);

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -865,6 +865,18 @@ static void xenstore_process_dm_command_
@@ -912,6 +912,18 @@ static void xenstore_process_dm_command_
}
snapshot_name = xs_read(xsh, XBT_NULL, path, &len);

View File

@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -102,6 +102,8 @@ int xenstore_watch_new_callback(const ch
@@ -103,6 +103,8 @@ int xenstore_watch_new_callback(const ch
}
@ -100,7 +100,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
static int pasprintf(char **buf, const char *fmt, ...)
{
va_list ap;
@@ -641,8 +643,33 @@ void xenstore_parse_domain_config(int hv
@@ -644,8 +646,33 @@ void xenstore_parse_domain_config(int hv
}
}
pstrcpy(bs->filename, sizeof(bs->filename), params);
@ -135,7 +135,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
}
#endif
@@ -776,6 +803,23 @@ int xenstore_parse_disable_pf_config ()
@@ -779,6 +806,23 @@ int xenstore_parse_disable_pf_config ()
return disable_pf;
}

View File

@ -191,7 +191,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h
void xenstore_process_event(void *opaque);
void xenstore_record_dm(const char *subpath, const char *state);
void xenstore_record_dm_state(const char *state);
+void xenstore_record_dm_error(char *errmsg);
+void xenstore_record_dm_error(const char *errmsg);
void xenstore_check_new_media_present(int timeout);
void xenstore_write_vncport(int vnc_display);
void xenstore_read_vncpasswd(int domid, char *pwbuf, size_t pwbuflen);
@ -205,9 +205,9 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
#include "sysemu.h"
+#include "qemu-xen.h"
#include "console.h"
#include "hw.h"
#include "pci.h"
@@ -836,6 +837,7 @@ static void xenstore_process_dm_command_
@@ -839,6 +840,7 @@ static void xenstore_process_dm_command_
{
char *path = NULL, *command = NULL, *par = NULL;
unsigned int len;
@ -215,7 +215,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
if (pasprintf(&path,
"/local/domain/0/device-model/%u/command", domid) == -1) {
@@ -851,7 +853,18 @@ static void xenstore_process_dm_command_
@@ -854,7 +856,18 @@ static void xenstore_process_dm_command_
if (!strncmp(command, "save", len)) {
fprintf(logfile, "dm-command: pause and save state\n");
@ -235,17 +235,17 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
} else if (!strncmp(command, "continue", len)) {
fprintf(logfile, "dm-command: continue after state save\n");
xen_pause_requested = 0;
@@ -984,6 +997,13 @@ void xenstore_record_dm_state(const char
@@ -987,6 +1000,13 @@ void xenstore_record_dm_state(const char
xenstore_record_dm("state", state);
}
+void xenstore_record_dm_error(char *errmsg)
+void xenstore_record_dm_error(const char *errmsg)
+{
+ fprintf(logfile, "%s\n", errmsg);
+ xenstore_record_dm("error", errmsg);
+ xenstore_record_dm_state("error");
+}
+
void xenstore_process_media_change_event(char **vec)
static void xenstore_process_media_change_event(char **vec)
{
char *media_present = NULL;

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py
+++ xen-4.0.0-testing/tools/python/xen/xend/image.py
@@ -492,7 +492,7 @@ class ImageHandler:
@@ -490,7 +490,7 @@ class ImageHandler:
domains.domains_lock.acquire()
@ -11,7 +11,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
if self.device_model is None:
return
# Signal the device model to for action
@@ -529,10 +529,17 @@ class ImageHandler:
@@ -527,10 +527,17 @@ class ImageHandler:
while state != ret:
state = xstransact.Read("/local/domain/0/device-model/%i/state"
% self.vm.getDomid())
@ -32,7 +32,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
#resotre orig state
xstransact.Store("/local/domain/0/device-model/%i"
@@ -557,6 +564,10 @@ class ImageHandler:
@@ -555,6 +562,10 @@ class ImageHandler:
except:
pass
@ -295,7 +295,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCheckpoint.py
if dominfo:
dominfo.resume()
else:
@@ -329,26 +403,7 @@ def restore(xd, fd, dominfo = None, paus
@@ -329,24 +403,7 @@ def restore(xd, fd, dominfo = None, paus
dominfo.completeRestore(handler.store_mfn, handler.console_mfn)
@ -314,9 +314,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCheckpoint.py
-
- try:
- dominfo.waitForDevices() # Wait for backends to set up
- except Exception, exn:
- log.exception(exn)
-
- finally:
- if lock:
- XendDomain.instance().domains_lock.acquire()
+ wait_devs(dominfo)
@ -327,10 +325,10 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -233,6 +233,7 @@ XENAPI_CFG_TYPES = {
's3_integrity' : int,
@@ -235,6 +235,7 @@ XENAPI_CFG_TYPES = {
'superpages' : int,
'memory_sharing': int,
'pool_name' : str,
+ 'snapshotname': str,
}
@ -539,7 +537,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -122,6 +122,14 @@ SUBCOMMAND_HELP = {
@@ -123,6 +123,14 @@ SUBCOMMAND_HELP = {
'Restore a domain from a saved state.'),
'save' : ('[-c|-f] <Domain> <CheckpointFile>',
'Save a domain state to restore later.'),
@ -554,7 +552,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
'shutdown' : ('<Domain> [-waRH]', 'Shutdown a domain.'),
'top' : ('', 'Monitor a host and the domains in real time.'),
'unpause' : ('<Domain>', 'Unpause a paused domain.'),
@@ -316,6 +324,9 @@ SUBCOMMAND_OPTIONS = {
@@ -335,6 +343,9 @@ SUBCOMMAND_OPTIONS = {
('-c', '--checkpoint', 'Leave domain running after creating snapshot'),
('-f', '--force', 'Force to overwrite exist file'),
),
@ -564,7 +562,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
'restore': (
('-p', '--paused', 'Do not unpause domain after restoring it'),
),
@@ -362,6 +373,10 @@ common_commands = [
@@ -385,6 +396,10 @@ common_commands = [
"restore",
"resume",
"save",
@ -575,7 +573,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
"shell",
"shutdown",
"start",
@@ -395,6 +410,10 @@ domain_commands = [
@@ -418,6 +433,10 @@ domain_commands = [
"restore",
"resume",
"save",
@ -586,7 +584,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
"shutdown",
"start",
"suspend",
@@ -815,6 +834,62 @@ def xm_event_monitor(args):
@@ -850,6 +869,62 @@ def xm_event_monitor(args):
#
#########################################################################
@ -649,7 +647,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
def xm_save(args):
arg_check(args, "save", 2, 4)
@@ -3467,6 +3542,10 @@ commands = {
@@ -3694,6 +3769,10 @@ commands = {
"restore": xm_restore,
"resume": xm_resume,
"save": xm_save,

View File

@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
@@ -840,6 +845,29 @@ static int svm_cpu_up(struct cpuinfo_x86
@@ -847,6 +852,29 @@ static int svm_cpu_up(struct cpuinfo_x86
/* Initialize core's ASID handling. */
svm_asid_init(c);
@ -79,7 +79,7 @@ Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/hvm/hvm.h
+++ xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h
@@ -131,6 +131,12 @@ struct hvm_function_table {
@@ -132,6 +132,12 @@ struct hvm_function_table {
extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;

View File

@ -27,8 +27,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
+int connected_disks = 0;
fd_list_entry_t *fd_start = NULL;
static void handle_blktap_iomsg(void* private);
@@ -541,6 +542,7 @@ static void handle_blktap_ctrlmsg(void*
extern char* get_snapshot_name(int devid);
@@ -547,6 +548,7 @@ static void handle_blktap_ctrlmsg(void*
/* Allocate the disk structs */
s = state_init();
@ -36,7 +36,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c
/*Open file*/
if (s == NULL || open_disk(s, path, msg->drivertype, msg->readonly)) {
@@ -591,7 +593,8 @@ static void handle_blktap_ctrlmsg(void*
@@ -629,7 +631,8 @@ static void handle_blktap_ctrlmsg(void*
case CTLMSG_CLOSE:
s = get_state(msg->cookie);
if (s) unmap_disk(s);

View File

@ -0,0 +1,13 @@
Index: xen-4.0.0-testing/tools/hotplug/Linux/vif-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/vif-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/vif-bridge
@@ -91,7 +91,7 @@ case "$command" in
;;
esac
-handle_iptable
+#handle_iptable
log debug "Successful vif-bridge $command for $vif, bridge $bridge."
if [ "$command" == "online" ]

View File

@ -1,7 +1,9 @@
--- 2010-01-06.orig/xen/arch/x86/platform_hypercall.c 2009-12-14 08:34:19.000000000 +0100
+++ 2010-01-06/xen/arch/x86/platform_hypercall.c 2010-01-06 11:19:05.000000000 +0100
@@ -21,7 +21,7 @@
#include <xen/acpi.h>
Index: xen-4.0.0-testing/xen/arch/x86/platform_hypercall.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/platform_hypercall.c
+++ xen-4.0.0-testing/xen/arch/x86/platform_hypercall.c
@@ -22,7 +22,7 @@
#include <xen/sched-if.h>
#include <asm/current.h>
#include <public/platform.h>
-#include <acpi/cpufreq/processor_perf.h>
@ -9,7 +11,7 @@
#include <asm/edd.h>
#include <asm/mtrr.h>
#include "cpu/mtrr/mtrr.h"
@@ -62,6 +62,7 @@ static long cpu_down_helper(void *data)
@@ -63,6 +63,7 @@ static long cpu_down_helper(void *hdl, v
ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
{
ret_t ret = 0;
@ -17,7 +19,7 @@
struct xen_platform_op curop, *op = &curop;
if ( !IS_PRIV(current->domain) )
@@ -474,6 +475,24 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
@@ -475,6 +476,24 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
op->u.mem_add.epfn,
op->u.mem_add.pxm);
break;
@ -42,8 +44,10 @@
default:
ret = -ENOSYS;
break;
--- 2010-01-06.orig/xen/include/public/platform.h 2009-12-14 08:34:19.000000000 +0100
+++ 2010-01-06/xen/include/public/platform.h 2010-01-06 11:22:06.000000000 +0100
Index: xen-4.0.0-testing/xen/include/public/platform.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/public/platform.h
+++ xen-4.0.0-testing/xen/include/public/platform.h
@@ -355,6 +355,14 @@ struct xenpf_mem_hotadd
uint32_t flags;
};

View File

@ -1,5 +1,7 @@
--- 2010-01-06.orig/xen/arch/x86/mm.c 2010-01-05 13:29:13.000000000 +0100
+++ 2010-01-06/xen/arch/x86/mm.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/mm.c
+++ xen-4.0.0-testing/xen/arch/x86/mm.c
@@ -154,6 +154,8 @@ unsigned long __read_mostly pdx_group_va
int opt_allow_hugepage;
boolean_param("allowhugepage", opt_allow_hugepage);
@ -9,9 +11,11 @@
#define l1_disallow_mask(d) \
((d != dom_io) && \
(rangeset_is_empty((d)->iomem_caps) && \
--- 2010-01-06.orig/xen/arch/x86/traps.c 2009-12-17 12:20:22.000000000 +0100
+++ 2010-01-06/xen/arch/x86/traps.c 2010-01-06 11:22:26.000000000 +0100
@@ -1349,6 +1349,7 @@ asmlinkage void do_early_page_fault(stru
Index: xen-4.0.0-testing/xen/arch/x86/traps.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/traps.c
+++ xen-4.0.0-testing/xen/arch/x86/traps.c
@@ -1352,6 +1352,7 @@ asmlinkage void do_early_page_fault(stru
unsigned long *stk = (unsigned long *)regs;
printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
regs->cs, _p(regs->eip), _p(cr2), regs->error_code);
@ -19,8 +23,10 @@
printk("Stack dump: ");
while ( ((long)stk & ((PAGE_SIZE - 1) & ~(BYTES_PER_LONG - 1))) != 0 )
printk("%p ", _p(*stk++));
--- 2010-01-06.orig/xen/arch/x86/x86_32/mm.c 2009-10-29 12:24:48.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_32/mm.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_32/mm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_32/mm.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_32/mm.c
@@ -122,6 +122,8 @@ void __init paging_init(void)
#undef CNT
#undef MFN
@ -30,8 +36,10 @@
/* Create page tables for ioremap()/map_domain_page_global(). */
for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
{
--- 2010-01-06.orig/xen/arch/x86/x86_32/traps.c 2009-12-02 10:02:49.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_32/traps.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_32/traps.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_32/traps.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_32/traps.c
@@ -161,7 +161,8 @@ void show_page_walk(unsigned long addr)
l3t += (cr3 & 0xFE0UL) >> 3;
l3e = l3t[l3_table_offset(addr)];
@ -62,8 +70,10 @@
printk(" L1[0x%03lx] = %"PRIpte" %08lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
unmap_domain_page(l1t);
--- 2010-01-06.orig/xen/arch/x86/x86_64/mm.c 2010-01-05 13:29:13.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_64/mm.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_64/mm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_64/mm.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_64/mm.c
@@ -725,6 +725,8 @@ void __init paging_init(void)
#undef CNT
#undef MFN
@ -73,8 +83,10 @@
/* Create user-accessible L2 directory to map the MPT for compat guests. */
BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) !=
l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
--- 2010-01-06.orig/xen/arch/x86/x86_64/traps.c 2009-12-02 10:02:49.000000000 +0100
+++ 2010-01-06/xen/arch/x86/x86_64/traps.c 2010-01-06 11:22:26.000000000 +0100
Index: xen-4.0.0-testing/xen/arch/x86/x86_64/traps.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_64/traps.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_64/traps.c
@@ -176,7 +176,8 @@ void show_page_walk(unsigned long addr)
l4t = mfn_to_virt(mfn);
l4e = l4t[l4_table_offset(addr)];
@ -115,8 +127,10 @@
printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
}
--- 2010-01-06.orig/xen/include/asm-x86/mm.h 2009-12-17 12:20:22.000000000 +0100
+++ 2010-01-06/xen/include/asm-x86/mm.h 2010-01-06 11:23:39.000000000 +0100
Index: xen-4.0.0-testing/xen/include/asm-x86/mm.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/mm.h
+++ xen-4.0.0-testing/xen/include/asm-x86/mm.h
@@ -443,6 +443,8 @@ TYPE_SAFE(unsigned long,mfn);
#define SHARED_M2P_ENTRY (~0UL - 1UL)
#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY)

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e09d164603872500120d70e947b329525b920f45ce924c3661057cdff3fb97bd
size 23209042
oid sha256:88c2cad04e93a909e405bee6f4c3dff2c6b12ea2485b6e7f1db4813cb74f2f38
size 23155997

View File

@ -9,9 +9,9 @@ Index: xen-4.0.0-testing/Config.mk
-CONFIG_QEMU ?= $(QEMU_REMOTE)
+CONFIG_QEMU ?= ioemu-remote
QEMU_TAG := xen-4.0.0-rc1
#QEMU_TAG ?= 2621a102cd74cd6691bed30f638581639fcb141d
@@ -164,9 +164,9 @@ CONFIG_OCAML_XENSTORED ?= n
QEMU_TAG ?= e5d14857cd67490bf956d97c8888c0be95ed3f78
# Thu Feb 18 15:36:29 2010 +0000
@@ -163,9 +163,9 @@ CONFIG_OCAML_XENSTORED ?= n
# Optional components
XENSTAT_XENTOP ?= y
VTPM_TOOLS ?= n

View File

@ -62,7 +62,7 @@ Index: xen-4.0.0-testing/tools/python/Makefile
+ --prefix="/usr" --root="$(DESTDIR)" --force
install-dtd: all
$(INSTALL_DIR) $(DESTDIR)$(SHAREDIR)
$(INSTALL_DIR) $(DESTDIR)$(SHAREDIR)/xen
Index: xen-4.0.0-testing/tools/xenstore/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/xenstore/Makefile

View File

@ -123,7 +123,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
import xen.util.xsm.xsm as security
from xen.util import xsconstants
from xen.util import mkdir
@@ -2320,6 +2320,10 @@ class XendDomainInfo:
@@ -2342,6 +2342,10 @@ class XendDomainInfo:
deviceClass, config = self.info['devices'].get(dev_uuid)
self._waitForDevice(deviceClass, config['devid'])
@ -134,7 +134,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
def _waitForDevice_destroy(self, deviceClass, devid, backpath):
return self.getDeviceController(deviceClass).waitForDevice_destroy(
devid, backpath)
@@ -3206,7 +3210,7 @@ class XendDomainInfo:
@@ -3230,7 +3234,7 @@ class XendDomainInfo:
devtype = devinfo[0]
disk = devinfo[1]['uname']
@ -143,7 +143,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
# If this is a drbd volume, check if we need to activate it
if disk.find(":") != -1:
@@ -3217,8 +3221,17 @@ class XendDomainInfo:
@@ -3241,8 +3245,17 @@ class XendDomainInfo:
if state == 'Secondary':
os.system('/sbin/drbdadm primary ' + diskname)
@ -163,7 +163,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
if mounted:
# This is a file, not a device. pygrub can cope with a
# file if it's raw, but if it's QCOW or other such formats
@@ -3234,7 +3247,8 @@ class XendDomainInfo:
@@ -3258,7 +3271,8 @@ class XendDomainInfo:
from xen.xend import XendDomain
dom0 = XendDomain.instance().privilegedDomain()
@ -173,7 +173,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
fn = BOOTLOADER_LOOPBACK_DEVICE
try:
@@ -3244,8 +3258,10 @@ class XendDomainInfo:
@@ -3268,8 +3282,10 @@ class XendDomainInfo:
if mounted:
log.info("Unmounting %s from %s." %
(fn, BOOTLOADER_LOOPBACK_DEVICE))

99
xen-gcc45-fixes.patch Normal file
View File

@ -0,0 +1,99 @@
--- xen-4.0.0-testing/tools/xenstore/xs_lib.c.orig 2010-03-01 08:28:04.000000000 -0700
+++ xen-4.0.0-testing/tools/xenstore/xs_lib.c 2010-03-01 09:12:04.000000000 -0700
@@ -149,7 +149,7 @@ bool xs_strings_to_perms(struct xs_permi
bool xs_perm_to_string(const struct xs_permissions *perm,
char *buffer, size_t buf_len)
{
- switch (perm->perms) {
+ switch ((int)perm->perms) {
case XS_PERM_WRITE:
*buffer = 'w';
break;
--- xen-4.0.0-testing/tools/blktap/lib/blktaplib.h.orig 2010-03-01 09:24:26.000000000 -0700
+++ xen-4.0.0-testing/tools/blktap/lib/blktaplib.h 2010-03-01 09:28:16.000000000 -0700
@@ -42,7 +42,7 @@
#include <sys/types.h>
#include <unistd.h>
-#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, XC_PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, XC_PAGE_SIZE)
/* size of the extra VMA area to map in attached pages. */
#define BLKTAP_VMA_PAGES BLK_RING_SIZE
--- xen-4.0.0-testing/tools/blktap2/include/blktaplib.h.orig 2010-03-01 09:46:30.000000000 -0700
+++ xen-4.0.0-testing/tools/blktap2/include/blktaplib.h 2010-03-01 09:46:50.000000000 -0700
@@ -45,7 +45,7 @@
#define EPRINTF(_f, _a...) syslog(LOG_ERR, "tap-err:%s: " _f, __func__, ##_a)
#define PERROR(_f, _a...) EPRINTF(_f ": %s", ##_a, strerror(errno))
-#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, XC_PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, XC_PAGE_SIZE)
/* size of the extra VMA area to map in attached pages. */
#define BLKTAP_VMA_PAGES BLK_RING_SIZE
--- xen-4.0.0-testing/xen/include/xen/compat.h.orig 2010-03-01 13:22:34.000000000 -0700
+++ xen-4.0.0-testing/xen/include/xen/compat.h 2010-03-01 14:30:34.000000000 -0700
@@ -129,8 +129,8 @@
#define CHECK_TYPE(name) \
- typedef int __checkT ## name[1 - ((xen_ ## name ## _t *)0 != \
- (compat_ ## name ## _t *)0) * 2]
+ typedef int __checkT ## name[1 - (sizeof(xen_ ## name ## _t) != \
+ sizeof(compat_ ## name ## _t)) * 2]
#define CHECK_TYPE_(k, n) \
typedef int __checkT ## k ## _ ## n[1 - ((k xen_ ## n *)0 != \
(k compat_ ## n *)0) * 2]
@@ -146,26 +146,30 @@
typedef int __checkF ## t ## __ ## f[1 - (&((xen_ ## t ## _t *)0)->f != \
&((compat_ ## t ## _t *)0)->f) * 2]
#define CHECK_FIELD_(k, n, f) \
- typedef int __checkF ## k ## _ ## n ## __ ## f[1 - (&((k xen_ ## n *)0)->f != \
- &((k compat_ ## n *)0)->f) * 2]
+ typedef int __checkF ## k ## _ ## n ## __ ## f[1 - (offsetof(k xen_ ## n,f) != \
+ offsetof(k compat_ ## n,f)) * 2]
#define CHECK_SUBFIELD_1(t, f1, f2) \
typedef int __checkF1 ## t ## __ ## f1 ## __ ## f2 \
- [1 - (&((xen_ ## t ## _t *)0)->f1.f2 != \
- &((compat_ ## t ## _t *)0)->f1.f2) * 2]
+ [1 - (offsetof(xen_ ## t ## _t,f1.f2) != \
+ offsetof(compat_ ## t ## _t,f1.f2)) * 2]
#define CHECK_SUBFIELD_1_(k, n, f1, f2) \
typedef int __checkF1 ## k ## _ ## n ## __ ## f1 ## __ ## f2 \
- [1 - (&((k xen_ ## n *)0)->f1.f2 != \
- &((k compat_ ## n *)0)->f1.f2) * 2]
+ [1 - (offsetof(k xen_ ## n,f1.f2) != \
+ offsetof(k compat_ ## n,f1.f2)) * 2]
#define CHECK_SUBFIELD_2(t, f1, f2, f3) \
typedef int __checkF2 ## t ## __ ## f1 ## __ ## f2 ## __ ## f3 \
- [1 - (&((xen_ ## t ## _t *)0)->f1.f2.f3 != \
- &((compat_ ## t ## _t *)0)->f1.f2.f3) * 2]
+ [1 - (offsetof(xen_ ## t ## _t,f1.f2.f3) != \
+ offsetof(compat_ ## t ## _t,f1.f2.f3)) * 2]
+#define CHECK_SUBFIELD_2_(k, n, f1, f2, f3) \
+ typedef int __checkF2 ## k ## _ ## n ## __ ## f1 ## __ ## f2 ## __ ## f3 \
+ [1 - (offsetof(k xen_ ## n,f1.f2.f3) != \
+ offsetof(k compat_ ## n,f1.f2.f3)) * 2]
#define CHECK_SUBFIELD_2_(k, n, f1, f2, f3) \
typedef int __checkF2 ## k ## _ ## n ## __ ## f1 ## __ ## f2 ## __ ## f3 \
- [1 - (&((k xen_ ## n *)0)->f1.f2.f3 != \
- &((k compat_ ## n *)0)->f1.f2.f3) * 2]
+ [1 - (offsetof(k xen_ ## n,f1.f2.f3) != \
+ offsetof(k compat_ ## n,f1.f2.f3)) * 2]
int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...);
--- xen-4.0.0-testing/tools/ioemu-remote/block-vvfat.c.orig 2010-03-01 14:53:11.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/block-vvfat.c 2010-03-01 14:53:56.000000000 -0700
@@ -865,7 +865,8 @@ static int init_directories(BDRVVVFATSta
{
direntry_t* entry=array_get_next(&(s->directory));
entry->attributes=0x28; /* archive | volume label */
- snprintf((char*)entry->name,11,"QEMU VVFAT");
+ memcpy(entry->name,"QEMU VVF",8);
+ memcpy(entry->extension,"AT ",3);
}
/* Now build FAT, and write back information into directory */

View File

@ -34,7 +34,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py
+++ xen-4.0.0-testing/tools/python/xen/xend/image.py
@@ -912,11 +912,13 @@ class HVMImageHandler(ImageHandler):
@@ -910,11 +910,13 @@ class HVMImageHandler(ImageHandler):
mac = devinfo.get('mac')
if mac is None:
raise VmError("MAC address not specified or generated.")

View File

@ -4,7 +4,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py
+++ xen-4.0.0-testing/tools/python/xen/xend/image.py
@@ -1030,7 +1030,7 @@ class X86_HVM_ImageHandler(HVMImageHandl
@@ -1028,7 +1028,7 @@ class X86_HVM_ImageHandler(HVMImageHandl
def configure(self, vmConfig):
HVMImageHandler.configure(self, vmConfig)

View File

@ -56,7 +56,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
+ 'max_hvm_memory',
'node_to_cpu',
'node_to_memory',
'node_to_dma32_mem'
'node_to_dma32_mem',
Index: xen-4.0.0-testing/tools/python/xen/xend/balloon.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/balloon.py
@ -98,7 +98,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -1452,6 +1452,27 @@ class XendDomainInfo:
@@ -1469,6 +1469,27 @@ class XendDomainInfo:
pci_conf = self.info['devices'][dev_uuid][1]
return map(pci_dict_to_bdf_str, pci_conf['devs'])

View File

@ -1,8 +1,6 @@
Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c
+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
@@ -398,7 +398,7 @@ void xenstore_parse_domain_config(int hv
--- a/tools/ioemu-remote/xenstore.c
+++ b/tools/ioemu-remote/xenstore.c
@@ -399,7 +399,7 @@ void xenstore_parse_domain_config(int hv
char *buf = NULL;
char *fpath = NULL, *bpath = NULL, *btype = NULL,
*dev = NULL, *params = NULL, *drv = NULL;
@ -11,7 +9,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
unsigned int len, num, hd_index, pci_devid = 0;
BlockDriverState *bs;
BlockDriver *format;
@@ -485,12 +485,7 @@ void xenstore_parse_domain_config(int hv
@@ -486,12 +486,7 @@ void xenstore_parse_domain_config(int hv
continue;
free(danger_type);
danger_type = xs_read(xsh, XBT_NULL, danger_buf, &len);
@ -25,7 +23,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c
/* read the name of the device */
if (pasprintf(&buf, "%s/type", bpath) == -1)
continue;
@@ -498,6 +493,35 @@ void xenstore_parse_domain_config(int hv
@@ -499,6 +494,35 @@ void xenstore_parse_domain_config(int hv
drv = xs_read(xsh, XBT_NULL, buf, &len);
if (drv == NULL)
continue;

View File

@ -178,7 +178,7 @@ Index: xen-4.0.0-testing/tools/libxl/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/libxl/Makefile
+++ xen-4.0.0-testing/tools/libxl/Makefile
@@ -49,7 +49,7 @@ xl.o: $(LIBCONFIG_OUTPUT)/libconfig.so x
@@ -50,7 +50,7 @@ xl.o: $(LIBCONFIG_OUTPUT)/libconfig.so x
$(CC) $(CFLAGS) -I$(LIBCONFIG_SOURCE) -c xl.c
$(CLIENTS): xl.o libxenlight.so $(LIBCONFIG_OUTPUT)/libconfig.so

View File

@ -1,3 +1,84 @@
-------------------------------------------------------------------
Mon Feb 22 08:26:01 MST 2010 - jfehlig@novell.com
- bnc#556939 - Improve device map cleanup code in domUloader
-------------------------------------------------------------------
Sun Feb 21 21:34:23 MST 2010 - jfehlig@novell.com
- bnc# 578910 - xm block-detach does not cleanup xenstore
hotplug-cleanup-fix.patch
-------------------------------------------------------------------
Fri Feb 19 11:56:57 MST 2010 - carnold@novell.com
- bnc#579361 - Windows Server 2003 cannot wake up from stand by in
sp1
hibernate.patch
-------------------------------------------------------------------
Fri Feb 19 09:49:56 MST 2010 - carnold@novell.com
- fate#308852: XEN CPU Pools
cpupools-core.patch
cpupools-core-fixup.patch
keyhandler-alternative.patch
cpu-pools-libxc.patch
cpu-pools-python.patch
cpu-pools-libxen.patch
cpu-pools-xmtest.patch
cpu-pools-docs.patch
-------------------------------------------------------------------
Thu Feb 18 15:18:59 - ksrinivasan@novell.com
- bnc#558760: Disable scsi devices when PV drivers are loaded.
-------------------------------------------------------------------
Tue Feb 16 08:35:08 MST 2010 - carnold@novell.com
- Update to changeset 20951 Xen 4.0.0 RC4 for sle11-sp1 beta5.
-------------------------------------------------------------------
Mon Feb 8 08:08:01 MST 2010 - carnold@novell.com
- bnc#572146 - SLES11 SP1 beta 2 Xen - BUG: soft lockup - CPU#31
stuck for 61s! [kstop/31:4512]
cpuidle-hint-v3.patch
-------------------------------------------------------------------
Fri Feb 5 08:16:39 MST 2010 - carnold@novell.com
- Update to changeset 20900 RC2+ for sle11-sp1 beta4.
-------------------------------------------------------------------
Fri Jan 29 09:22:46 MST 2010 - carnold@novell.com
- bnc#573376 - OS reboot while create DomU with Windows CD
-------------------------------------------------------------------
Wed Jan 27 11:45:27 MST 2010 - carnold@novell.com
- bnc#573881 - /usr/lib64/xen/bin/qemu-dm is a broken link
-------------------------------------------------------------------
Thu Jan 21 18:50:36 MST 2010 - carnold@novell.com
- Update to changeset 20840 RC1+ for sle11-sp1 beta3.
-------------------------------------------------------------------
Thu Jan 21 10:41:59 MST 2010 - jfehlig@novell.com
- bnc#569581 - SuSEfirewall2 should handle rules. Disable
handle_iptable in vif-bridge script
vif-bridge-no-iptables.patch
-------------------------------------------------------------------
Wed Jan 20 09:12:00 MST 2010 - carnold@novell.com
- bnc#569577 - /etc/modprove.d/xen_pvdrivers, installed by
xen-kmp-default, to ../xen_pvdrivers.conf
-------------------------------------------------------------------
Wed Jan 6 16:50:16 EST 2010 - ksrinivasan@novell.com
@ -128,19 +209,10 @@ Thu Oct 8 22:44:04 MDT 2009 - jfehlig@novell.com
-------------------------------------------------------------------
Mon Sep 28 16:34:19 CST 2009 - wkong@novell.com
- Add patch ioemu-bdrv-open-CACHE_WB.patch
for install guest on tapdisk very very slow.
-------------------------------------------------------------------
Mon Sep 28 08:28:24 MDT 2009 - carnold@novell.com
- bnc#542525 - VUL-1: xen pygrub vulnerability
20099-pygrub-security.patch
20107-pygrub-security.patch
20146-pygrub-security.patch
20174-pygrub-security.patch
20201-pygrub-security.patch
-------------------------------------------------------------------
Fri Sep 25 15:08:12 MDT 2009 - jfehlig@novell.com
@ -165,25 +237,36 @@ Tue Sep 15 09:32:59 MDT 2009 - jfehlig@novell.com
20125-xc-parse-tuple-fix.patch
-------------------------------------------------------------------
Mon Aug 24 10:31:36 MDT 2009 - carnold@novell.com
Wed Sep 2 10:12:18 MDT 2009 - carnold@novell.com
- bnc#536176 - Xen panic when using iommu after updating hypervisor
19380-vtd-feature-check.patch
-------------------------------------------------------------------
Fri Aug 28 09:54:08 MDT 2009 - jfehlig@novell.com
- bnc#530959 - virsh autostart doesn't work
Fixing this libvirt bug also required fixing xend's op_pincpu
method with upstream c/s 19580
19580-xend-pincpu.patch
-------------------------------------------------------------------
Fri Aug 28 08:05:17 MDT 2009 - jbeulich@novell.com
- bnc#534146 - Xen: Fix SRAT check for discontig memory
20120-x86-srat-check-discontig.patch
-------------------------------------------------------------------
Mon Aug 24 07:59:14 MDT 2009 - carnold@novell.com
- bnc#491081 - Xen time goes backwards x3950M2
Patch for this bug plus additional upstream patches from Jan.
19614-x86-emul-lldt-ltr.patch
20026-ept-rwx-default.patch
20031-x86-pmode-load-seg-retry.patch
20035-x86-load-sreg-adjust.patch
20059-vmx-nmi-handling.patch
20077-x86-runstate-cswitch-out.patch
20078-x86_64-branch-emulation.patch
20101-hvm-no-compat-virt-start.patch
20112-x86-dom0-boot-run-timers.patch
-------------------------------------------------------------------
Fri Aug 14 13:00:48 MDT 2009 - carnold@novell.com
Tue Aug 11 01:08:51 CEST 2009 - ro@suse.de
- Modify the mkbuildtree script so the KMPs will build.
mkbuildtree.patch
- disable module build for ec2 correctly to fix build
(at the suse_kernel_module_package macro)
-------------------------------------------------------------------
Mon Aug 10 16:21:00 EDT 2009 - ksrinivasan@novell.com
@ -192,112 +275,298 @@ Mon Aug 10 16:21:00 EDT 2009 - ksrinivasan@novell.com
hv_win7_eoi_bug.patch
-------------------------------------------------------------------
Fri Aug 7 10:43:32 MDT 2009 - jfehlig@novell.com
Mon Aug 3 11:53:37 MDT 2009 - jfehlig@novell.com
- bnc#524180 - xend memory leak resulting in long garbage collector
runs. Bug applies to xen 3.4.1 as well.
runs
20013-xend-memleak.patch
-------------------------------------------------------------------
Thu Aug 6 10:10:43 MDT 2009 - carnold@novell.com
Fri Jul 31 13:22:09 MDT 2009 - carnold@novell.com
- Update to Xen 3.4.1 FCS c/s 19718.
- Upstream bugfixes from Jan.
19896-32on64-arg-xlat.patch
19960-show-page-walk.patch
19945-pae-xen-l2-entries.patch
19953-x86-fsgs-base.patch
19931-gnttblop-preempt.patch
19885-kexec-gdt-switch.patch
19894-shadow-resync-fastpath-race.patch
- hvperv shim patches no longer require being applied conditionally
-------------------------------------------------------------------
Tue Aug 4 15:48:59 MDT 2009 - carnold@novell.com
Wed Jul 29 08:47:50 MDT 2009 - jfehlig@novell.com
- Rename xen_loop to xen_loop.conf to conform with naming rules.
- bnc#520234 - npiv does not work with XEN in SLE11
Update block-npiv
- bnc#496033 - Support for creating NPIV ports without starting vm
block-npiv-common.sh
block-npiv-vport
Update block-npiv
- bnc#500043 - Fix access to NPIV disk from HVM vm
Update xen-qemu-iscsi-fix.patch
-------------------------------------------------------------------
Tue Jul 28 14:07:42 MDT 2009 - carnold@novell.com
Wed Jul 15 11:52:31 MDT 2009 - carnold@novell.com
- Update to Xen 3.4.1 RC10 c/s 19711.
- Don't build the KMPs for the ec2 kernel.
-------------------------------------------------------------------
Tue Jun 23 11:09:29 MDT 2009 - carnold@novell.com
Thu Jul 2 12:45:32 MDT 2009 - jfehlig@novell.com
- Update to Xen 3.4.1 RC4 c/s 19664.
- Upstream fixes from Jan Beulich
19606-hvm-x2apic-cpuid.patch
19734-vtd-gcmd-submit.patch
19752-vtd-srtp-sirtp-flush.patch
19753-vtd-reg-write-lock.patch
19764-hvm-domain-lock-leak.patch
19765-hvm-post-restore-vcpu-state.patch
19767-hvm-port80-inhibit.patch
19768-x86-dom0-stack-dump.patch
19770-x86-amd-s3-resume.patch
19801-x86-p2m-2mb-hap-only.patch
19815-vtd-kill-correct-timer.patch
- Patch from Jan Beulich to aid in debugging bnc#509911
gnttblop-preempt.patch
-------------------------------------------------------------------
Tue Jun 16 09:28:51 MDT 2009 - carnold@novell.com
Tue Jun 23 15:32:14 CST 2009 - wkong@novell.com
- Update to Xen 3.4.1 RC3 c/s 19657.
-------------------------------------------------------------------
Thu Jun 11 14:17:22 MDT 2009 - carnold@novell.com
- Update to Xen 3.4.1 RC2 c/s 19648.
- bnc#515220 - qemu-img-xen snapshot Segmentation fault
qemu-img-snapshot.patch update
-------------------------------------------------------------------
Tue Jun 9 13:52:07 CST 2009 - wkong@novell.com
- bnc#504491 - drop write data when set read only disk in xen config
bdrv_open2_fix_flags.patch
bdrv_open2_flags_2.patch
-------------------------------------------------------------------
Mon May 18 15:03:29 MDT 2009 - carnold@novell.com
Fri Jun 5 13:19:04 MDT 2009 - carnold@novell.com
- Update to Xen 3.4.0 FCS c/s 19607
- Upstream fixes from Jan Beulich.
19474-32on64-S3.patch
19490-log-dirty.patch
19492-sched-timer-non-idle.patch
19493-hvm-io-intercept-count.patch
19505-x86_64-clear-cr1.patch
19519-domctl-deadlock.patch
19523-32on64-restore-p2m.patch
19555-ept-live-migration.patch
19557-amd-iommu-ioapic-remap.patch
19560-x86-flush-tlb-empty-mask.patch
19571-x86-numa-shift.patch
19578-hvm-load-ldt-first.patch
19592-vmx-exit-reason-perfc-size.patch
19595-hvm-set-callback-irq-level.patch
19597-x86-ioport-quirks-BL2xx.patch
19602-vtd-multi-ioapic-remap.patch
19631-x86-frametable-map.patch
19653-hvm-vcpuid-range-checks.patch
-------------------------------------------------------------------
Mon May 17 17:15:57 CST 2009 - wkong@novell.com
Wed Jun 05 10:35:18 MDT 2009 - jsong@novell.com
- bnc#382112 - Caps lock not being passed to vm correctly.
capslock_enable.patch
-------------------------------------------------------------------
Wed May 27 10:35:18 MDT 2009 - jfehlig@novell.com
- bnc#506833 - Use pidof in xend and xendomains init scripts
-------------------------------------------------------------------
Wed May 27 09:39:25 MDT 2009 - jsong@novell.com
- bnc#484778 - XEN: PXE boot of FV domU using non-Realtek NIC fails
enable_more_nic_pxe.patch
-------------------------------------------------------------------
Wed May 27 09:38:40 MDT 2009 - jsong@novell.com
cross-migrate.patch
- bnc#390961 - cross-migration of a VM causes it to become
unresponsive (remains paused after migration)
-------------------------------------------------------------------
Tue May 19 10:58:40 MDT 2009 - carnold@novell.com
- Patches taken to fix the xenctx tool. The fixed version of this
tool is needed to debug bnc#502735.
18962-xc_translate_foreign_address.patch
18963-xenctx.patch
19168-hvm-domctl.patch
19169-remove-declare-bitmap.patch
19170-libxc.patch
19171-xenctx.patch
19450-xc_translate_foreign_address.patch
-------------------------------------------------------------------
Mon May 18 16:15:57 CST 2009 - wkong@novell.com
-bnc#485770 - check exsit file for save and snapshot-create
xm-save-check-file.patch
snapshot-xend.patch
-------------------------------------------------------------------
Thu May 14 12:00:09 MDT 2009 - jfehlig@novell.com
Mon May 18 15:06:41 CST 2009 - wkong@novell.com
-bnc#503782 - Using converted vmdk image does not work
ioemu-tapdisk-compat-QEMU_IMG.patch
-------------------------------------------------------------------
Thu May 14 10:54:03 MDT 2009 - jfehlig@novell.com
- bnc#503332 - Remove useless qcow tools
/usr/sbin/{qcow-create,img2qcow,qcow2raw} from xen-tools package.
-------------------------------------------------------------------
Wed May 13 09:59:50 CST 2009 - jsong@novell.com
- bnc#474738 - adding CD drive to VM guest makes it unbootable.
parse_boot_disk.patch
-------------------------------------------------------------------
Mon May 11 18:49:50 CST 2009 - wkong@novell.com
- bnc#477892 - snapshot windows can't accomplish.
snapshot-xend.patch
-------------------------------------------------------------------
Fri Apr 22 17:30:02 CST 2009 - wkong@novell.com
Tue Apr 28 11:57:00 MDT 2009 - carnold@novell.com
- Backport two qcow2 patches from qemu org
ioemu-6816-qcow2-revert-6404-6405-6407.patch
ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
- bnc#495300 - L3: Xen unable to PXE boot Windows based DomU's
18545-hvm-gpxe-rom.patch, 18548-hvm-gpxe-rom.patch
-------------------------------------------------------------------
Fri Apr 17 16:21:36 CST 2009 - wkong@novell.com
- bnc#477890 - Patch: ioemu-qcow2-enhance-snapshot-create.patch
Mon Apr 27 10:42:17 MDT 2009 - jfehlig@novell.com
- bnc#459836 - Fix rtc_timeoffset when localtime=0
xend-timeoffset.patch
-------------------------------------------------------------------
Thu Apr 16 12:13:10 MDT 2009 - jfehlig@novell.com
Wed Apr 22 14:50:35 MDT 2009 - carnold@novell.com
- Updated xend-domain-lock.patch
fate#303525, bnc#494892
- bnc#497440 - xmclone.sh script incorrectly handles networking for
SLE11.
-------------------------------------------------------------------
Thu Apr 9 18:36:23 CST 2009 - wkong@novell.com
- Remove Patch155: xend-core-dump-loc.diff
- Modify Patch110: xen-domUloader.diff,
remove symbol "bootfilter"
Fri Apr 17 16:21:36 MDT 2009 - wkong@novell.com
- bnc#477890 - VM becomes unresponsive after applying snapshot
-------------------------------------------------------------------
Wed Apr 8 17:38:12 CST 2009 - wkong@novell.com
Wed Apr 15 16:34:08 MDT 2009 - jfehlig@novell.com
- PATCH: network-nat-open-SuSEfirewall2-FORWARD.patch
fate#305354, bnc#472107
Open SuSEfirewall2 FORWARD rule when use xen nat
- PATCH: network-nat-dhcpd-domain-info.patch
Add domain info for nat-dhcpd
- bnc#494892 - Update xend-domain-lock.patch to flock the lock
file.
-------------------------------------------------------------------
Thu Apr 2 16:52:04 MDT 2009 - jfehlig@novell.com
Wed Apr 8 16:30:14 EDT 2009 - ksrinivasan@novell.com
- Fix domUloader to handle block device names with ':'
- bnc#439639 - SVVP Test 273 System - Sleep Stress With IO" fails
Turned off s3/s4 sleep states for HVM guests.
-------------------------------------------------------------------
Wed Mar 11 16:28:59 MDT 2009 - jfehlig@novell.com
Tue Apr 7 21:55:14 MDT 2009 - jsong@novell.com
- Use pidofproc/checkproc in xend init script
- bnc#468169 - fix domUloader to umount the mounted device mapper target in dom0
when install a sles10 guest with disk = /dev/disk/by_path
-------------------------------------------------------------------
Thu Apr 2 16:03:25 MDT 2009 - jfehlig@novell.com
- bnc#488490 - domUloader can't handle block device names with ':'
- bnc#486244 - vms fail to start after reboot when using qcow2
-------------------------------------------------------------------
Tue Mar 31 15:00:50 MDT 2009 - carnold@novell.com
- bnc#490835 - VTd errata on Cantiga chipset
19230-vtd-mobile-series4-chipset.patch
-------------------------------------------------------------------
Mon Mar 30 15:03:16 MDT 2009 - carnold@novell.com
- bnc#482515 - Missing dependency in xen.spec
-------------------------------------------------------------------
Thu Mar 26 09:17:00 MDT 2009 - carnold@novell.com
- Additional upstream bug fix patches from Jan Beulich.
19132-page-list-mfn-links.patch
19134-fold-shadow-page-info.patch
19135-next-shadow-mfn.patch
19136-page-info-rearrange.patch
19156-page-list-simplify.patch
19161-pv-ldt-handling.patch
19162-page-info-no-cpumask.patch
19216-msix-fixmap.patch
19268-page-get-owner.patch
19293-vcpu-migration-delay.patch
19391-vpmu-double-free.patch
19415-vtd-dom0-s3.patch
-------------------------------------------------------------------
Wed Mar 25 13:55:25 MDT 2009 - carnold@novell.com
- Imported numerous upstream bug fix patches.
19083-memory-is-conventional-fix.patch
19097-M2P-table-1G-page-mappings.patch
19137-lock-domain-page-list.patch
19140-init-heap-pages-max-order.patch
19167-recover-pat-value-s3-resume.patch
19172-irq-to-vector.patch
19173-pci-passthrough-fix.patch
19176-free-irq-shutdown-fix.patch
19190-pciif-typo-fix.patch
19204-allow-old-images-restore.patch
19232-xend-exception-fix.patch
19239-ioapic-s3-suspend-fix.patch
19240-ioapic-s3-suspend-fix.patch
19242-xenstored-use-after-free-fix.patch
19259-ignore-shutdown-deferrals.patch
19266-19365-event-channel-access-fix.patch
19275-19296-schedular-deadlock-fixes.patch
19276-cpu-selection-allocation-fix.patch
19302-passthrough-pt-irq-time-out.patch
19313-hvmemul-read-msr-fix.patch
19317-vram-tracking-fix.patch
19335-apic-s3-resume-error-fix.patch
19353-amd-migration-fix.patch
19354-amd-migration-fix.patch
19371-in-sync-L1s-writable.patch
19372-2-on-3-shadow-mode-fix.patch
19377-xend-vnclisten.patch
19400-ensure-ltr-execute.patch
19410-virt-to-maddr-fix.patch
-------------------------------------------------------------------
Mon Mar 9 16:28:27 MDT 2009 - jfehlig@novell.com
- bnc#483565 - Fix block-iscsi script.
Updated block-iscsi and xen-domUloader.diff
-------------------------------------------------------------------
Mon Mar 9 16:06:03 MDT 2009 - carnold@novell.com
- bnc#465814 - Mouse stops responding when wheel is used in Windows
VM.
mouse-wheel-roll.patch (James Song)
- bnc#470704 - save/restore of windows VM throws off the mouse
tracking.
usb-save-restore.patch (James Song)
-------------------------------------------------------------------
Thu Mar 5 15:35:30 MST 2009 - jfehlig@novell.com
- bnc#436629 - Use global vnc-listen setting specified in xend
configuration file.
xend-vnclisten.patch
- bnc#482623 - Fix pygrub to append user-supplied 'extra' args
to kernel args.
19234_pygrub.patch
-------------------------------------------------------------------
Thu Mar 5 13:52:48 MST 2009 - carnold@novell.com
- bnc#481161 upgrade - sles10sp2 to sles11 upgrade keeps
xen-tools-ioemu
-------------------------------------------------------------------
Tue Mar 3 16:11:39 CET 2009 - kukuk@suse.de

View File

@ -1,5 +1,5 @@
#
# spec file for package xen (Version 4.0.0_20809_01)
# spec file for package xen (Version 4.0.0_20978_01)
#
# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
@ -22,10 +22,10 @@ Name: xen
ExclusiveArch: %ix86 x86_64
%define xvers 4.0
%define xvermaj 4
%define changeset 20809
%define changeset 20978
%define xen_build_dir xen-4.0.0-testing
%define with_kmp 1
BuildRequires: LibVNCServer-devel SDL-devel automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
%define with_kmp 0
BuildRequires: LibVNCServer-devel SDL-devel acpica automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
%if %suse_version >= 1030
BuildRequires: texlive texlive-latex
%else
@ -37,7 +37,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit
%if %{?with_kmp}0
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_20809_01
Version: 4.0.0_20978_01
Release: 1
License: GPLv2
Group: System/Kernel
@ -63,7 +63,7 @@ Source16: xmclone.sh
Source17: xend-relocation.sh
Source18: init.xen_loop
%if %{?with_kmp}0
Source19: xen_pvdrivers
Source19: xen_pvdrivers.conf
Source20: kmp_filelist
%endif
Source21: block-dmmd
@ -119,10 +119,21 @@ Patch354: xen-api-auth.patch
Patch355: tools-gdbserver-build.diff
Patch356: ioemu-vnc-resize.patch
Patch357: ioemu-debuginfo.patch
Patch358: vif-bridge-no-iptables.patch
Patch359: qemu-console-retry.patch
# Needs to go upstream
Patch360: checkpoint-rename.patch
Patch361: xm-save-check-file.patch
Patch362: xm-create-xflag.patch
Patch363: cpupools-core.patch
Patch364: cpupools-core-fixup.patch
Patch365: keyhandler-alternative.patch
Patch366: cpu-pools-libxc.patch
Patch367: cpu-pools-python.patch
Patch368: cpu-pools-libxen.patch
Patch369: cpu-pools-xmtest.patch
Patch370: cpu-pools-docs.patch
Patch371: xen-gcc45-fixes.patch
# Patches for snapshot support
Patch400: snapshot-ioemu-save.patch
Patch401: snapshot-ioemu-restore.patch
@ -144,6 +155,8 @@ Patch424: ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
Patch425: ioemu-bdrv-open-CACHE_WB.patch
Patch426: xen-ioemu-hvm-pv-support.diff
Patch427: qemu-dm-segfault.patch
Patch428: shadow.patch
Patch429: hibernate.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
# Hypervisor and PV driver Patches
@ -162,6 +175,7 @@ Patch511: pv-driver-build.patch
Patch512: supported_module.diff
Patch513: magic_ioport_compat.patch
Patch650: disable_emulated_device.diff
Patch651: ioemu-disable-scsi.patch
# novell_shim patches
Patch700: hv_tools.patch
Patch701: hv_xen_base.patch
@ -546,11 +560,22 @@ Authors:
%patch355 -p1
%patch356 -p1
%patch357 -p1
%patch358 -p1
%patch359 -p1
%patch360 -p1
%patch361 -p1
%patch362 -p1
%patch363 -p1
%patch364 -p1
%patch365 -p1
%patch366 -p1
%patch367 -p1
%patch368 -p1
%patch369 -p1
%patch370 -p1
%patch371 -p1
%patch400 -p1
#%patch401 -p1
%patch401 -p1
%patch402 -p1
%patch403 -p1
%patch410 -p1
@ -567,6 +592,8 @@ Authors:
%patch425 -p1
%patch426 -p1
%patch427 -p1
%patch428 -p1
%patch429 -p1
%patch450 -p1
%patch500 -p1
%patch501 -p1
@ -583,6 +610,7 @@ Authors:
%patch512 -p1
%patch513 -p1
%patch650 -p1
%patch651 -p1
%patch700 -p1
%patch701 -p1
%patch702 -p1
@ -669,8 +697,8 @@ rm -f $RPM_BUILD_ROOT/usr/sbin/{qcow-create,img2qcow,qcow2raw}
make -C tools/misc/serial-split install \
DESTDIR=$RPM_BUILD_ROOT MANDIR=%{_mandir}
%ifarch x86_64
mkdir -p $RPM_BUILD_ROOT/usr/lib64/xen/bin/
ln -s %{_libdir}/xen/bin/qemu-dm $RPM_BUILD_ROOT/usr/lib64/xen/bin/qemu-dm
mkdir -p $RPM_BUILD_ROOT/${_libdir}/xen/bin/
ln -s /usr/lib/xen/bin/qemu-dm $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm
%endif
%if %{?with_kmp}0
# pv driver modules
@ -681,7 +709,7 @@ for flavor in %flavors_to_build; do
M=$PWD/obj/$flavor
done
mkdir -p $RPM_BUILD_ROOT/etc/modprobe.d
install -m644 %SOURCE19 $RPM_BUILD_ROOT/etc/modprobe.d/xen_pvdrivers
install -m644 %SOURCE19 $RPM_BUILD_ROOT/etc/modprobe.d/xen_pvdrivers.conf
%endif
# docs
make -C docs install \
@ -756,8 +784,13 @@ rm -f $RPM_BUILD_ROOT/usr/sbin/netfix
rm -f $RPM_BUILD_ROOT/%{_libdir}/python%{pyver}/site-packages/*.egg-info
rm -rf $RPM_BUILD_ROOT/html
rm -rf $RPM_BUILD_ROOT/usr/share/doc/xen/README.*
rm -f $RPM_BUILD_ROOT/usr/share/create.dtd
rm -f $RPM_BUILD_ROOT/usr/share/xen/create.dtd
rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
rm -f $RPM_BUILD_ROOT/%{_bindir}/qemu-img-xen
rm -f $RPM_BUILD_ROOT/%{_bindir}/qemu-nbd-xen
# FATE feature for remus rejected
rm -f $RPM_BUILD_ROOT/%{_bindir}/remus
rm -rf $RPM_BUILD_ROOT/%{_libdir}/python%{pyver}/site-packages/xen/remus
# This is necessary because of the build of libconfig for libxl
#rm -rf $RPM_BUILD_ROOT/$RPM_BUILD_ROOT
@ -792,11 +825,11 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
/usr/bin/xenstore*
/usr/bin/xentrace*
/usr/bin/pygrub
/usr/bin/qemu-img-xen
/usr/bin/qemu-nbd-xen
#/usr/bin/qemu-img-xen
#/usr/bin/qemu-nbd-xen
/usr/bin/tapdisk-ioemu
/usr/bin/gdbserver-xen
/usr/bin/remus
#/usr/bin/remus
/usr/sbin/blktapctrl
/usr/sbin/flask-loadpolicy
/usr/sbin/flask-getenforce
@ -927,7 +960,6 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
%{_libdir}/libvhd.so
/usr/bin/serial-split
/usr/include/blktaplib.h
/usr/include/flask.h
/usr/include/fsimage*
/usr/include/xen*.h
/usr/include/xen/
@ -949,6 +981,18 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
%post tools
%{fillup_and_insserv -y -n xend xend}
%{fillup_and_insserv -y -n xendomains xendomains}
if [ -f /usr/bin/qemu-img ]; then
if [ -f /usr/bin/qemu-img-xen ]; then
rm /usr/bin/qemu-img-xen
fi
ln -s /usr/bin/qemu-img /usr/bin/qemu-img-xen
fi
if [ -f /usr/bin/qemu-nbd ]; then
if [ -f /usr/bin/qemu-nbd-xen ]; then
rm /usr/bin/qemu-nbd-xen
fi
ln -s /usr/bin/qemu-nbd /usr/bin/qemu-nbd-xen
fi
%preun tools
%{stop_on_removal xendomains xend}
@ -956,6 +1000,12 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug
%postun tools
%{restart_on_update xend}
%{insserv_cleanup}
if [ -f /usr/bin/qemu-img-xen ]; then
rm /usr/bin/qemu-img-xen
fi
if [ -f /usr/bin/qemu-nbd-xen ]; then
rm /usr/bin/qemu-nbd-xen
fi
%post libs
/sbin/ldconfig

7
xen_pvdrivers.conf Normal file
View File

@ -0,0 +1,7 @@
# Install the paravirtualized drivers
install libata /sbin/modprobe xen-vbd 2>&1 |:; /sbin/modprobe --ignore-install libata
install 8139cp /sbin/modprobe xen-vnif 2>&1 |:; /sbin/modprobe --ignore-install 8139cp
install 8139too /sbin/modprobe xen-vnif 2>&1 |:; /sbin/modprobe --ignore-install 8139too

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -3896,6 +3896,14 @@ class XendDomainInfo:
@@ -3920,6 +3920,14 @@ class XendDomainInfo:
if not config.has_key('backend'):
config['backend'] = "00000000-0000-0000-0000-000000000000"

View File

@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2291,7 +2291,7 @@ class XendDomainInfo:
@@ -2313,7 +2313,7 @@ class XendDomainInfo:
# To prohibit directory traversal
based_name = os.path.basename(self.info['name_label'])

View File

@ -83,7 +83,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
from xen.util.blkif import parse_uname
import xen.util.xsm.xsm as security
from xen.util import xsconstants
@@ -457,6 +458,7 @@ class XendDomainInfo:
@@ -466,6 +467,7 @@ class XendDomainInfo:
if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
try:
@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
XendTask.log_progress(0, 30, self._constructDomain)
XendTask.log_progress(31, 60, self._initDomain)
@@ -2933,6 +2935,11 @@ class XendDomainInfo:
@@ -2972,6 +2974,11 @@ class XendDomainInfo:
self._stateSet(DOM_STATE_HALTED)
self.domid = None # Do not push into _stateSet()!
@ -103,7 +103,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
finally:
self.refresh_shutdown_lock.release()
@@ -4434,6 +4441,74 @@ class XendDomainInfo:
@@ -4478,6 +4485,74 @@ class XendDomainInfo:
def has_device(self, dev_class, dev_uuid):
return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
@ -243,7 +243,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCheckpoint.py
+ dominfo.acquire_running_lock()
return dominfo
except:
except Exception, exn:
dominfo.destroy()
Index: xen-4.0.0-testing/tools/hotplug/Linux/Makefile
===================================================================