- bnc#596442 - Preserve device config on domain start failure

xend-preserve-devs.patch

- bnc#597770 - insserv reports a loop between xendomains and
  openais.  Remove openais from Should-Start in xendomains script.

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=42
Charles Arnold 2010-04-26 19:37:58 +00:00 committed by Git OBS Bridge
parent 955345f274
commit 6891466418
18 changed files with 590 additions and 1251 deletions

21189-x86-emulate-clflush.patch (new file, 53 lines)

@@ -0,0 +1,53 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271353678 -3600
# Node ID d18e6a6c618af4f25a9e1a57c9e3eac55921678c
# Parent ffffddc4b1e030cce6bd4d12c4409c94599c1abf
x86_emulate: Emulate CLFLUSH instruction
We recently found that a FreeBSD 8.0 guest failed to install and boot on
Xen. The reason was that FreeBSD detected the clflush feature and invoked
this instruction to flush MMIO space. This caused a page fault, but
x86_emulate.c failed to emulate the instruction (it was not supported). As
a result, the page fault was reported inside FreeBSD. A similar issue was
reported earlier:
http://lists.xensource.com/archives/html/xen-devel/2010-03/msg00362.html
From: Wei Huang <wei.huang2@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Index: xen-4.0.0-testing/xen/arch/x86/x86_emulate/x86_emulate.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/x86_emulate/x86_emulate.c
+++ xen-4.0.0-testing/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -227,7 +227,8 @@ static uint8_t twobyte_table[256] = {
DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0,
/* 0xA8 - 0xAF */
ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM,
- DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, DstReg|SrcMem|ModRM,
+ DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
+ ImplicitOps|ModRM, DstReg|SrcMem|ModRM,
/* 0xB0 - 0xB7 */
ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
DstReg|SrcMem|ModRM|Mov, DstBitBase|SrcReg|ModRM,
@@ -4008,6 +4009,19 @@ x86_emulate(
emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags);
break;
+ case 0xae: /* Grp15 */
+ switch ( modrm_reg & 7 )
+ {
+ case 7: /* clflush */
+ fail_if(ops->wbinvd == NULL);
+ if ( (rc = ops->wbinvd(ctxt)) != 0 )
+ goto done;
+ break;
+ default:
+ goto cannot_emulate;
+ }
+ break;
+
case 0xaf: /* imul */
_regs.eflags &= ~(EFLG_OF|EFLG_CF);
switch ( dst.bytes )
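The table change above marks opcode 0x0F 0xAE (group 15) as ImplicitOps|ModRM, and the new case 0xae block emulates CLFLUSH by reusing the emulator's wbinvd() hook: flushing the entire cache is a safe, if heavy-handed, superset of flushing one line. A minimal standalone sketch of the sub-opcode dispatch, assuming nothing from Xen beyond the ModRM encoding (all names here are invented):

#include <stdint.h>
#include <stdio.h>

/* Group-15 (0x0F 0xAE) instructions share one opcode; bits 5:3 of the
 * ModRM byte select the sub-operation, and CLFLUSH uses reg field 7. */
static int emulate_grp15(uint8_t modrm)
{
    switch ( (modrm >> 3) & 7 )
    {
    case 7:  /* clflush m8, emulated here as a full cache flush */
        puts("clflush -> full flush via the wbinvd hook");
        return 0;
    default: /* fxsave, fxrstor, ldmxcsr, ... are not emulated */
        return -1;
    }
}

int main(void)
{
    return emulate_grp15(0x38); /* ModRM reg field == 7 -> clflush */
}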

21193-blktap-script.patch (new file, 23 lines)

@@ -0,0 +1,23 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271663923 -3600
# Node ID ae1abcdd4a670e34509e39d5280834a75afc0f6a
# Parent 6860c523d2daf1d110b130a9596469a0957b4032
tools/hotplug/Linux/blktap: remove optional tapdisk: prefix
Perhaps this should even be further generalized (e.g. to remove any
"[!/]*:" pattern prefixes) to be more forward compatible?
Signed-off-by: Jan Beulich <jbeulich@novell.com>
diff -r 6860c523d2da -r ae1abcdd4a67 tools/hotplug/Linux/blktap
--- a/tools/hotplug/Linux/blktap Mon Apr 19 08:55:33 2010 +0100
+++ b/tools/hotplug/Linux/blktap Mon Apr 19 08:58:43 2010 +0100
@@ -59,6 +59,7 @@
if [ -n "$t" ]
then
p=$(xenstore_read "$XENBUS_PATH/params")
+ p=${p#tapdisk:}
# if we have a ':', chew from head including :
if echo $p | grep -q \:
then
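The line added above relies on shell parameter expansion: ${p#tapdisk:} deletes a single optional leading "tapdisk:" tag before the existing ':' handling chews the rest. The same operation expressed in C, as a hedged illustration (the helper name and sample params string are invented):

#include <stdio.h>
#include <string.h>

/* Return p with one leading prefix removed, if present. */
static const char *strip_prefix(const char *p, const char *prefix)
{
    size_t n = strlen(prefix);
    return (strncmp(p, prefix, n) == 0) ? p + n : p;
}

int main(void)
{
    /* prints "aio:/images/disk.raw" */
    puts(strip_prefix("tapdisk:aio:/images/disk.raw", "tapdisk:"));
    return 0;
}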

21225-conring-iommu.patch (new file, 75 lines)

@@ -0,0 +1,75 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271954636 -3600
# Node ID 2b97855a629f1d79e1d075a6d2a8b569018b2094
# Parent a7947fd90328287dd097294b241753063c858597
console: Make initial static console buffer __initdata.
The previous scheme --- freeing an area of BSS --- did not interact
nicely with device passthrough, as the IOMMU will not have any Xen BSS
area in guest device pagetables. Hence, if the freed BSS space gets
allocated to a guest, DMAs to the guest's own memory can fail.
The simple solution here is to always free the static buffer at the end
of boot (initmem is handled specially for IOMMUs) and to require that a
dynamically allocated buffer always be created.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -65,11 +65,7 @@ size_param("conring_size", opt_conring_s
#define _CONRING_SIZE 16384
#define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
-static char
-#if _CONRING_SIZE >= PAGE_SIZE
- __attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
-#endif
- _conring[_CONRING_SIZE];
+static char __initdata _conring[_CONRING_SIZE];
static char *__read_mostly conring = _conring;
static uint32_t __read_mostly conring_size = _CONRING_SIZE;
static uint32_t conringc, conringp;
@@ -596,25 +592,20 @@ void __init console_init_preirq(void)
void __init console_init_postirq(void)
{
char *ring;
- unsigned int i;
+ unsigned int i, order;
serial_init_postirq();
if ( !opt_conring_size )
opt_conring_size = num_present_cpus() << (9 + xenlog_lower_thresh);
- /* Round size down to a power of two. */
- while ( opt_conring_size & (opt_conring_size - 1) )
- opt_conring_size &= opt_conring_size - 1;
- if ( opt_conring_size < conring_size )
- return;
-
- ring = alloc_xenheap_pages(get_order_from_bytes(opt_conring_size), 0);
- if ( ring == NULL )
+
+ order = get_order_from_bytes(max(opt_conring_size, conring_size));
+ while ( (ring = alloc_xenheap_pages(order, 0)) == NULL )
{
- printk("Unable to allocate console ring of %u bytes.\n",
- opt_conring_size);
- return;
+ BUG_ON(order == 0);
+ order--;
}
+ opt_conring_size = PAGE_SIZE << order;
spin_lock_irq(&console_lock);
for ( i = conringc ; i != conringp; i++ )
@@ -625,8 +616,6 @@ void __init console_init_postirq(void)
spin_unlock_irq(&console_lock);
printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
-
- init_xenheap_pages(__pa(_conring), __pa(_conring + _CONRING_SIZE));
}
void __init console_endboot(void)
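The reworked console_init_postirq() above no longer gives up when the requested ring cannot be allocated: it retries with successively smaller orders until an allocation succeeds, then records the size actually obtained in opt_conring_size. A userspace sketch of that fallback pattern, with malloc standing in for alloc_xenheap_pages and an assumed 4 KiB page size:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

int main(void)
{
    unsigned int order = 6;   /* ask for up to 2^6 pages (256 KiB) */
    void *ring;

    while ( (ring = malloc((size_t)PAGE_SIZE << order)) == NULL )
    {
        if ( order == 0 )
            abort();          /* analogous to BUG_ON(order == 0) */
        order--;
    }
    printf("allocated console ring of %u KiB\n", (PAGE_SIZE << order) >> 10);
    free(ring);
    return 0;
}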


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2909,7 +2909,7 @@ class XendDomainInfo:
@@ -2913,7 +2913,7 @@ class XendDomainInfo:
self.guest_bitsize = self.image.getBitSize()
# Make sure there's enough RAM available for the domain

blktap-script.patch (file deleted, 16 lines)

@@ -1,16 +0,0 @@
diff -r c02cc832cb2d tools/hotplug/Linux/blktap
--- a/tools/hotplug/Linux/blktap Tue Apr 13 18:19:33 2010 +0100
+++ b/tools/hotplug/Linux/blktap Fri Apr 16 14:48:00 2010 -0600
@@ -59,10 +59,10 @@
if [ -n "$t" ]
then
p=$(xenstore_read "$XENBUS_PATH/params")
- # if we have a ':', chew from head including :
+ # if we have a ':', remove everything up to leading '/'
if echo $p | grep -q \:
then
- p=${p#*:}
+ p="/${p#*/}"
fi
fi
# some versions of readlink cannot be passed a regular file

cpu-pools-docs.patch (updated)

@@ -1,7 +1,5 @@
Index: xen-4.0.0-testing/docs/xen-api/coversheet.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/coversheet.tex
+++ xen-4.0.0-testing/docs/xen-api/coversheet.tex
--- a/docs/xen-api/coversheet.tex
+++ b/docs/xen-api/coversheet.tex
@@ -52,6 +52,7 @@ Mike Day, IBM & Daniel Veillard, Red Hat
Jim Fehlig, Novell & Tom Wilkie, University of Cambridge \\
Jon Harrop, XenSource & Yosuke Iwamatsu, NEC \\
@@ -10,10 +8,8 @@ Index: xen-4.0.0-testing/docs/xen-api/coversheet.tex
\end{tabular}
\end{large}
Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
--- a/docs/xen-api/revision-history.tex
+++ b/docs/xen-api/revision-history.tex
@@ -50,6 +50,12 @@
between classes. Added host.PSCSI\_HBAs and VM.DSCSI\_HBAs
fields.\tabularnewline
@@ -22,15 +18,13 @@ Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
+ Added definitions of new classes cpu\_pool. Updated the table
+ and the diagram representing relationships between classes.
+ Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
+ host\_cpu.cpu\_pool.
+ host\_cpu.cpu\_pool.\tabularnewline
+ \hline
\end{tabular}
\end{center}
\end{flushleft}
Index: xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-coversheet.tex
+++ xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
--- a/docs/xen-api/xenapi-coversheet.tex
+++ b/docs/xen-api/xenapi-coversheet.tex
@@ -17,12 +17,12 @@
\newcommand{\coversheetlogo}{xen.eps}
@@ -46,10 +40,8 @@ Index: xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
%% Document authors
\newcommand{\docauthors}{
Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel-graph.dot
+++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
--- a/docs/xen-api/xenapi-datamodel-graph.dot
+++ b/docs/xen-api/xenapi-datamodel-graph.dot
@@ -14,7 +14,7 @@ fontname="Verdana";
node [ shape=box ]; session VM host network VIF PIF SR VDI VBD PBD user;
@@ -67,10 +59,8 @@ Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
+cpu_pool -> VM [ arrowhead="crow", arrowtail="none" ]
+host -> cpu_pool [ arrowhead="crow", arrowtail="none" ]
}
Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel.tex
+++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
--- a/docs/xen-api/xenapi-datamodel.tex
+++ b/docs/xen-api/xenapi-datamodel.tex
@@ -56,6 +56,7 @@ Name & Description \\
{\tt debug} & A basic class for testing \\
{\tt XSPolicy} & A class for handling Xen Security Policies \\

cpu-pools-fixes.patch (file deleted, 773 lines)

@@ -1,773 +0,0 @@
Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
@@ -54,7 +54,7 @@
Added definitions of new classes cpu\_pool. Updated the table
and the diagram representing relationships between classes.
Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
- host\_cpu.cpu\_pool.
+ host\_cpu.cpu\_pool.\tabularnewline
\hline
\end{tabular}
\end{center}
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendCPUPool.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -547,7 +547,7 @@ class XendCPUPool(XendBase):
def pool_start(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.activate()
except XendAPIError, ex:
@@ -566,8 +566,12 @@ class XendCPUPool(XendBase):
for cpu_ref in pool_vals['host_CPUs'] ]
cpus.sort()
pool_vals['host_CPU_numbers'] = cpus
- vm_names = [ xd.get_vm_by_uuid(uuid).getName()
- for uuid in pool_vals['started_VMs'] ]
+ # query VMs names. Take in account, that a VM
+ # returned by get_all_records could be destroy, now
+ vm_names = [ vm.getName()
+ for vm in map(xd.get_vm_by_uuid,
+ pool_vals['started_VMs'])
+ if vm ]
pool_vals['started_VM_names'] = vm_names
pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
sxprs += [[pool_uuid] + map2sxp(pool_vals)]
@@ -578,7 +582,7 @@ class XendCPUPool(XendBase):
def pool_destroy(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.deactivate()
if not pool.is_managed():
@@ -589,7 +593,7 @@ class XendCPUPool(XendBase):
def pool_delete(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.destroy()
except XendAPIError, ex:
@@ -598,28 +602,28 @@ class XendCPUPool(XendBase):
def pool_cpu_add(cls, poolname, cpu):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
cpu_ref = cls._cpu_number_to_ref(int(cpu))
if cpu_ref:
pool.add_host_CPU_live(cpu_ref)
else:
raise PoolError(XEND_ERROR_INVALID_CPU,
- 'CPU unkown')
+ 'CPU unknown')
except XendAPIError, ex:
raise VmError(ex.get_api_error())
def pool_cpu_remove(cls, poolname, cpu):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
cpu_ref = cls._cpu_number_to_ref(int(cpu))
if cpu_ref:
pool.remove_host_CPU_live(cpu_ref)
else:
raise PoolError(XEND_ERROR_INVALID_CPU,
- 'CPU unkown')
+ 'CPU unknown')
except XendAPIError, ex:
raise VmError(ex.get_api_error())
@@ -627,10 +631,10 @@ class XendCPUPool(XendBase):
dom = XendDomain.instance()
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
dominfo = dom.domain_lookup_nr(domname)
if not dominfo:
- raise VmError('unkown domain %s' % domname)
+ raise VmError('unknown domain %s' % domname)
domid = dominfo.getDomid()
if domid is not None:
if domid == 0:
@@ -860,8 +864,11 @@ class XendCPUPool(XendBase):
pool_uuid = None
try:
pool_id = int(id_or_name)
- # pool id given
+ # pool id given ?
pool_uuid = cls.query_pool_ref(pool_id)
+ if not pool_uuid:
+ # not found -> search name
+ pool_uuid = cls.get_by_name_label(id_or_name)
except ValueError:
# pool name given
pool_uuid = cls.get_by_name_label(id_or_name)
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2574,7 +2574,7 @@ class XendDomainInfo:
pool = XendCPUPool.lookup_pool(pool_name)
if pool is None:
- raise VmError("unkown pool %s" % pool_name)
+ raise VmError("unknown pool %s" % pool_name)
pool_id = pool.query_pool_id()
if pool_id is None:
raise VmError("pool %s not activated" % pool_name)
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -3515,7 +3515,7 @@ def get_pool_ref(name):
if len(refs) > 0:
return refs[0]
else:
- err('unkown pool name')
+ err('unknown pool name')
sys.exit(1)
def xm_pool_start(args):
@@ -3643,7 +3643,7 @@ def xm_pool_cpu_add(args):
cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
if c_rec['number'] == args[1] ]
if len(cpu_ref) == 0:
- err('cpu number unkown')
+ err('cpu number unknown')
else:
server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
else:
@@ -3657,7 +3657,7 @@ def xm_pool_cpu_remove(args):
cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
if c_rec['number'] == args[1] ]
if len(cpu_ref) == 0:
- err('cpu number unkown')
+ err('cpu number unknown')
else:
server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
else:
Index: xen-4.0.0-testing/xen/common/cpupool.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/cpupool.c
+++ xen-4.0.0-testing/xen/common/cpupool.c
@@ -29,6 +29,9 @@ static struct cpupool *cpupool_list;
static int cpupool0_max_cpus;
integer_param("pool0_max_cpus", cpupool0_max_cpus);
+static int cpupool_moving_cpu = -1;
+static struct cpupool *cpupool_cpu_moving = NULL;
+
/* cpupool lock: be carefull, this lock is sometimes released on another cpu
* as it was obtained!
*/
@@ -104,7 +107,6 @@ struct cpupool *cpupool_create(int pooli
}
*q = c;
c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
- c->cpu_in_transit = -1;
if ( schedule_init_global(sched, &(c->sched)) )
{
spin_unlock(&cpupool_lock);
@@ -151,16 +153,20 @@ int cpupool_destroy(struct cpupool *c)
* assign a specific cpu to a cpupool
* cpupool_lock must be held
*/
-static void cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
+static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
- printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
- c->cpupool_id, cpu);
+ if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
+ return -EBUSY;
per_cpu(cpupool, cpu) = c;
schedule_cpu_switch(cpu, c);
cpu_clear(cpu, cpupool_free_cpus);
+ if (cpupool_moving_cpu == cpu)
+ {
+ cpupool_moving_cpu = -1;
+ cpupool_cpu_moving = NULL;
+ }
cpu_set(cpu, c->cpu_valid);
- printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ready\n",
- c->cpupool_id, cpu);
+ return 0;
}
/*
@@ -177,8 +183,8 @@ int cpupool_assign_ncpu(struct cpupool *
spin_lock(&cpupool_lock);
for_each_cpu_mask(i, cpupool_free_cpus)
{
- cpupool_assign_cpu_locked(c, i);
- n++;
+ if ( cpupool_assign_cpu_locked(c, i) == 0 )
+ n++;
if ( n == ncpu )
break;
}
@@ -188,43 +194,25 @@ int cpupool_assign_ncpu(struct cpupool *
return n;
}
-static void cpupool_unassign_cpu_locked_1(struct cpupool *c, unsigned int cpu)
-{
- printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
- c->cpupool_id, cpu);
- c->cpu_in_transit = cpu;
-}
-
-static int cpupool_unassign_cpu_locked_2(struct cpupool *c)
+static long cpupool_unassign_cpu_helper(void *hdl, void *info)
{
- int cpu = c->cpu_in_transit;
- int ret;
+ struct cpupool *c = (struct cpupool *)info;
+ int cpu = cpupool_moving_cpu;
+ long ret;
+ int cpupool_id = c->cpupool_id;
- c->cpu_in_transit = -1;
- cpu_clear(cpu, c->cpu_valid);
ret = cpu_disable_scheduler(cpu, 1);
- if ( ret )
- {
- cpu_set(cpu, c->cpu_valid);
- }
- else
+ cpu_set(cpu, cpupool_free_cpus);
+ if ( !ret )
{
- cpu_set(cpu, cpupool_free_cpus);
schedule_cpu_switch(cpu, NULL);
per_cpu(cpupool, cpu) = NULL;
+ cpupool_moving_cpu = -1;
+ cpupool_cpu_moving = NULL;
}
- printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
- c->cpupool_id, cpu, ret);
- return ret;
-}
-
-static long cpupool_unassign_cpu_helper(void *hdl, void *info)
-{
- struct cpupool *c = (struct cpupool *)info;
- long ret;
-
- ret = cpupool_unassign_cpu_locked_2(c);
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
+ cpupool_id, cpu, ret);
return ret;
}
@@ -242,16 +230,23 @@ static long cpupool_unassign_cpu_helper(
int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
{
int work_cpu;
- int rc = 0;
+ int ret;
struct domain *d;
+ int cpupool_id = c->cpupool_id;
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ cpupool_id, cpu);
spin_lock(&cpupool_lock);
- if ( !cpu_isset(cpu, c->cpu_valid) )
- {
- spin_unlock(&cpupool_lock);
- return 0;
- }
- if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) )
+ ret = -EBUSY;
+ if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
+ goto out;
+
+ ret = 0;
+ if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+ goto out;
+
+ if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+ (cpu != cpupool_moving_cpu) )
{
for_each_domain(d)
{
@@ -259,27 +254,24 @@ int cpupool_unassign_cpu(struct cpupool
continue;
if ( !d->is_dying )
{
- rc = -EBUSY;
+ ret = -EBUSY;
break;
}
- printk(XENLOG_DEBUG "moving dying domain %d to pool0\n",
- d->domain_id);
c->n_dom--;
- rc = sched_move_domain(d, cpupool0);
- if ( rc )
+ ret = sched_move_domain(d, cpupool0);
+ if ( ret )
{
c->n_dom++;
break;
}
cpupool0->n_dom++;
}
- if ( rc )
- {
- spin_unlock(&cpupool_lock);
- return rc;
- }
+ if ( ret )
+ goto out;
}
- cpupool_unassign_cpu_locked_1(c, cpu);
+ cpupool_moving_cpu = cpu;
+ cpupool_cpu_moving = c;
+ cpu_clear(cpu, c->cpu_valid);
work_cpu = smp_processor_id();
if ( work_cpu == cpu )
{
@@ -289,6 +281,12 @@ int cpupool_unassign_cpu(struct cpupool
}
return continue_hypercall_on_cpu(work_cpu, NULL,
cpupool_unassign_cpu_helper, c);
+
+out:
+ spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+ cpupool_id, cpu, ret);
+ return ret;
}
/*
@@ -316,6 +314,7 @@ int cpupool_add_domain(struct domain *d,
{
struct cpupool *c;
int rc = 1;
+ int n_dom;
if ( poolid == CPUPOOLID_NONE )
return 0;
@@ -324,12 +323,14 @@ int cpupool_add_domain(struct domain *d,
if ( (c != NULL) && cpus_weight(c->cpu_valid) )
{
c->n_dom++;
+ n_dom = c->n_dom;
d->cpupool = c;
- printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
- d->domain_id, poolid, c->n_dom);
rc = 0;
}
spin_unlock(&cpupool_lock);
+ if (!rc)
+ printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
+ d->domain_id, poolid, n_dom);
return rc;
}
@@ -338,14 +339,19 @@ int cpupool_add_domain(struct domain *d,
*/
void cpupool_rm_domain(struct domain *d)
{
+ int cpupool_id;
+ int n_dom;
+
if ( d->cpupool == NULL )
return;
spin_lock(&cpupool_lock);
+ cpupool_id = d->cpupool->cpupool_id;
d->cpupool->n_dom--;
- printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
- d->domain_id, d->cpupool->cpupool_id, d->cpupool->n_dom);
+ n_dom = d->cpupool->n_dom;
d->cpupool = NULL;
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+ d->domain_id, cpupool_id, n_dom);
return;
}
@@ -359,7 +365,7 @@ void cpupool_cpu_add(unsigned int cpu)
return;
spin_lock(&cpupool_lock);
cpu_set(cpu, cpupool_free_cpus);
- cpupool_assign_cpu_locked(cpupool0, cpu);
+ (void)cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
return;
}
@@ -428,6 +434,8 @@ int cpupool_do_domctl(struct xen_domctl_
unsigned cpu;
cpu = op->cpu;
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+ op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
cpu = first_cpu(cpupool_free_cpus);
@@ -441,10 +449,11 @@ int cpupool_do_domctl(struct xen_domctl_
ret = -ENOENT;
if ( c == NULL )
goto addcpu_out;
- cpupool_assign_cpu_locked(c, cpu);
- ret = 0;
+ ret = cpupool_assign_cpu_locked(c, cpu);
addcpu_out:
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+ op->cpupool_id, cpu, ret);
}
break;
@@ -488,23 +497,23 @@ addcpu_out:
rcu_unlock_domain(d);
break;
}
+ printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
+ d->domain_id, op->cpupool_id);
ret = -ENOENT;
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 1);
if ( (c != NULL) && cpus_weight(c->cpu_valid) )
{
- printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
- d->domain_id, c->cpupool_id);
d->cpupool->n_dom--;
ret = sched_move_domain(d, c);
if ( ret )
d->cpupool->n_dom++;
else
c->n_dom++;
- printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
- d->domain_id, c->cpupool_id, ret);
}
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+ d->domain_id, op->cpupool_id, ret);
rcu_unlock_domain(d);
}
break;
Index: xen-4.0.0-testing/xen/common/sched_credit.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_credit.c
+++ xen-4.0.0-testing/xen/common/sched_credit.c
@@ -602,7 +602,7 @@ csched_vcpu_acct(struct csched_private *
}
static void *
-csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc)
+csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc, void *dd)
{
struct csched_vcpu *svc;
@@ -614,7 +614,7 @@ csched_alloc_vdata(struct scheduler *ops
INIT_LIST_HEAD(&svc->runq_elem);
INIT_LIST_HEAD(&svc->active_vcpu_elem);
- svc->sdom = CSCHED_DOM(vc->domain);
+ svc->sdom = dd;
svc->vcpu = vc;
atomic_set(&svc->credit, 0);
svc->flags = 0U;
@@ -778,19 +778,14 @@ csched_dom_cntl(
return 0;
}
-static int
-csched_dom_init(struct scheduler *ops, struct domain *dom)
+static void *
+csched_alloc_domdata(struct scheduler *ops, struct domain *dom)
{
struct csched_dom *sdom;
- CSCHED_STAT_CRANK(dom_init);
-
- if ( is_idle_domain(dom) )
- return 0;
-
sdom = xmalloc(struct csched_dom);
if ( sdom == NULL )
- return -ENOMEM;
+ return NULL;
memset(sdom, 0, sizeof(*sdom));
/* Initialize credit and weight */
@@ -800,16 +795,40 @@ csched_dom_init(struct scheduler *ops, s
sdom->dom = dom;
sdom->weight = CSCHED_DEFAULT_WEIGHT;
sdom->cap = 0U;
+
+ return (void *)sdom;
+}
+
+static int
+csched_dom_init(struct scheduler *ops, struct domain *dom)
+{
+ struct csched_dom *sdom;
+
+ CSCHED_STAT_CRANK(dom_init);
+
+ if ( is_idle_domain(dom) )
+ return 0;
+
+ sdom = csched_alloc_domdata(ops, dom);
+ if ( sdom == NULL )
+ return -ENOMEM;
+
dom->sched_priv = sdom;
return 0;
}
static void
+csched_free_domdata(struct scheduler *ops, void *data)
+{
+ xfree(data);
+}
+
+static void
csched_dom_destroy(struct scheduler *ops, struct domain *dom)
{
CSCHED_STAT_CRANK(dom_destroy);
- xfree(CSCHED_DOM(dom));
+ csched_free_domdata(ops, CSCHED_DOM(dom));
}
/*
@@ -1147,9 +1166,10 @@ csched_load_balance(struct csched_privat
int peer_cpu;
BUG_ON( cpu != snext->vcpu->processor );
+ online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
/* If this CPU is going offline we shouldn't steal work. */
- if ( unlikely(!cpu_online(cpu)) )
+ if ( unlikely(!cpu_isset(cpu, *online)) )
goto out;
if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1163,7 +1183,6 @@ csched_load_balance(struct csched_privat
* Peek at non-idling CPUs in the system, starting with our
* immediate neighbour.
*/
- online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
cpus_andnot(workers, *online, prv->idlers);
cpu_clear(cpu, workers);
peer_cpu = cpu;
@@ -1218,31 +1237,6 @@ csched_schedule(struct scheduler *ops, s
CSCHED_STAT_CRANK(schedule);
CSCHED_VCPU_CHECK(current);
- if ( unlikely(!cpu_isset(cpu, *CSCHED_CPUONLINE(per_cpu(cpupool, cpu)))) )
- {
- /* race with switching cpu between pools: when cpu is leaving the
- pool try to schedule idle vcpu */
-
- struct list_head * iter;
-
- snext = scurr;
- if (is_idle_vcpu(current))
- goto out;
-
- if ( vcpu_runnable(current) )
- __runq_insert(cpu, scurr);
-
- list_for_each(iter, runq)
- {
- snext = __runq_elem(iter);
- if ( snext->pri == CSCHED_PRI_IDLE )
- break;
- }
- BUG_ON( snext->pri != CSCHED_PRI_IDLE );
- __runq_remove(snext);
- goto out;
- }
-
/* Update credits */
if ( !is_idle_vcpu(scurr->vcpu) )
{
@@ -1273,7 +1267,6 @@ csched_schedule(struct scheduler *ops, s
else
snext = csched_load_balance(prv, cpu, snext);
-out:
/*
* Update idlers mask if necessary. When we're idling, other CPUs
* will tickle us when they get extra work.
@@ -1553,6 +1546,8 @@ struct scheduler sched_credit_def = {
.free_vdata = csched_free_vdata,
.alloc_pdata = csched_alloc_pdata,
.free_pdata = csched_free_pdata,
+ .alloc_domdata = csched_alloc_domdata,
+ .free_domdata = csched_free_domdata,
.tick_suspend = csched_tick_suspend,
.tick_resume = csched_tick_resume,
Index: xen-4.0.0-testing/xen/common/sched_sedf.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_sedf.c
+++ xen-4.0.0-testing/xen/common/sched_sedf.c
@@ -332,7 +332,7 @@ static inline void __add_to_runqueue_sor
}
-static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v)
+static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v, void *dd)
{
struct sedf_vcpu_info *inf;
@@ -415,20 +415,37 @@ static void sedf_destroy_vcpu(struct sch
sedf_free_vdata(ops, v->sched_priv);
}
+static void *
+sedf_alloc_domdata(struct scheduler *ops, struct domain *d)
+{
+ void *mem;
+
+ mem = xmalloc(struct sedf_dom_info);
+ if ( mem == NULL )
+ return NULL;
+
+ memset(mem, 0, sizeof(struct sedf_dom_info));
+
+ return mem;
+}
+
static int sedf_init_domain(struct scheduler *ops, struct domain *d)
{
- d->sched_priv = xmalloc(struct sedf_dom_info);
+ d->sched_priv = sedf_alloc_domdata(ops, d);
if ( d->sched_priv == NULL )
return -ENOMEM;
- memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
-
return 0;
}
+static void sedf_free_domdata(struct scheduler *ops, void *data)
+{
+ xfree(data);
+}
+
static void sedf_destroy_domain(struct scheduler *ops, struct domain *d)
{
- xfree(d->sched_priv);
+ sedf_free_domdata(ops, d->sched_priv);
}
static int sedf_pick_cpu(struct scheduler *ops, struct vcpu *v)
@@ -1498,6 +1515,8 @@ struct scheduler sched_sedf_def = {
.free_vdata = sedf_free_vdata,
.alloc_pdata = sedf_alloc_pdata,
.free_pdata = sedf_free_pdata,
+ .alloc_domdata = sedf_alloc_domdata,
+ .free_domdata = sedf_free_domdata,
.do_schedule = sedf_do_schedule,
.pick_cpu = sedf_pick_cpu,
Index: xen-4.0.0-testing/xen/common/schedule.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/schedule.c
+++ xen-4.0.0-testing/xen/common/schedule.c
@@ -222,7 +222,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
return 1;
}
- v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v);
+ v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
if ( v->sched_priv == NULL )
return 1;
@@ -237,14 +237,23 @@ int sched_move_domain(struct domain *d,
struct vcpu *v;
unsigned int new_p;
void **vcpu_priv;
+ void *domdata;
+
+ domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+ if ( domdata == NULL )
+ return -ENOMEM;
vcpu_priv = xmalloc_array(void *, d->max_vcpus);
if ( vcpu_priv == NULL )
+ {
+ SCHED_OP(&(c->sched), free_domdata, domdata);
return -ENOMEM;
+ }
+
memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
for_each_vcpu ( d, v )
{
- vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v);
+ vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
if ( vcpu_priv[v->vcpu_id] == NULL )
{
for_each_vcpu ( d, v )
@@ -253,6 +262,7 @@ int sched_move_domain(struct domain *d,
xfree(vcpu_priv[v->vcpu_id]);
}
xfree(vcpu_priv);
+ SCHED_OP(&(c->sched), free_domdata, domdata);
return -ENOMEM;
}
}
@@ -276,6 +286,8 @@ int sched_move_domain(struct domain *d,
}
d->cpupool = c;
+ SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
+ d->sched_priv = domdata;
domain_unpause(d);
@@ -1079,7 +1091,7 @@ void schedule_cpu_switch(unsigned int cp
v = per_cpu(schedule_data, cpu).idle;
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
if ( c != NULL )
- vpriv = SCHED_OP(new_ops, alloc_vdata, v);
+ vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);
spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
Index: xen-4.0.0-testing/xen/include/xen/sched-if.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/xen/sched-if.h
+++ xen-4.0.0-testing/xen/include/xen/sched-if.h
@@ -78,9 +78,12 @@ struct scheduler {
void (*deinit) (struct scheduler *);
void (*free_vdata) (struct scheduler *, void *);
- void * (*alloc_vdata) (struct scheduler *, struct vcpu *);
+ void * (*alloc_vdata) (struct scheduler *, struct vcpu *,
+ void *);
void (*free_pdata) (struct scheduler *, void *, int);
void * (*alloc_pdata) (struct scheduler *, int);
+ void (*free_domdata) (struct scheduler *, void *);
+ void * (*alloc_domdata) (struct scheduler *, struct domain *);
int (*init_domain) (struct scheduler *, struct domain *);
void (*destroy_domain) (struct scheduler *, struct domain *);
@@ -109,7 +112,6 @@ struct cpupool
cpumask_t cpu_valid; /* all cpus assigned to pool */
struct cpupool *next;
unsigned int n_dom;
- int cpu_in_transit; /* used for adding/removing cpus */
struct scheduler sched;
};
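The sched-if.h and schedule.c hunks above split per-domain scheduler state into alloc_domdata/free_domdata hooks so that sched_move_domain can build the complete new state before committing: allocate the target pool's domain data, allocate every per-vCPU structure against it, and only then swap pointers and free the old state, unwinding on any failure. A compact userspace sketch of that two-phase pattern (all names and allocation sizes here are placeholders):

#include <stdlib.h>
#include <string.h>

struct dom { void *sched_priv; void *vcpu_priv[4]; int nvcpus; };

/* Two-phase move: fully construct the new scheduler state, then commit.
 * On any allocation failure the old state is left untouched. */
static int move_domain(struct dom *d)
{
    void *domdata = malloc(64);              /* alloc_domdata */
    void **vpriv;
    int i;

    if (domdata == NULL)
        return -1;
    vpriv = calloc(d->nvcpus, sizeof(void *));
    if (vpriv == NULL) {
        free(domdata);                       /* unwind: free_domdata */
        return -1;
    }
    for (i = 0; i < d->nvcpus; i++) {
        vpriv[i] = malloc(32);               /* alloc_vdata(..., domdata) */
        if (vpriv[i] == NULL) {
            while (i--)
                free(vpriv[i]);
            free(vpriv);
            free(domdata);
            return -1;
        }
    }
    /* commit: swap in the new state, free the old */
    for (i = 0; i < d->nvcpus; i++) {
        free(d->vcpu_priv[i]);
        d->vcpu_priv[i] = vpriv[i];
    }
    free(d->sched_priv);
    d->sched_priv = domdata;
    free(vpriv);
    return 0;
}

int main(void)
{
    struct dom d;
    memset(&d, 0, sizeof(d));
    d.nvcpus = 2;
    return move_domain(&d);
}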

cpu-pools-libxen.patch (updated)

@@ -1,7 +1,5 @@
Index: xen-4.0.0-testing/tools/libxc/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/Makefile
+++ xen-4.0.0-testing/tools/libxc/Makefile
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
CTRL_SRCS-y += xc_core.c
CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
@@ -10,10 +8,8 @@ Index: xen-4.0.0-testing/tools/libxc/Makefile
CTRL_SRCS-y += xc_domain.c
CTRL_SRCS-y += xc_evtchn.c
CTRL_SRCS-y += xc_misc.c
Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
+++ b/tools/libxc/xc_cpupool.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
@@ -169,42 +165,9 @@ Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
+
+ return 0;
+}
Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
+++ xen-4.0.0-testing/tools/libxc/xc_domain.c
@@ -6,6 +6,7 @@
* Copyright (c) 2003, K A Fraser.
*/
+#include <stdarg.h>
#include "xc_private.h"
#include "xg_save_restore.h"
#include <xen/memory.h>
@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
- uint32_t *pdomid)
+ uint32_t *pdomid, ...)
{
int err;
+ va_list ap;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_createdomain;
domctl.domain = (domid_t)*pdomid;
domctl.u.createdomain.ssidref = ssidref;
domctl.u.createdomain.flags = flags;
+ if ( flags & XEN_DOMCTL_CDF_pool ) {
+ va_start(ap, pdomid);
+ domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
+ va_end(ap);
+ }
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
return err;
@@ -220,6 +227,7 @@ int xc_domain_getinfo(int xc_handle,
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
info->cpu_time = domctl.u.getdomaininfo.cpu_time;
info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
@@ -212,10 +175,8 @@ Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
memcpy(info->handle, domctl.u.getdomaininfo.handle,
sizeof(xen_domain_handle_t));
Index: xen-4.0.0-testing/tools/libxc/xc_private.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
+++ xen-4.0.0-testing/tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
return ret;
}
@@ -236,10 +197,8 @@ Index: xen-4.0.0-testing/tools/libxc/xc_private.h
static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
int ret = -1;
Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
===================================================================
--- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
+++ xen-4.0.0-testing/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
unsigned int nr_online_vcpus;
unsigned int max_vcpu_id;
@@ -248,15 +207,6 @@ Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
} xc_dominfo_t;
typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
- uint32_t *pdomid);
+ uint32_t *pdomid, ...);
/* Functions to produce a dump of a given domain
@@ -502,6 +503,100 @@ int xc_domain_setdebugging(int xc_handle
unsigned int enable);
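The prototype change just above turns xc_domain_create() into a variadic function: a trailing cpupool id is read only when XEN_DOMCTL_CDF_pool is set in flags, so existing callers remain source-compatible. A self-contained sketch of that optional-trailing-argument convention (the flag value and all other names are assumptions, not taken from the Xen headers):

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

#define CDF_POOL (1u << 4)  /* stand-in for XEN_DOMCTL_CDF_pool */

static int domain_create(uint32_t flags, uint32_t *pdomid, ...)
{
    uint32_t cpupool = 0;
    va_list ap;

    if ( flags & CDF_POOL )  /* the vararg is read only when flagged */
    {
        va_start(ap, pdomid);
        cpupool = va_arg(ap, uint32_t);
        va_end(ap);
    }
    printf("create domain %u in cpupool %u\n", *pdomid, cpupool);
    return 0;
}

int main(void)
{
    uint32_t domid = 0;
    return domain_create(CDF_POOL, &domid, 3u);
}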

cpu-pools-python.patch (updated)

@@ -1,42 +1,17 @@
Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -97,17 +97,18 @@ static PyObject *pyxc_domain_create(XcOb
PyObject *args,
PyObject *kwds)
{
- uint32_t dom = 0, ssidref = 0, flags = 0, target = 0;
+ uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0;
int ret, i;
PyObject *pyhandle = NULL;
xen_domain_handle_t handle = {
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -106,8 +106,8 @@ static PyObject *pyxc_domain_create(XcOb
- static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
+ static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL };
static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
- &dom, &ssidref, &pyhandle, &flags, &target))
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom,
+ &ssidref, &pyhandle, &flags, &target,
+ &cpupool))
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list, &dom,
+ &ssidref, &pyhandle, &flags, &target))
return NULL;
if ( pyhandle != NULL )
{
@@ -124,8 +125,9 @@ static PyObject *pyxc_domain_create(XcOb
}
}
+ flags |= XEN_DOMCTL_CDF_pool;
if ( (ret = xc_domain_create(self->xc_handle, ssidref,
- handle, flags, &dom)) < 0 )
+ handle, flags, &dom, cpupool)) < 0 )
return pyxc_error_to_exception();
if ( target )
@@ -329,7 +331,7 @@ static PyObject *pyxc_domain_getinfo(XcO
@@ -329,7 +329,7 @@ static PyObject *pyxc_domain_getinfo(XcO
{
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
@@ -45,7 +20,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
"domid", (int)info[i].domid,
"online_vcpus", info[i].nr_online_vcpus,
"max_vcpu_id", info[i].max_vcpu_id,
@@ -344,7 +346,8 @@ static PyObject *pyxc_domain_getinfo(XcO
@@ -344,7 +344,8 @@ static PyObject *pyxc_domain_getinfo(XcO
"cpu_time", (long long)info[i].cpu_time,
"maxmem_kb", (long long)info[i].max_memkb,
"ssidref", (int)info[i].ssidref,
@ -55,7 +30,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
pyhandle = PyList_New(sizeof(xen_domain_handle_t));
if ( (pyhandle == NULL) || (info_dict == NULL) )
{
@@ -1751,6 +1754,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
@@ -1751,6 +1752,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
return zero;
}
@@ -235,7 +210,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
static PyMethodDef pyxc_methods[] = {
{ "handle",
@@ -1866,7 +2042,8 @@ static PyMethodDef pyxc_methods[] = {
@@ -1866,7 +2040,8 @@ static PyMethodDef pyxc_methods[] = {
" maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
" cpu_time [long]: CPU time consumed, in nanoseconds\n"
" shutdown_reason [int]: Numeric code from guest OS, explaining "
@ -245,7 +220,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
{ "vcpu_getinfo",
(PyCFunction)pyxc_vcpu_getinfo,
@@ -2264,6 +2441,66 @@ static PyMethodDef pyxc_methods[] = {
@@ -2264,6 +2439,66 @@ static PyMethodDef pyxc_methods[] = {
" enable [int,0|1]: Disable or enable?\n"
"Returns: [int] 0 on success; -1 on error.\n" },
@@ -312,10 +287,8 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
{ NULL, NULL, 0, NULL }
};
Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/util/sxputils.py
+++ b/tools/python/xen/util/sxputils.py
@@ -0,0 +1,64 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@@ -381,10 +354,8 @@ Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py
+ return sxphash
+
+
Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendAPI.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py
+++ b/tools/python/xen/xend/XendAPI.py
@@ -51,6 +51,7 @@ from XendDPCI import XendDPCI
from XendPSCSI import XendPSCSI, XendPSCSI_HBA
from XendDSCSI import XendDSCSI, XendDSCSI_HBA
@@ -576,11 +547,9 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
# Xen API: Class VBD
# ----------------------------------------------------------------
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -0,0 +1,896 @@
+++ b/tools/python/xen/xend/XendCPUPool.py
@@ -0,0 +1,903 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
@@ -1130,7 +1099,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ def pool_start(cls, poolname):
+ pool = cls.lookup_pool(poolname)
+ if not pool:
+ raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
+ try:
+ pool.activate()
+ except XendAPIError, ex:
@@ -1149,8 +1118,12 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ for cpu_ref in pool_vals['host_CPUs'] ]
+ cpus.sort()
+ pool_vals['host_CPU_numbers'] = cpus
+ vm_names = [ xd.get_vm_by_uuid(uuid).getName()
+ for uuid in pool_vals['started_VMs'] ]
+ # query VMs names. Take in account, that a VM
+ # returned by get_all_records could be destroy, now
+ vm_names = [ vm.getName()
+ for vm in map(xd.get_vm_by_uuid,
+ pool_vals['started_VMs'])
+ if vm ]
+ pool_vals['started_VM_names'] = vm_names
+ pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
+ sxprs += [[pool_uuid] + map2sxp(pool_vals)]
@@ -1161,7 +1134,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ def pool_destroy(cls, poolname):
+ pool = cls.lookup_pool(poolname)
+ if not pool:
+ raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
+ try:
+ pool.deactivate()
+ if not pool.is_managed():
@@ -1172,7 +1145,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ def pool_delete(cls, poolname):
+ pool = cls.lookup_pool(poolname)
+ if not pool:
+ raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
+ try:
+ pool.destroy()
+ except XendAPIError, ex:
@@ -1181,28 +1154,28 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ def pool_cpu_add(cls, poolname, cpu):
+ pool = cls.lookup_pool(poolname)
+ if not pool:
+ raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
+ try:
+ cpu_ref = cls._cpu_number_to_ref(int(cpu))
+ if cpu_ref:
+ pool.add_host_CPU_live(cpu_ref)
+ else:
+ raise PoolError(XEND_ERROR_INVALID_CPU,
+ 'CPU unkown')
+ 'CPU unknown')
+ except XendAPIError, ex:
+ raise VmError(ex.get_api_error())
+
+ def pool_cpu_remove(cls, poolname, cpu):
+ pool = cls.lookup_pool(poolname)
+ if not pool:
+ raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
+ try:
+ cpu_ref = cls._cpu_number_to_ref(int(cpu))
+ if cpu_ref:
+ pool.remove_host_CPU_live(cpu_ref)
+ else:
+ raise PoolError(XEND_ERROR_INVALID_CPU,
+ 'CPU unkown')
+ 'CPU unknown')
+ except XendAPIError, ex:
+ raise VmError(ex.get_api_error())
+
@@ -1210,10 +1183,10 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ dom = XendDomain.instance()
+ pool = cls.lookup_pool(poolname)
+ if not pool:
+ raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
+ dominfo = dom.domain_lookup_nr(domname)
+ if not dominfo:
+ raise VmError('unkown domain %s' % domname)
+ raise VmError('unknown domain %s' % domname)
+ domid = dominfo.getDomid()
+ if domid is not None:
+ if domid == 0:
@@ -1443,8 +1416,11 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+ pool_uuid = None
+ try:
+ pool_id = int(id_or_name)
+ # pool id given
+ # pool id given ?
+ pool_uuid = cls.query_pool_ref(pool_id)
+ if not pool_uuid:
+ # not found -> search name
+ pool_uuid = cls.get_by_name_label(id_or_name)
+ except ValueError:
+ # pool name given
+ pool_uuid = cls.get_by_name_label(id_or_name)
@@ -1477,10 +1453,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
+
+ unbound_cpus = classmethod(unbound_cpus)
+
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py
+++ b/tools/python/xen/xend/XendConfig.py
@@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
'PV_bootloader': 'bootloader',
'PV_bootloader_args': 'bootloader_args',
@@ -1521,10 +1495,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
'superpages': 0,
'description': '',
}
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConstants.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
--- a/tools/python/xen/xend/XendConstants.py
+++ b/tools/python/xen/xend/XendConstants.py
@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
XS_VMROOT = "/vm/"
@@ -1534,10 +1506,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
NR_PCI_FUNC = 8
NR_PCI_DEV = 32
NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -60,6 +60,7 @@ from xen.xend.xenstore.xsutil import Get
from xen.xend.xenstore.xswatch import xswatch
from xen.xend.XendConstants import *
@@ -1558,7 +1528,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+ pool = XendCPUPool.lookup_pool(pool_name)
+
+ if pool is None:
+ raise VmError("unkown pool %s" % pool_name)
+ raise VmError("unknown pool %s" % pool_name)
+ pool_id = pool.query_pool_id()
+ if pool_id is None:
+ raise VmError("pool %s not activated" % pool_name)
@@ -1566,15 +1536,19 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
try:
@@ -2573,6 +2587,7 @@ class XendDomainInfo:
ssidref = ssidref,
handle = uuid.fromString(self.info['uuid']),
flags = flags,
+ cpupool = pool_id,
target = self.info.target())
except Exception, e:
# may get here if due to ACM the operation is not permitted
@@ -3613,6 +3628,11 @@ class XendDomainInfo:
@@ -2586,6 +2600,11 @@ class XendDomainInfo:
failmsg += ', error=%i' % int(self.domid)
raise VmError(failmsg)
+ try:
+ xc.cpupool_movedomain(pool_id, self.domid)
+ except Exception, e:
+ raise VmError('Moving domain to target pool failed')
+
self.dompath = GetDomainPath(self.domid)
self._recreateDom()
@@ -3613,6 +3632,11 @@ class XendDomainInfo:
retval = xc.sched_credit_domain_get(self.getDomid())
return retval
@@ -1586,10 +1560,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
def get_power_state(self):
return XEN_API_VM_POWER_STATE[self._stateGet()]
def get_platform(self):
Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendError.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendError.py
--- a/tools/python/xen/xend/XendError.py
+++ b/tools/python/xen/xend/XendError.py
@@ -18,6 +18,7 @@
from xmlrpclib import Fault
@@ -1625,10 +1597,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
class VDIError(XendAPIError):
def __init__(self, error, vdi):
XendAPIError.__init__(self)
Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py
+++ b/tools/python/xen/xend/XendNode.py
@@ -43,6 +43,7 @@ from XendStateStore import XendStateStor
from XendMonitor import XendMonitor
from XendPPCI import XendPPCI
@@ -1702,10 +1672,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
'max_free_memory',
'max_para_memory',
'max_hvm_memory',
Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py
+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py
+++ b/tools/python/xen/xend/server/SrvServer.py
@@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
from xen.xend.XendLogging import log
from xen.xend.XendClient import XEN_API_SOCKET
@@ -1727,10 +1695,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
# Reaching this point means we can auto start domains
try:
xenddomain().autostart_domains()
Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/XMLRPCServer.py
+++ xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py
+++ b/tools/python/xen/xend/server/XMLRPCServer.py
@@ -33,6 +33,7 @@ from xen.xend.XendClient import XML_RPC_
from xen.xend.XendConstants import DOM_STATE_RUNNING
from xen.xend.XendLogging import log
@@ -1762,10 +1728,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
# Functions in XendNode and XendDmesg
for type, lst, n in [(XendNode,
['info', 'pciinfo', 'send_debug_keys',
Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.dtd
+++ xen-4.0.0-testing/tools/python/xen/xm/create.dtd
--- a/tools/python/xen/xm/create.dtd
+++ b/tools/python/xen/xm/create.dtd
@@ -50,6 +50,7 @@
s3_integrity CDATA #REQUIRED
vcpus_max CDATA #REQUIRED
@@ -1774,10 +1738,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
actions_after_shutdown %NORMAL_EXIT; #REQUIRED
actions_after_reboot %NORMAL_EXIT; #REQUIRED
actions_after_crash %CRASH_BEHAVIOUR; #REQUIRED
Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py
+++ xen-4.0.0-testing/tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
fn=set_bool, default=None,
use="""Do not inject spurious page faults into this guest""")
@@ -1798,10 +1760,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
config_image = configure_image(vals)
if vals.bootloader:
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py
+++ b/tools/python/xen/xm/main.py
@@ -56,6 +56,7 @@ from xen.util.xmlrpcclient import Server
import xen.util.xsm.xsm as security
from xen.util.xsm.xsm import XSMError
@@ -1982,7 +1942,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
+ if len(refs) > 0:
+ return refs[0]
+ else:
+ err('unkown pool name')
+ err('unknown pool name')
+ sys.exit(1)
+
+def xm_pool_start(args):
@@ -2110,7 +2070,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
+ cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
+ if c_rec['number'] == args[1] ]
+ if len(cpu_ref) == 0:
+ err('cpu number unkown')
+ err('cpu number unknown')
+ else:
+ server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
+ else:
@@ -2124,7 +2084,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
+ cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
+ if c_rec['number'] == args[1] ]
+ if len(cpu_ref) == 0:
+ err('cpu number unkown')
+ err('cpu number unknown')
+ else:
+ server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
+ else:
@@ -2167,10 +2127,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
]
for c in IMPORTED_COMMANDS:
Index: xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
+++ b/tools/python/xen/xm/pool-create.py
@@ -0,0 +1,51 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@@ -2223,10 +2181,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
+if __name__ == '__main__':
+ main(sys.argv)
+
Index: xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
+++ b/tools/python/xen/xm/pool-new.py
@@ -0,0 +1,50 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@@ -2278,10 +2234,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
+if __name__ == '__main__':
+ main(sys.argv)
+
Index: xen-4.0.0-testing/tools/python/xen/xm/pool.py
===================================================================
--- /dev/null
+++ xen-4.0.0-testing/tools/python/xen/xm/pool.py
+++ b/tools/python/xen/xm/pool.py
@@ -0,0 +1,236 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@@ -2519,10 +2473,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/pool.py
+def help():
+ return str(GOPTS)
+
Index: xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/xenapi_create.py
+++ xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py
+++ b/tools/python/xen/xm/xenapi_create.py
@@ -310,6 +310,8 @@ class xenapi_create:
get_child_nodes_as_dict(vm, "platform", "key", "value"),
"other_config":


@@ -36,7 +36,7 @@
static struct csched_private *csched_priv0 = NULL;
static void csched_tick(void *_cpu);
@@ -1524,11 +1523,13 @@ static void csched_tick_resume(struct sc
@@ -1517,11 +1516,13 @@ static void csched_tick_resume(struct sc
}
}

(File diff suppressed because it is too large.)


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -946,16 +946,16 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -944,16 +944,16 @@ static PyObject *pyxc_hvm_build(XcObject
#endif
int i;
char *image;
@@ -24,7 +24,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
return NULL;
memset(vcpu_avail, 0, sizeof(vcpu_avail));
@@ -1007,6 +1007,7 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -1005,6 +1005,7 @@ static PyObject *pyxc_hvm_build(XcObject
va_hvm->checksum -= sum;
munmap(va_map, XC_PAGE_SIZE);
#endif


@@ -8,7 +8,7 @@
### BEGIN INIT INFO
# Provides: xendomains
# Required-Start: $syslog $remote_fs xend
# Should-Start: iscsi o2cb ocfs2 openais
# Should-Start: iscsi o2cb ocfs2
# Required-Stop: $syslog $remote_fs xend
# Should-Stop: iscsi
# Default-Start: 3 5

xen.changes (updated)

@@ -1,3 +1,15 @@
-------------------------------------------------------------------
Wed Apr 21 21:15:04 MDT 2010 - jfehlig@novell.com
- bnc#596442 - Preserve device config on domain start failure
xend-preserve-devs.patch
-------------------------------------------------------------------
Tue Apr 20 15:18:31 MDT 2010 - jfehlig@novell.com
- bnc#597770 - insserv reports a loop between xendomains and
openais. Remove openais from Should-Start in xendomains script.
-------------------------------------------------------------------
Fri Apr 16 15:00:52 MDT 2010 - jfehlig@novell.com

xen.spec (updated)

@@ -26,11 +26,7 @@ ExclusiveArch: %ix86 x86_64
%define xen_build_dir xen-4.0.0-testing
%define with_kmp 1
BuildRequires: LibVNCServer-devel SDL-devel acpica automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
%if %suse_version >= 1030
BuildRequires: texlive texlive-latex
%else
BuildRequires: te_ams te_latex tetex
%endif
%ifarch x86_64
BuildRequires: glibc-32bit glibc-devel-32bit
%define max_cpus 256
@@ -44,7 +40,7 @@ BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_21091_01
Release: 1
License: GPLv2+
License: GPLv2
Group: System/Kernel
AutoReqProv: on
PreReq: %insserv_prereq %fillup_prereq
@@ -83,8 +79,11 @@ Patch1: 21109-x86-cpu-hotplug.patch
Patch2: 21128-domain-save-flush.patch
Patch3: 21150-shadow-race.patch
Patch4: 21160-sysctl-debug-keys.patch
Patch5: blktap-script.patch
Patch6: ioemu-subtype.patch
Patch5: 21189-x86-emulate-clflush.patch
Patch6: 21193-blktap-script.patch
Patch7: 21194-ioemu-subtype.patch
Patch8: 21225-conring-iommu.patch
Patch9: xend-preserve-devs.patch
# Our patches
Patch300: xen-config.diff
Patch301: xend-config.diff
@@ -145,7 +144,6 @@ Patch366: cpu-pools-python.patch
Patch367: cpu-pools-libxen.patch
Patch368: cpu-pools-xmtest.patch
Patch369: cpu-pools-docs.patch
Patch370: cpu-pools-fixes.patch
# Patches for snapshot support
Patch400: snapshot-ioemu-save.patch
Patch401: snapshot-ioemu-restore.patch
@@ -161,14 +159,14 @@ Patch415: tapdisk-ioemu-shutdown-fix.patch
Patch420: blktapctrl-default-to-ioemu.patch
Patch421: ioemu-blktap-barriers.patch
# Other bug fixes or features
Patch422: bdrv_open2_fix_flags.patch
Patch423: bdrv_open2_flags_2.patch
Patch424: ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
Patch425: ioemu-bdrv-open-CACHE_WB.patch
Patch426: xen-ioemu-hvm-pv-support.diff
Patch427: qemu-dm-segfault.patch
Patch428: hibernate.patch
Patch429: del_usb_xend_entry.patch
Patch423: bdrv_open2_fix_flags.patch
Patch424: bdrv_open2_flags_2.patch
Patch425: ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
Patch426: ioemu-bdrv-open-CACHE_WB.patch
Patch427: xen-ioemu-hvm-pv-support.diff
Patch428: qemu-dm-segfault.patch
Patch429: hibernate.patch
Patch430: del_usb_xend_entry.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
# Hypervisor and PV driver Patches
@@ -534,6 +532,9 @@ Authors:
%patch4 -p1
%patch5 -p1
%patch6 -p1
%patch7 -p1
%patch8 -p1
%patch9 -p1
%patch300 -p1
%patch301 -p1
%patch302 -p1
@@ -592,7 +593,6 @@ Authors:
%patch367 -p1
%patch368 -p1
%patch369 -p1
%patch370 -p1
%patch400 -p1
%patch401 -p1
%patch402 -p1
@@ -605,7 +605,6 @@ Authors:
%patch415 -p1
%patch420 -p1
%patch421 -p1
%patch422 -p1
%patch423 -p1
%patch424 -p1
%patch425 -p1
@@ -613,6 +612,7 @@ Authors:
%patch427 -p1
%patch428 -p1
%patch429 -p1
%patch430 -p1
%patch450 -p1
%patch500 -p1
%patch501 -p1

xend-domain-lock.patch (updated)

@@ -23,7 +23,7 @@ Index: xen-4.0.0-testing/tools/examples/xend-config.sxp
+# Lock is placed in /<xend-domain-lock-path>/<domain-uuid>.
+# Default is /var/lib/xen/images/vm_locks/
+#
+#(xend-domain-lock-path /var/lib/xend/domains)
+#(xend-domain-lock-path /var/lib/images/vm_locks)
+
+# External locking utility called by xend for acquiring/releasing
+# domain lock. By default /etc/xen/scripts/domain-lock will be used
@@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
XendTask.log_progress(0, 30, self._constructDomain)
XendTask.log_progress(31, 60, self._initDomain)
@@ -2979,6 +2981,11 @@ class XendDomainInfo:
@@ -2983,6 +2985,11 @@ class XendDomainInfo:
self._stateSet(DOM_STATE_HALTED)
self.domid = None # Do not push into _stateSet()!
@ -103,7 +103,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
finally:
self.refresh_shutdown_lock.release()
@@ -4485,6 +4492,74 @@ class XendDomainInfo:
@@ -4489,6 +4496,74 @@ class XendDomainInfo:
def has_device(self, dev_class, dev_uuid):
return (dev_uuid in self.info['%s_refs' % dev_class.lower()])

xend-preserve-devs.patch (new file, 14 lines)

@@ -0,0 +1,14 @@
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -1166,8 +1166,8 @@ class XendConfig(dict):
config.append(['VDI', dev_cfg.get('VDI', '')])
sxpr.append(['device', config])
+ found = True
- found = True
except:
log.exception("dumping sxp from device controllers")
pass