diff --git a/21189-x86-emulate-clflush.patch b/21189-x86-emulate-clflush.patch
new file mode 100644
index 0000000..bf0aa3a
--- /dev/null
+++ b/21189-x86-emulate-clflush.patch
@@ -0,0 +1,53 @@
+# HG changeset patch
+# User Keir Fraser <keir.fraser@citrix.com>
+# Date 1271353678 -3600
+# Node ID d18e6a6c618af4f25a9e1a57c9e3eac55921678c
+# Parent  ffffddc4b1e030cce6bd4d12c4409c94599c1abf
+x86_emulate: Emulate CLFLUSH instruction
+
+We recently found that a FreeBSD 8.0 guest failed to install and boot
+on Xen. The reason was that FreeBSD detected the CLFLUSH feature and
+invoked this instruction to flush MMIO space. This caused a page
+fault, but x86_emulate.c failed to emulate the (unsupported)
+instruction, so the fault was reflected back into FreeBSD. A similar
+issue was reported earlier:
+
+http://lists.xensource.com/archives/html/xen-devel/2010-03/msg00362.html
+
+From: Wei Huang <wei.huang2@amd.com>
+Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
+
+Index: xen-4.0.0-testing/xen/arch/x86/x86_emulate/x86_emulate.c
+===================================================================
+--- xen-4.0.0-testing.orig/xen/arch/x86/x86_emulate/x86_emulate.c
++++ xen-4.0.0-testing/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -227,7 +227,8 @@ static uint8_t twobyte_table[256] = {
+     DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0,
+     /* 0xA8 - 0xAF */
+     ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM,
+-    DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, DstReg|SrcMem|ModRM,
++    DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
++    ImplicitOps|ModRM, DstReg|SrcMem|ModRM,
+     /* 0xB0 - 0xB7 */
+     ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
+     DstReg|SrcMem|ModRM|Mov, DstBitBase|SrcReg|ModRM,
+@@ -4008,6 +4009,19 @@ x86_emulate(
+         emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags);
+         break;
+ 
++    case 0xae: /* Grp15 */
++        switch ( modrm_reg & 7 )
++        {
++        case 7: /* clflush */
++            fail_if(ops->wbinvd == NULL);
++            if ( (rc = ops->wbinvd(ctxt)) != 0 )
++                goto done;
++            break;
++        default:
++            goto cannot_emulate;
++        }
++        break;
++
+     case 0xaf: /* imul */
+         _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+         switch ( dst.bytes )
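For reference, the guest-side sequence that trips this path is roughly the
following (a minimal C sketch, not taken from FreeBSD; the aligned buffer
stands in for a hypothetical MMIO mapping):

    /* Issue CLFLUSH on one cache line.  Against an emulated MMIO
     * mapping this faults, and x86_emulate.c must decode 0F AE /7. */
    static inline void flush_line(volatile void *p)
    {
        __asm__ __volatile__("clflush %0" : "+m" (*(volatile char *)p));
    }

    int main(void)
    {
        static char buf[64] __attribute__((aligned(64)));
        flush_line(buf);    /* harmless on ordinary memory */
        return 0;
    }

Note that the patch deliberately over-flushes: lacking a per-line flush
hook, it reuses ops->wbinvd(), which is safe (a full cache flush subsumes
a single line) if heavy-handed.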
diff --git a/21193-blktap-script.patch b/21193-blktap-script.patch
new file mode 100644
index 0000000..c568a78
--- /dev/null
+++ b/21193-blktap-script.patch
@@ -0,0 +1,23 @@
+# HG changeset patch
+# User Keir Fraser <keir.fraser@citrix.com>
+# Date 1271663923 -3600
+# Node ID ae1abcdd4a670e34509e39d5280834a75afc0f6a
+# Parent  6860c523d2daf1d110b130a9596469a0957b4032
+tools/hotplug/Linux/blktap: remove optional tapdisk: prefix
+
+Perhaps this should even be further generalized (e.g. to remove any
+"[!/]*:" pattern prefixes) to be more forward-compatible?
+
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+
+diff -r 6860c523d2da -r ae1abcdd4a67 tools/hotplug/Linux/blktap
+--- a/tools/hotplug/Linux/blktap	Mon Apr 19 08:55:33 2010 +0100
++++ b/tools/hotplug/Linux/blktap	Mon Apr 19 08:58:43 2010 +0100
+@@ -59,6 +59,7 @@
+ if [ -n "$t" ]
+ then
+     p=$(xenstore_read "$XENBUS_PATH/params")
++    p=${p#tapdisk:}
+     # if we have a ':', chew from head including :
+     if echo $p | grep -q \:
+     then
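The added line relies on POSIX parameter expansion: ${p#tapdisk:} strips
the literal prefix "tapdisk:" when present and leaves the value unchanged
otherwise. A standalone illustration (hypothetical params values):

    #!/bin/sh
    # ${var#pattern} removes the shortest matching prefix, if any.
    p="tapdisk:aio:/images/disk.img"
    p=${p#tapdisk:}
    echo "$p"    # -> aio:/images/disk.img

    p="phy:/dev/vg/lv"    # no tapdisk: prefix
    p=${p#tapdisk:}
    echo "$p"    # -> phy:/dev/vg/lv (unchanged)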
diff --git a/ioemu-subtype.patch b/21194-ioemu-subtype.patch
similarity index 100%
rename from ioemu-subtype.patch
rename to 21194-ioemu-subtype.patch
diff --git a/21225-conring-iommu.patch b/21225-conring-iommu.patch
new file mode 100644
index 0000000..136cf19
--- /dev/null
+++ b/21225-conring-iommu.patch
@@ -0,0 +1,75 @@
+# HG changeset patch
+# User Keir Fraser <keir.fraser@citrix.com>
+# Date 1271954636 -3600
+# Node ID 2b97855a629f1d79e1d075a6d2a8b569018b2094
+# Parent  a7947fd90328287dd097294b241753063c858597
+console: Make initial static console buffer __initdata.
+
+The previous scheme (freeing an area of BSS) did not interact nicely
+with device passthrough, as the IOMMU will not map any Xen BSS area
+into guest device pagetables. Hence, if the freed BSS space gets
+allocated to a guest, DMAs to the guest's own memory can fail.
+
+The simple solution here is to always free the static buffer at the
+end of boot (initmem is specially handled for IOMMUs) and to always
+create a dynamically-allocated buffer instead.
+
+Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
+
+--- a/xen/drivers/char/console.c
++++ b/xen/drivers/char/console.c
+@@ -65,11 +65,7 @@ size_param("conring_size", opt_conring_s
+ 
+ #define _CONRING_SIZE 16384
+ #define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
+-static char
+-#if _CONRING_SIZE >= PAGE_SIZE
+-    __attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
+-#endif
+-    _conring[_CONRING_SIZE];
++static char __initdata _conring[_CONRING_SIZE];
+ static char *__read_mostly conring = _conring;
+ static uint32_t __read_mostly conring_size = _CONRING_SIZE;
+ static uint32_t conringc, conringp;
+@@ -596,25 +592,20 @@ void __init console_init_preirq(void)
+ void __init console_init_postirq(void)
+ {
+     char *ring;
+-    unsigned int i;
++    unsigned int i, order;
+ 
+     serial_init_postirq();
+ 
+     if ( !opt_conring_size )
+         opt_conring_size = num_present_cpus() << (9 + xenlog_lower_thresh);
+-    /* Round size down to a power of two. */
+-    while ( opt_conring_size & (opt_conring_size - 1) )
+-        opt_conring_size &= opt_conring_size - 1;
+-    if ( opt_conring_size < conring_size )
+-        return;
+-    
+-    ring = alloc_xenheap_pages(get_order_from_bytes(opt_conring_size), 0);
+-    if ( ring == NULL )
++
++    order = get_order_from_bytes(max(opt_conring_size, conring_size));
++    while ( (ring = alloc_xenheap_pages(order, 0)) == NULL )
+     {
+-        printk("Unable to allocate console ring of %u bytes.\n",
+-               opt_conring_size);
+-        return;
++        BUG_ON(order == 0);
++        order--;
+     }
++    opt_conring_size = PAGE_SIZE << order;
+ 
+     spin_lock_irq(&console_lock);
+     for ( i = conringc ; i != conringp; i++ )
+@@ -625,8 +616,6 @@ void __init console_init_postirq(void)
+     spin_unlock_irq(&console_lock);
+ 
+     printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
+-
+-    init_xenheap_pages(__pa(_conring), __pa(_conring + _CONRING_SIZE));
+ }
+ 
+ void __init console_endboot(void)
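The reworked allocation can be read as: round the request up to an order
(a power-of-two number of pages), then halve on failure instead of giving
up. A standalone user-space sketch of the same logic (not Xen code;
malloc() stands in for alloc_xenheap_pages()):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Same contract as Xen's get_order_from_bytes(): smallest order
     * such that (PAGE_SIZE << order) covers the requested size. */
    static unsigned int order_from_bytes(unsigned long bytes)
    {
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < bytes)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long wanted = 100000;  /* hypothetical ring size request */
        unsigned int order = order_from_bytes(wanted);
        void *ring;

        /* Mirror the patch: halve the allocation until it succeeds
         * (Xen uses BUG_ON(order == 0) instead of abort()). */
        while ((ring = malloc(PAGE_SIZE << order)) == NULL) {
            if (order == 0)
                abort();
            order--;
        }
        printf("console ring: %lu KiB (order %u)\n",
               (PAGE_SIZE << order) >> 10, order);
        free(ring);
        return 0;
    }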
diff --git a/32on64-extra-mem.patch b/32on64-extra-mem.patch
index d01b7ef..5b5d55e 100644
--- a/32on64-extra-mem.patch
+++ b/32on64-extra-mem.patch
@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
 ===================================================================
 --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
 +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
-@@ -2909,7 +2909,7 @@ class XendDomainInfo:
+@@ -2913,7 +2913,7 @@ class XendDomainInfo:
  
              self.guest_bitsize = self.image.getBitSize()
              # Make sure there's enough RAM available for the domain
diff --git a/blktap-script.patch b/blktap-script.patch
deleted file mode 100644
index c7368cc..0000000
--- a/blktap-script.patch
+++ /dev/null
@@ -1,16 +0,0 @@
-diff -r c02cc832cb2d tools/hotplug/Linux/blktap
---- a/tools/hotplug/Linux/blktap	Tue Apr 13 18:19:33 2010 +0100
-+++ b/tools/hotplug/Linux/blktap	Fri Apr 16 14:48:00 2010 -0600
-@@ -59,10 +59,10 @@
- if [ -n "$t" ]
- then
-     p=$(xenstore_read "$XENBUS_PATH/params")
--    # if we have a ':', chew from head including :
-+    # if we have a ':', remove everything up to leading '/'
-     if echo $p | grep -q \:
-     then
--        p=${p#*:}
-+        p="/${p#*/}"
-     fi
- fi
- # some versions of readlink cannot be passed a regular file
diff --git a/cpu-pools-docs.patch b/cpu-pools-docs.patch
index e28f7c3..9e4e493 100644
--- a/cpu-pools-docs.patch
+++ b/cpu-pools-docs.patch
@@ -1,7 +1,5 @@
-Index: xen-4.0.0-testing/docs/xen-api/coversheet.tex
-===================================================================
---- xen-4.0.0-testing.orig/docs/xen-api/coversheet.tex
-+++ xen-4.0.0-testing/docs/xen-api/coversheet.tex
+--- a/docs/xen-api/coversheet.tex
++++ b/docs/xen-api/coversheet.tex
 @@ -52,6 +52,7 @@ Mike Day, IBM & Daniel Veillard, Red Hat
  Jim Fehlig, Novell & Tom Wilkie, University of Cambridge \\
  Jon Harrop, XenSource & Yosuke Iwamatsu, NEC \\
@@ -10,10 +8,8 @@ Index: xen-4.0.0-testing/docs/xen-api/coversheet.tex
  \end{tabular}
  \end{large}
  
-Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
-===================================================================
---- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
-+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
+--- a/docs/xen-api/revision-history.tex
++++ b/docs/xen-api/revision-history.tex
 @@ -50,6 +50,12 @@
       between classes. Added host.PSCSI\_HBAs and VM.DSCSI\_HBAs
       fields.\tabularnewline
@@ -22,15 +18,13 @@ Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
 +     Added definitions of new classes cpu\_pool. Updated the table
 +     and the diagram representing relationships between classes.
 +     Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
-+     host\_cpu.cpu\_pool.
++     host\_cpu.cpu\_pool.\tabularnewline
 +  \hline
   \end{tabular}
  \end{center}
  \end{flushleft}
-Index: xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
-===================================================================
---- xen-4.0.0-testing.orig/docs/xen-api/xenapi-coversheet.tex
-+++ xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
+--- a/docs/xen-api/xenapi-coversheet.tex
++++ b/docs/xen-api/xenapi-coversheet.tex
 @@ -17,12 +17,12 @@
  \newcommand{\coversheetlogo}{xen.eps}
  
@@ -46,10 +40,8 @@ Index: xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex
  
  %% Document authors
  \newcommand{\docauthors}{
-Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
-===================================================================
---- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel-graph.dot
-+++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
+--- a/docs/xen-api/xenapi-datamodel-graph.dot
++++ b/docs/xen-api/xenapi-datamodel-graph.dot
 @@ -14,7 +14,7 @@ fontname="Verdana";
  
  node [ shape=box ]; session VM host network VIF PIF SR VDI VBD PBD user;
@@ -67,10 +59,8 @@ Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot
 +cpu_pool -> VM [ arrowhead="crow", arrowtail="none" ]
 +host -> cpu_pool [ arrowhead="crow", arrowtail="none" ]
  }
-Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
-===================================================================
---- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel.tex
-+++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
+--- a/docs/xen-api/xenapi-datamodel.tex
++++ b/docs/xen-api/xenapi-datamodel.tex
 @@ -56,6 +56,7 @@ Name & Description \\
  {\tt debug} & A basic class for testing \\
  {\tt XSPolicy} & A class for handling Xen Security Policies \\
@@ -89,7 +79,7 @@ Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex
  \hline
  \end{tabular}\end{center}
  
-@@ -499,6 +503,56 @@ error code and a message describing the
+@@ -499,6 +503,56 @@ error code and a message describing the 
  \begin{verbatim}SECURITY_ERROR(xserr, message)\end{verbatim}
  \begin{center}\rule{10em}{0.1pt}\end{center}
  
diff --git a/cpu-pools-fixes.patch b/cpu-pools-fixes.patch
deleted file mode 100644
index 10b2737..0000000
--- a/cpu-pools-fixes.patch
+++ /dev/null
@@ -1,773 +0,0 @@
-Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
-===================================================================
---- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
-+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
-@@ -54,7 +54,7 @@
-      Added definitions of new classes cpu\_pool. Updated the table
-      and the diagram representing relationships between classes.
-      Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
--     host\_cpu.cpu\_pool.
-+     host\_cpu.cpu\_pool.\tabularnewline
-   \hline
-  \end{tabular}
- \end{center}
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendCPUPool.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
-@@ -547,7 +547,7 @@ class XendCPUPool(XendBase):
-     def pool_start(cls, poolname):
-         pool = cls.lookup_pool(poolname)
-         if not pool:
--            raise VmError('unkown pool %s' % poolname)
-+            raise VmError('unknown pool %s' % poolname)
-         try:
-             pool.activate()
-         except XendAPIError, ex:
-@@ -566,8 +566,12 @@ class XendCPUPool(XendBase):
-                              for cpu_ref in pool_vals['host_CPUs'] ]
-                     cpus.sort()
-                     pool_vals['host_CPU_numbers'] = cpus
--                    vm_names = [ xd.get_vm_by_uuid(uuid).getName()
--                                 for uuid in pool_vals['started_VMs'] ]
-+                    # query VMs names. Take in account, that a VM
-+                    # returned by get_all_records could be destroy, now
-+                    vm_names = [ vm.getName()
-+                                 for vm in map(xd.get_vm_by_uuid,
-+                                               pool_vals['started_VMs'])
-+                                 if vm ]
-                     pool_vals['started_VM_names'] = vm_names
-                     pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
-                     sxprs += [[pool_uuid] + map2sxp(pool_vals)]
-@@ -578,7 +582,7 @@ class XendCPUPool(XendBase):
-     def pool_destroy(cls, poolname):
-         pool = cls.lookup_pool(poolname)
-         if not pool:
--            raise VmError('unkown pool %s' % poolname)
-+            raise VmError('unknown pool %s' % poolname)
-         try:
-             pool.deactivate()
-             if not pool.is_managed():
-@@ -589,7 +593,7 @@ class XendCPUPool(XendBase):
-     def pool_delete(cls, poolname):
-         pool = cls.lookup_pool(poolname)
-         if not pool:
--            raise VmError('unkown pool %s' % poolname)
-+            raise VmError('unknown pool %s' % poolname)
-         try:
-             pool.destroy()
-         except XendAPIError, ex:
-@@ -598,28 +602,28 @@ class XendCPUPool(XendBase):
-     def pool_cpu_add(cls, poolname, cpu):
-         pool = cls.lookup_pool(poolname)
-         if not pool:
--            raise VmError('unkown pool %s' % poolname)
-+            raise VmError('unknown pool %s' % poolname)
-         try:
-             cpu_ref = cls._cpu_number_to_ref(int(cpu))
-             if cpu_ref:
-                 pool.add_host_CPU_live(cpu_ref)
-             else:
-                 raise PoolError(XEND_ERROR_INVALID_CPU,
--                                'CPU unkown')
-+                                'CPU unknown')
-         except XendAPIError, ex:
-             raise VmError(ex.get_api_error())
- 
-     def pool_cpu_remove(cls, poolname, cpu):
-         pool = cls.lookup_pool(poolname)
-         if not pool:
--            raise VmError('unkown pool %s' % poolname)
-+            raise VmError('unknown pool %s' % poolname)
-         try:
-             cpu_ref = cls._cpu_number_to_ref(int(cpu))
-             if cpu_ref:
-                 pool.remove_host_CPU_live(cpu_ref)
-             else:
-                 raise PoolError(XEND_ERROR_INVALID_CPU,
--                                'CPU unkown')
-+                                'CPU unknown')
-         except XendAPIError, ex:
-             raise VmError(ex.get_api_error())
- 
-@@ -627,10 +631,10 @@ class XendCPUPool(XendBase):
-         dom = XendDomain.instance()
-         pool = cls.lookup_pool(poolname)
-         if not pool:
--            raise VmError('unkown pool %s' % poolname)
-+            raise VmError('unknown pool %s' % poolname)
-         dominfo = dom.domain_lookup_nr(domname)
-         if not dominfo:
--            raise VmError('unkown domain %s' % domname)
-+            raise VmError('unknown domain %s' % domname)
-         domid = dominfo.getDomid()
-         if domid is not None:
-             if domid == 0:
-@@ -860,8 +864,11 @@ class XendCPUPool(XendBase):
-         pool_uuid = None
-         try:
-             pool_id = int(id_or_name)
--            # pool id given
-+            # pool id given ?
-             pool_uuid = cls.query_pool_ref(pool_id)
-+            if not pool_uuid:
-+                # not found -> search name
-+                pool_uuid = cls.get_by_name_label(id_or_name)
-         except ValueError:
-             # pool name given
-             pool_uuid = cls.get_by_name_label(id_or_name)
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
-@@ -2574,7 +2574,7 @@ class XendDomainInfo:
-         pool = XendCPUPool.lookup_pool(pool_name)
- 
-         if pool is None:
--            raise VmError("unkown pool %s" % pool_name)
-+            raise VmError("unknown pool %s" % pool_name)
-         pool_id = pool.query_pool_id()
-         if pool_id is None:
-             raise VmError("pool %s not activated" % pool_name)
-Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
-+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
-@@ -3515,7 +3515,7 @@ def get_pool_ref(name):
-     if len(refs) > 0:
-         return refs[0]
-     else:
--        err('unkown pool name')
-+        err('unknown pool name')
-         sys.exit(1)
- 
- def xm_pool_start(args):
-@@ -3643,7 +3643,7 @@ def xm_pool_cpu_add(args):
-         cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
-                                   if c_rec['number'] == args[1] ]
-         if len(cpu_ref) == 0:
--            err('cpu number unkown')
-+            err('cpu number unknown')
-         else:
-             server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
-     else:
-@@ -3657,7 +3657,7 @@ def xm_pool_cpu_remove(args):
-         cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
-                                   if c_rec['number'] ==  args[1] ]
-         if len(cpu_ref) == 0:
--            err('cpu number unkown')
-+            err('cpu number unknown')
-         else:
-             server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
-     else:
-Index: xen-4.0.0-testing/xen/common/cpupool.c
-===================================================================
---- xen-4.0.0-testing.orig/xen/common/cpupool.c
-+++ xen-4.0.0-testing/xen/common/cpupool.c
-@@ -29,6 +29,9 @@ static struct cpupool *cpupool_list;
- static int cpupool0_max_cpus;
- integer_param("pool0_max_cpus", cpupool0_max_cpus);
- 
-+static int cpupool_moving_cpu = -1;
-+static struct cpupool *cpupool_cpu_moving = NULL;
-+
- /* cpupool lock: be carefull, this lock is sometimes released on another cpu
-  *               as it was obtained!
-  */
-@@ -104,7 +107,6 @@ struct cpupool *cpupool_create(int pooli
-     }
-     *q = c;
-     c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
--    c->cpu_in_transit = -1;
-     if ( schedule_init_global(sched, &(c->sched)) )
-     {
-         spin_unlock(&cpupool_lock);
-@@ -151,16 +153,20 @@ int cpupool_destroy(struct cpupool *c)
-  * assign a specific cpu to a cpupool
-  * cpupool_lock must be held
-  */
--static void cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
-+static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
- {
--    printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
--        c->cpupool_id, cpu);
-+    if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
-+        return -EBUSY;
-     per_cpu(cpupool, cpu) = c;
-     schedule_cpu_switch(cpu, c);
-     cpu_clear(cpu, cpupool_free_cpus);
-+    if (cpupool_moving_cpu == cpu)
-+    {
-+        cpupool_moving_cpu = -1;
-+        cpupool_cpu_moving = NULL;
-+    }
-     cpu_set(cpu, c->cpu_valid);
--    printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ready\n",
--        c->cpupool_id, cpu);
-+    return 0;
- }
- 
- /*
-@@ -177,8 +183,8 @@ int cpupool_assign_ncpu(struct cpupool *
-     spin_lock(&cpupool_lock);
-     for_each_cpu_mask(i, cpupool_free_cpus)
-     {
--        cpupool_assign_cpu_locked(c, i);
--        n++;
-+        if ( cpupool_assign_cpu_locked(c, i) == 0 )
-+            n++;
-         if ( n == ncpu )
-             break;
-     }
-@@ -188,43 +194,25 @@ int cpupool_assign_ncpu(struct cpupool *
-     return n;
- }
- 
--static void cpupool_unassign_cpu_locked_1(struct cpupool *c, unsigned int cpu)
--{
--    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
--        c->cpupool_id, cpu);
--    c->cpu_in_transit = cpu;
--}
--
--static int cpupool_unassign_cpu_locked_2(struct cpupool *c)
-+static long cpupool_unassign_cpu_helper(void *hdl, void *info)
- {
--    int cpu = c->cpu_in_transit;
--    int ret;
-+    struct cpupool *c = (struct cpupool *)info;
-+    int cpu = cpupool_moving_cpu;
-+    long ret;
-+    int cpupool_id = c->cpupool_id;
- 
--    c->cpu_in_transit = -1;
--    cpu_clear(cpu, c->cpu_valid);
-     ret = cpu_disable_scheduler(cpu, 1);
--    if ( ret )
--    {
--        cpu_set(cpu, c->cpu_valid);
--    }
--    else
-+    cpu_set(cpu, cpupool_free_cpus);
-+    if ( !ret )
-     {
--        cpu_set(cpu, cpupool_free_cpus);
-         schedule_cpu_switch(cpu, NULL);
-         per_cpu(cpupool, cpu) = NULL;
-+        cpupool_moving_cpu = -1;
-+        cpupool_cpu_moving = NULL;
-     }
--    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
--        c->cpupool_id, cpu, ret);
--    return ret;
--}
--
--static long cpupool_unassign_cpu_helper(void *hdl, void *info)
--{
--    struct cpupool *c = (struct cpupool *)info;
--    long ret;
--
--    ret = cpupool_unassign_cpu_locked_2(c);
-     spin_unlock(&cpupool_lock);
-+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
-+        cpupool_id, cpu, ret);
-     return ret;
- }
- 
-@@ -242,16 +230,23 @@ static long cpupool_unassign_cpu_helper(
- int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
- {
-     int work_cpu;
--    int rc = 0;
-+    int ret;
-     struct domain *d;
-+    int cpupool_id = c->cpupool_id;
- 
-+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
-+        cpupool_id, cpu);
-     spin_lock(&cpupool_lock);
--    if ( !cpu_isset(cpu, c->cpu_valid) )
--    {
--        spin_unlock(&cpupool_lock);
--        return 0;
--    }
--    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) )
-+    ret = -EBUSY;
-+    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
-+        goto out;
-+
-+    ret = 0;
-+    if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
-+        goto out;
-+
-+    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
-+         (cpu != cpupool_moving_cpu) )
-     {
-         for_each_domain(d)
-         {
-@@ -259,27 +254,24 @@ int cpupool_unassign_cpu(struct cpupool
-                 continue;
-             if ( !d->is_dying )
-             {
--                rc = -EBUSY;
-+                ret = -EBUSY;
-                 break;
-             }
--            printk(XENLOG_DEBUG "moving dying domain %d to pool0\n",
--                d->domain_id);
-             c->n_dom--;
--            rc = sched_move_domain(d, cpupool0);
--            if ( rc )
-+            ret = sched_move_domain(d, cpupool0);
-+            if ( ret )
-             {
-                 c->n_dom++;
-                 break;
-             }
-             cpupool0->n_dom++;
-         }
--        if ( rc )
--        {
--            spin_unlock(&cpupool_lock);
--            return rc;
--        }
-+        if ( ret )
-+            goto out;
-     }
--    cpupool_unassign_cpu_locked_1(c, cpu);
-+    cpupool_moving_cpu = cpu;
-+    cpupool_cpu_moving = c;
-+    cpu_clear(cpu, c->cpu_valid);
-     work_cpu = smp_processor_id();
-     if ( work_cpu == cpu )
-     {
-@@ -289,6 +281,12 @@ int cpupool_unassign_cpu(struct cpupool
-     }
-     return continue_hypercall_on_cpu(work_cpu, NULL,
-                                      cpupool_unassign_cpu_helper, c);
-+
-+out:
-+    spin_unlock(&cpupool_lock);
-+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
-+        cpupool_id, cpu, ret);
-+    return ret;
- }
- 
- /*
-@@ -316,6 +314,7 @@ int cpupool_add_domain(struct domain *d,
- {
-     struct cpupool *c;
-     int rc = 1;
-+    int n_dom;
- 
-     if ( poolid == CPUPOOLID_NONE )
-         return 0;
-@@ -324,12 +323,14 @@ int cpupool_add_domain(struct domain *d,
-     if ( (c != NULL) && cpus_weight(c->cpu_valid) )
-     {
-         c->n_dom++;
-+        n_dom = c->n_dom;
-         d->cpupool = c;
--        printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
--            d->domain_id, poolid, c->n_dom);
-         rc = 0;
-     }
-     spin_unlock(&cpupool_lock);
-+    if (!rc)
-+        printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
-+            d->domain_id, poolid, n_dom);
-     return rc;
- }
- 
-@@ -338,14 +339,19 @@ int cpupool_add_domain(struct domain *d,
-  */
- void cpupool_rm_domain(struct domain *d)
- {
-+    int cpupool_id;
-+    int n_dom;
-+
-     if ( d->cpupool == NULL )
-         return;
-     spin_lock(&cpupool_lock);
-+    cpupool_id = d->cpupool->cpupool_id;
-     d->cpupool->n_dom--;
--    printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
--        d->domain_id, d->cpupool->cpupool_id, d->cpupool->n_dom);
-+    n_dom = d->cpupool->n_dom;
-     d->cpupool = NULL;
-     spin_unlock(&cpupool_lock);
-+    printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
-+        d->domain_id, cpupool_id, n_dom);
-     return;
- }
- 
-@@ -359,7 +365,7 @@ void cpupool_cpu_add(unsigned int cpu)
-         return;
-     spin_lock(&cpupool_lock);
-     cpu_set(cpu, cpupool_free_cpus);
--    cpupool_assign_cpu_locked(cpupool0, cpu);
-+    (void)cpupool_assign_cpu_locked(cpupool0, cpu);
-     spin_unlock(&cpupool_lock);
-     return;
- }
-@@ -428,6 +434,8 @@ int cpupool_do_domctl(struct xen_domctl_
-         unsigned cpu;
- 
-         cpu = op->cpu;
-+        printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
-+            op->cpupool_id, cpu);
-         spin_lock(&cpupool_lock);
-         if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
-             cpu = first_cpu(cpupool_free_cpus);
-@@ -441,10 +449,11 @@ int cpupool_do_domctl(struct xen_domctl_
-         ret = -ENOENT;
-         if ( c == NULL )
-             goto addcpu_out;
--        cpupool_assign_cpu_locked(c, cpu);
--        ret = 0;
-+        ret = cpupool_assign_cpu_locked(c, cpu);
- addcpu_out:
-         spin_unlock(&cpupool_lock);
-+        printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
-+            op->cpupool_id, cpu, ret);
-     }
-     break;
- 
-@@ -488,23 +497,23 @@ addcpu_out:
-             rcu_unlock_domain(d);
-             break;
-         }
-+        printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
-+            d->domain_id, op->cpupool_id);
-         ret = -ENOENT;
-         spin_lock(&cpupool_lock);
-         c = cpupool_find_by_id(op->cpupool_id, 1);
-         if ( (c != NULL) && cpus_weight(c->cpu_valid) )
-         {
--            printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
--                d->domain_id, c->cpupool_id);
-             d->cpupool->n_dom--;
-             ret = sched_move_domain(d, c);
-             if ( ret )
-                 d->cpupool->n_dom++;
-             else
-                 c->n_dom++;
--            printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
--                d->domain_id, c->cpupool_id, ret);
-         }
-         spin_unlock(&cpupool_lock);
-+        printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
-+            d->domain_id, op->cpupool_id, ret);
-         rcu_unlock_domain(d);
-     }
-     break;
-Index: xen-4.0.0-testing/xen/common/sched_credit.c
-===================================================================
---- xen-4.0.0-testing.orig/xen/common/sched_credit.c
-+++ xen-4.0.0-testing/xen/common/sched_credit.c
-@@ -602,7 +602,7 @@ csched_vcpu_acct(struct csched_private *
- }
- 
- static void *
--csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc)
-+csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc, void *dd)
- {
-     struct csched_vcpu *svc;
- 
-@@ -614,7 +614,7 @@ csched_alloc_vdata(struct scheduler *ops
- 
-     INIT_LIST_HEAD(&svc->runq_elem);
-     INIT_LIST_HEAD(&svc->active_vcpu_elem);
--    svc->sdom = CSCHED_DOM(vc->domain);
-+    svc->sdom = dd;
-     svc->vcpu = vc;
-     atomic_set(&svc->credit, 0);
-     svc->flags = 0U;
-@@ -778,19 +778,14 @@ csched_dom_cntl(
-     return 0;
- }
- 
--static int
--csched_dom_init(struct scheduler *ops, struct domain *dom)
-+static void *
-+csched_alloc_domdata(struct scheduler *ops, struct domain *dom)
- {
-     struct csched_dom *sdom;
- 
--    CSCHED_STAT_CRANK(dom_init);
--
--    if ( is_idle_domain(dom) )
--        return 0;
--
-     sdom = xmalloc(struct csched_dom);
-     if ( sdom == NULL )
--        return -ENOMEM;
-+        return NULL;
-     memset(sdom, 0, sizeof(*sdom));
- 
-     /* Initialize credit and weight */
-@@ -800,16 +795,40 @@ csched_dom_init(struct scheduler *ops, s
-     sdom->dom = dom;
-     sdom->weight = CSCHED_DEFAULT_WEIGHT;
-     sdom->cap = 0U;
-+
-+    return (void *)sdom;
-+}
-+
-+static int
-+csched_dom_init(struct scheduler *ops, struct domain *dom)
-+{
-+    struct csched_dom *sdom;
-+
-+    CSCHED_STAT_CRANK(dom_init);
-+
-+    if ( is_idle_domain(dom) )
-+        return 0;
-+
-+    sdom = csched_alloc_domdata(ops, dom);
-+    if ( sdom == NULL )
-+        return -ENOMEM;
-+
-     dom->sched_priv = sdom;
- 
-     return 0;
- }
- 
- static void
-+csched_free_domdata(struct scheduler *ops, void *data)
-+{
-+    xfree(data);
-+}
-+
-+static void
- csched_dom_destroy(struct scheduler *ops, struct domain *dom)
- {
-     CSCHED_STAT_CRANK(dom_destroy);
--    xfree(CSCHED_DOM(dom));
-+    csched_free_domdata(ops, CSCHED_DOM(dom));
- }
- 
- /*
-@@ -1147,9 +1166,10 @@ csched_load_balance(struct csched_privat
-     int peer_cpu;
- 
-     BUG_ON( cpu != snext->vcpu->processor );
-+    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
- 
-     /* If this CPU is going offline we shouldn't steal work. */
--    if ( unlikely(!cpu_online(cpu)) )
-+    if ( unlikely(!cpu_isset(cpu, *online)) )
-         goto out;
- 
-     if ( snext->pri == CSCHED_PRI_IDLE )
-@@ -1163,7 +1183,6 @@ csched_load_balance(struct csched_privat
-      * Peek at non-idling CPUs in the system, starting with our
-      * immediate neighbour.
-      */
--    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
-     cpus_andnot(workers, *online, prv->idlers);
-     cpu_clear(cpu, workers);
-     peer_cpu = cpu;
-@@ -1218,31 +1237,6 @@ csched_schedule(struct scheduler *ops, s
-     CSCHED_STAT_CRANK(schedule);
-     CSCHED_VCPU_CHECK(current);
- 
--    if ( unlikely(!cpu_isset(cpu, *CSCHED_CPUONLINE(per_cpu(cpupool, cpu)))) )
--    {
--        /* race with switching cpu between pools: when cpu is leaving the
--           pool try to schedule idle vcpu */
--
--        struct list_head * iter;
--
--        snext = scurr;
--        if (is_idle_vcpu(current))
--            goto out;
--
--        if ( vcpu_runnable(current) )
--            __runq_insert(cpu, scurr);
--
--        list_for_each(iter, runq)
--        {
--            snext = __runq_elem(iter);
--            if ( snext->pri == CSCHED_PRI_IDLE )
--                break;
--        }
--        BUG_ON( snext->pri != CSCHED_PRI_IDLE );
--        __runq_remove(snext);
--        goto out;
--    }
--
-     /* Update credits */
-     if ( !is_idle_vcpu(scurr->vcpu) )
-     {
-@@ -1273,7 +1267,6 @@ csched_schedule(struct scheduler *ops, s
-     else
-         snext = csched_load_balance(prv, cpu, snext);
- 
--out:
-     /*
-      * Update idlers mask if necessary. When we're idling, other CPUs
-      * will tickle us when they get extra work.
-@@ -1553,6 +1546,8 @@ struct scheduler sched_credit_def = {
-     .free_vdata     = csched_free_vdata,
-     .alloc_pdata    = csched_alloc_pdata,
-     .free_pdata     = csched_free_pdata,
-+    .alloc_domdata  = csched_alloc_domdata,
-+    .free_domdata   = csched_free_domdata,
- 
-     .tick_suspend   = csched_tick_suspend,
-     .tick_resume    = csched_tick_resume,
-Index: xen-4.0.0-testing/xen/common/sched_sedf.c
-===================================================================
---- xen-4.0.0-testing.orig/xen/common/sched_sedf.c
-+++ xen-4.0.0-testing/xen/common/sched_sedf.c
-@@ -332,7 +332,7 @@ static inline void __add_to_runqueue_sor
- }
- 
- 
--static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v)
-+static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v, void *dd)
- {
-     struct sedf_vcpu_info *inf;
- 
-@@ -415,20 +415,37 @@ static void sedf_destroy_vcpu(struct sch
-     sedf_free_vdata(ops, v->sched_priv);
- }
- 
-+static void *
-+sedf_alloc_domdata(struct scheduler *ops, struct domain *d)
-+{
-+    void *mem;
-+
-+    mem = xmalloc(struct sedf_dom_info);
-+    if ( mem == NULL )
-+        return NULL;
-+
-+    memset(mem, 0, sizeof(struct sedf_dom_info));
-+
-+    return mem;
-+}
-+
- static int sedf_init_domain(struct scheduler *ops, struct domain *d)
- {
--    d->sched_priv = xmalloc(struct sedf_dom_info);
-+    d->sched_priv = sedf_alloc_domdata(ops, d);
-     if ( d->sched_priv == NULL )
-         return -ENOMEM;
- 
--    memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
--
-     return 0;
- }
- 
-+static void sedf_free_domdata(struct scheduler *ops, void *data)
-+{
-+    xfree(data);
-+}
-+
- static void sedf_destroy_domain(struct scheduler *ops, struct domain *d)
- {
--    xfree(d->sched_priv);
-+    sedf_free_domdata(ops, d->sched_priv);
- }
- 
- static int sedf_pick_cpu(struct scheduler *ops, struct vcpu *v)
-@@ -1498,6 +1515,8 @@ struct scheduler sched_sedf_def = {
-     .free_vdata     = sedf_free_vdata,
-     .alloc_pdata    = sedf_alloc_pdata,
-     .free_pdata     = sedf_free_pdata,
-+    .alloc_domdata  = sedf_alloc_domdata,
-+    .free_domdata   = sedf_free_domdata,
- 
-     .do_schedule    = sedf_do_schedule,
-     .pick_cpu       = sedf_pick_cpu,
-Index: xen-4.0.0-testing/xen/common/schedule.c
-===================================================================
---- xen-4.0.0-testing.orig/xen/common/schedule.c
-+++ xen-4.0.0-testing/xen/common/schedule.c
-@@ -222,7 +222,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
-             return 1;
-     }
- 
--    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v);
-+    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
-     if ( v->sched_priv == NULL )
-         return 1;
- 
-@@ -237,14 +237,23 @@ int sched_move_domain(struct domain *d,
-     struct vcpu *v;
-     unsigned int new_p;
-     void **vcpu_priv;
-+    void *domdata;
-+
-+    domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
-+    if ( domdata == NULL )
-+        return -ENOMEM;
- 
-     vcpu_priv = xmalloc_array(void *, d->max_vcpus);
-     if ( vcpu_priv == NULL )
-+    {
-+        SCHED_OP(&(c->sched), free_domdata, domdata);
-         return -ENOMEM;
-+    }
-+
-     memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
-     for_each_vcpu ( d, v )
-     {
--        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v);
-+        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
-         if ( vcpu_priv[v->vcpu_id] == NULL )
-         {
-             for_each_vcpu ( d, v )
-@@ -253,6 +262,7 @@ int sched_move_domain(struct domain *d,
-                     xfree(vcpu_priv[v->vcpu_id]);
-             }
-             xfree(vcpu_priv);
-+            SCHED_OP(&(c->sched), free_domdata, domdata);
-             return -ENOMEM;
-         }
-     }
-@@ -276,6 +286,8 @@ int sched_move_domain(struct domain *d,
-     }
- 
-     d->cpupool = c;
-+    SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
-+    d->sched_priv = domdata;
- 
-     domain_unpause(d);
- 
-@@ -1079,7 +1091,7 @@ void schedule_cpu_switch(unsigned int cp
-     v = per_cpu(schedule_data, cpu).idle;
-     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
-     if ( c != NULL )
--        vpriv = SCHED_OP(new_ops, alloc_vdata, v);
-+        vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);
- 
-     spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
- 
-Index: xen-4.0.0-testing/xen/include/xen/sched-if.h
-===================================================================
---- xen-4.0.0-testing.orig/xen/include/xen/sched-if.h
-+++ xen-4.0.0-testing/xen/include/xen/sched-if.h
-@@ -78,9 +78,12 @@ struct scheduler {
-     void         (*deinit)         (struct scheduler *);
- 
-     void         (*free_vdata)     (struct scheduler *, void *);
--    void *       (*alloc_vdata)    (struct scheduler *, struct vcpu *);
-+    void *       (*alloc_vdata)    (struct scheduler *, struct vcpu *,
-+                                    void *);
-     void         (*free_pdata)     (struct scheduler *, void *, int);
-     void *       (*alloc_pdata)    (struct scheduler *, int);
-+    void         (*free_domdata)   (struct scheduler *, void *);
-+    void *       (*alloc_domdata)  (struct scheduler *, struct domain *);
- 
-     int          (*init_domain)    (struct scheduler *, struct domain *);
-     void         (*destroy_domain) (struct scheduler *, struct domain *);
-@@ -109,7 +112,6 @@ struct cpupool
-     cpumask_t        cpu_valid;      /* all cpus assigned to pool */
-     struct cpupool   *next;
-     unsigned int     n_dom;
--    int              cpu_in_transit; /* used for adding/removing cpus */
-     struct scheduler sched;
- };
- 
diff --git a/cpu-pools-libxc.patch b/cpu-pools-libxc.patch
index a99e615..b641146 100644
--- a/cpu-pools-libxc.patch
+++ b/cpu-pools-libxc.patch
@@ -1,7 +1,5 @@
-Index: xen-4.0.0-testing/tools/libxc/Makefile
-===================================================================
---- xen-4.0.0-testing.orig/tools/libxc/Makefile
-+++ xen-4.0.0-testing/tools/libxc/Makefile
+--- a/tools/libxc/Makefile
++++ b/tools/libxc/Makefile
 @@ -8,6 +8,7 @@ CTRL_SRCS-y       :=
  CTRL_SRCS-y       += xc_core.c
  CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
@@ -10,10 +8,8 @@ Index: xen-4.0.0-testing/tools/libxc/Makefile
  CTRL_SRCS-y       += xc_domain.c
  CTRL_SRCS-y       += xc_evtchn.c
  CTRL_SRCS-y       += xc_misc.c
-Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
-===================================================================
 --- /dev/null
-+++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
++++ b/tools/libxc/xc_cpupool.c
 @@ -0,0 +1,154 @@
 +/******************************************************************************
 + * xc_cpupool.c
@@ -56,9 +52,9 @@ Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
 +    return do_domctl_save(xc_handle, &domctl);
 +}
 +
-+int xc_cpupool_getinfo(int xc_handle, 
++int xc_cpupool_getinfo(int xc_handle,
 +                       uint32_t first_poolid,
-+                       uint32_t n_max, 
++                       uint32_t n_max,
 +                       xc_cpupoolinfo_t *info)
 +{
 +    int err = 0;
@@ -169,42 +165,9 @@ Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
 +
 +    return 0;
 +}
-Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
-===================================================================
---- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
-+++ xen-4.0.0-testing/tools/libxc/xc_domain.c
-@@ -6,6 +6,7 @@
-  * Copyright (c) 2003, K A Fraser.
-  */
- 
-+#include <stdarg.h>
- #include "xc_private.h"
- #include "xg_save_restore.h"
- #include <xen/memory.h>
-@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
-                      uint32_t ssidref,
-                      xen_domain_handle_t handle,
-                      uint32_t flags,
--                     uint32_t *pdomid)
-+                     uint32_t *pdomid, ...)
- {
-     int err;
-+    va_list ap;
-     DECLARE_DOMCTL;
- 
-     domctl.cmd = XEN_DOMCTL_createdomain;
-     domctl.domain = (domid_t)*pdomid;
-     domctl.u.createdomain.ssidref = ssidref;
-     domctl.u.createdomain.flags   = flags;
-+    if ( flags & XEN_DOMCTL_CDF_pool ) {
-+        va_start(ap, pdomid);
-+        domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
-+        va_end(ap);
-+    }
-     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
-     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
-         return err;
-@@ -220,6 +227,7 @@ int xc_domain_getinfo(int xc_handle,
+--- a/tools/libxc/xc_domain.c
++++ b/tools/libxc/xc_domain.c
+@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
          info->cpu_time = domctl.u.getdomaininfo.cpu_time;
          info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
          info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
@@ -212,10 +175,8 @@ Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
  
          memcpy(info->handle, domctl.u.getdomaininfo.handle,
                 sizeof(xen_domain_handle_t));
-Index: xen-4.0.0-testing/tools/libxc/xc_private.h
-===================================================================
---- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
-+++ xen-4.0.0-testing/tools/libxc/xc_private.h
+--- a/tools/libxc/xc_private.h
++++ b/tools/libxc/xc_private.h
 @@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
      return ret;
  }
@@ -236,10 +197,8 @@ Index: xen-4.0.0-testing/tools/libxc/xc_private.h
  static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
  {
      int ret = -1;
-Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
-===================================================================
---- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
-+++ xen-4.0.0-testing/tools/libxc/xenctrl.h
+--- a/tools/libxc/xenctrl.h
++++ b/tools/libxc/xenctrl.h
 @@ -171,6 +171,7 @@ typedef struct xc_dominfo {
      unsigned int  nr_online_vcpus;
      unsigned int  max_vcpu_id;
@@ -248,15 +207,6 @@ Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
  } xc_dominfo_t;
  
  typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
-@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle,
-                      uint32_t ssidref,
-                      xen_domain_handle_t handle,
-                      uint32_t flags,
--                     uint32_t *pdomid);
-+                     uint32_t *pdomid, ...);
- 
- 
- /* Functions to produce a dump of a given domain
 @@ -502,6 +503,100 @@ int xc_domain_setdebugging(int xc_handle
                             unsigned int enable);
  
diff --git a/cpu-pools-python.patch b/cpu-pools-python.patch
index a8849c9..bd55d64 100644
--- a/cpu-pools-python.patch
+++ b/cpu-pools-python.patch
@@ -1,42 +1,17 @@
-Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
-+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
-@@ -97,17 +97,18 @@ static PyObject *pyxc_domain_create(XcOb
-                                     PyObject *args,
-                                     PyObject *kwds)
- {
--    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0;
-+    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0;
-     int      ret, i;
-     PyObject *pyhandle = NULL;
-     xen_domain_handle_t handle = { 
-         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
-         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
+--- a/tools/python/xen/lowlevel/xc/xc.c
++++ b/tools/python/xen/lowlevel/xc/xc.c
+@@ -106,8 +106,8 @@ static PyObject *pyxc_domain_create(XcOb
  
--    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
-+    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL };
+     static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
  
 -    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
 -                                      &dom, &ssidref, &pyhandle, &flags, &target))
-+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom,
-+                                      &ssidref, &pyhandle, &flags, &target,
-+                                      &cpupool))
++    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list, &dom,
++                                      &ssidref, &pyhandle, &flags, &target))
          return NULL;
      if ( pyhandle != NULL )
      {
-@@ -124,8 +125,9 @@ static PyObject *pyxc_domain_create(XcOb
-         }
-     }
- 
-+    flags |= XEN_DOMCTL_CDF_pool;
-     if ( (ret = xc_domain_create(self->xc_handle, ssidref,
--                                 handle, flags, &dom)) < 0 )
-+                                 handle, flags, &dom, cpupool)) < 0 )
-         return pyxc_error_to_exception();
- 
-     if ( target )
-@@ -329,7 +331,7 @@ static PyObject *pyxc_domain_getinfo(XcO
+@@ -329,7 +329,7 @@ static PyObject *pyxc_domain_getinfo(XcO
      {
          info_dict = Py_BuildValue(
              "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
@@ -45,7 +20,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
              "domid",           (int)info[i].domid,
              "online_vcpus",    info[i].nr_online_vcpus,
              "max_vcpu_id",     info[i].max_vcpu_id,
-@@ -344,7 +346,8 @@ static PyObject *pyxc_domain_getinfo(XcO
+@@ -344,7 +344,8 @@ static PyObject *pyxc_domain_getinfo(XcO
              "cpu_time",        (long long)info[i].cpu_time,
              "maxmem_kb",       (long long)info[i].max_memkb,
              "ssidref",         (int)info[i].ssidref,
@@ -55,7 +30,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
          pyhandle = PyList_New(sizeof(xen_domain_handle_t));
          if ( (pyhandle == NULL) || (info_dict == NULL) )
          {
-@@ -1751,6 +1754,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
+@@ -1751,6 +1752,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
      return zero;
  }
  
@@ -235,7 +210,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
  
  static PyMethodDef pyxc_methods[] = {
      { "handle",
-@@ -1866,7 +2042,8 @@ static PyMethodDef pyxc_methods[] = {
+@@ -1866,7 +2040,8 @@ static PyMethodDef pyxc_methods[] = {
        " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
        " cpu_time [long]: CPU time consumed, in nanoseconds\n"
        " shutdown_reason [int]: Numeric code from guest OS, explaining "
@@ -245,7 +220,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
  
      { "vcpu_getinfo", 
        (PyCFunction)pyxc_vcpu_getinfo, 
-@@ -2264,6 +2441,66 @@ static PyMethodDef pyxc_methods[] = {
+@@ -2264,6 +2439,66 @@ static PyMethodDef pyxc_methods[] = {
        " enable  [int,0|1]:    Disable or enable?\n"
        "Returns: [int] 0 on success; -1 on error.\n" },
  
@@ -312,10 +287,8 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
      { NULL, NULL, 0, NULL }
  };
  
-Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py
-===================================================================
 --- /dev/null
-+++ xen-4.0.0-testing/tools/python/xen/util/sxputils.py
++++ b/tools/python/xen/util/sxputils.py
 @@ -0,0 +1,64 @@
 +#============================================================================
 +# This library is free software; you can redistribute it and/or
@@ -381,10 +354,8 @@ Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py
 +    return sxphash
 +
 +
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendAPI.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
+--- a/tools/python/xen/xend/XendAPI.py
++++ b/tools/python/xen/xend/XendAPI.py
 @@ -51,6 +51,7 @@ from XendDPCI import XendDPCI
  from XendPSCSI import XendPSCSI, XendPSCSI_HBA
  from XendDSCSI import XendDSCSI, XendDSCSI_HBA
@@ -576,11 +547,9 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
  
      # Xen API: Class VBD
      # ----------------------------------------------------------------
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
-===================================================================
 --- /dev/null
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
-@@ -0,0 +1,896 @@
++++ b/tools/python/xen/xend/XendCPUPool.py
+@@ -0,0 +1,903 @@
 +#============================================================================
 +# This library is free software; you can redistribute it and/or
 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
@@ -1130,7 +1099,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +    def pool_start(cls, poolname):
 +        pool = cls.lookup_pool(poolname)
 +        if not pool:
-+            raise VmError('unkown pool %s' % poolname)
++            raise VmError('unknown pool %s' % poolname)
 +        try:
 +            pool.activate()
 +        except XendAPIError, ex:
@@ -1149,8 +1118,12 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +                             for cpu_ref in pool_vals['host_CPUs'] ]
 +                    cpus.sort()
 +                    pool_vals['host_CPU_numbers'] = cpus
-+                    vm_names = [ xd.get_vm_by_uuid(uuid).getName()
-+                                 for uuid in pool_vals['started_VMs'] ]
++                    # Query VM names. Note that a VM returned by
++                    # get_all_records could have been destroyed by now.
++                    vm_names = [ vm.getName()
++                                 for vm in map(xd.get_vm_by_uuid,
++                                               pool_vals['started_VMs'])
++                                 if vm ]
 +                    pool_vals['started_VM_names'] = vm_names
 +                    pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
 +                    sxprs += [[pool_uuid] + map2sxp(pool_vals)]
@@ -1161,7 +1134,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +    def pool_destroy(cls, poolname):
 +        pool = cls.lookup_pool(poolname)
 +        if not pool:
-+            raise VmError('unkown pool %s' % poolname)
++            raise VmError('unknown pool %s' % poolname)
 +        try:
 +            pool.deactivate()
 +            if not pool.is_managed():
@@ -1172,7 +1145,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +    def pool_delete(cls, poolname):
 +        pool = cls.lookup_pool(poolname)
 +        if not pool:
-+            raise VmError('unkown pool %s' % poolname)
++            raise VmError('unknown pool %s' % poolname)
 +        try:
 +            pool.destroy()
 +        except XendAPIError, ex:
@@ -1181,28 +1154,28 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +    def pool_cpu_add(cls, poolname, cpu):
 +        pool = cls.lookup_pool(poolname)
 +        if not pool:
-+            raise VmError('unkown pool %s' % poolname)
++            raise VmError('unknown pool %s' % poolname)
 +        try:
 +            cpu_ref = cls._cpu_number_to_ref(int(cpu))
 +            if cpu_ref:
 +                pool.add_host_CPU_live(cpu_ref)
 +            else:
 +                raise PoolError(XEND_ERROR_INVALID_CPU,
-+                                'CPU unkown')
++                                'CPU unknown')
 +        except XendAPIError, ex:
 +            raise VmError(ex.get_api_error())
 +
 +    def pool_cpu_remove(cls, poolname, cpu):
 +        pool = cls.lookup_pool(poolname)
 +        if not pool:
-+            raise VmError('unkown pool %s' % poolname)
++            raise VmError('unknown pool %s' % poolname)
 +        try:
 +            cpu_ref = cls._cpu_number_to_ref(int(cpu))
 +            if cpu_ref:
 +                pool.remove_host_CPU_live(cpu_ref)
 +            else:
 +                raise PoolError(XEND_ERROR_INVALID_CPU,
-+                                'CPU unkown')
++                                'CPU unknown')
 +        except XendAPIError, ex:
 +            raise VmError(ex.get_api_error())
 +
@@ -1210,10 +1183,10 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +        dom = XendDomain.instance()
 +        pool = cls.lookup_pool(poolname)
 +        if not pool:
-+            raise VmError('unkown pool %s' % poolname)
++            raise VmError('unknown pool %s' % poolname)
 +        dominfo = dom.domain_lookup_nr(domname)
 +        if not dominfo:
-+            raise VmError('unkown domain %s' % domname)
++            raise VmError('unknown domain %s' % domname)
 +        domid = dominfo.getDomid()
 +        if domid is not None:
 +            if domid == 0:
@@ -1443,8 +1416,11 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +        pool_uuid = None
 +        try:
 +            pool_id = int(id_or_name)
-+            # pool id given
++            # pool id given?
 +            pool_uuid = cls.query_pool_ref(pool_id)
++            if not pool_uuid:
++                # not found -> search name
++                pool_uuid = cls.get_by_name_label(id_or_name)
 +        except ValueError:
 +            # pool name given
 +            pool_uuid = cls.get_by_name_label(id_or_name)
@@ -1477,10 +1453,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
 +
 +    unbound_cpus = classmethod(unbound_cpus)
 +
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+--- a/tools/python/xen/xend/XendConfig.py
++++ b/tools/python/xen/xend/XendConfig.py
 @@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
      'PV_bootloader': 'bootloader',
      'PV_bootloader_args': 'bootloader_args',
@@ -1521,10 +1495,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
              'superpages': 0,
              'description': '',
          }
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConstants.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
+--- a/tools/python/xen/xend/XendConstants.py
++++ b/tools/python/xen/xend/XendConstants.py
 @@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
  
  XS_VMROOT = "/vm/"
@@ -1534,10 +1506,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
  NR_PCI_FUNC = 8
  NR_PCI_DEV = 32
  NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+--- a/tools/python/xen/xend/XendDomainInfo.py
++++ b/tools/python/xen/xend/XendDomainInfo.py
 @@ -60,6 +60,7 @@ from xen.xend.xenstore.xsutil import Get
  from xen.xend.xenstore.xswatch import xswatch
  from xen.xend.XendConstants import *
@@ -1558,7 +1528,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
 +        pool = XendCPUPool.lookup_pool(pool_name)
 +
 +        if pool is None:
-+            raise VmError("unkown pool %s" % pool_name)
++            raise VmError("unknown pool %s" % pool_name)
 +        pool_id = pool.query_pool_id()
 +        if pool_id is None:
 +            raise VmError("pool %s not activated" % pool_name)
@@ -1566,15 +1536,19 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
          flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
  
          try:
-@@ -2573,6 +2587,7 @@ class XendDomainInfo:
-                 ssidref = ssidref,
-                 handle = uuid.fromString(self.info['uuid']),
-                 flags = flags,
-+                cpupool = pool_id,
-                 target = self.info.target())
-         except Exception, e:
-             # may get here if due to ACM the operation is not permitted
-@@ -3613,6 +3628,11 @@ class XendDomainInfo:
+@@ -2586,6 +2600,11 @@ class XendDomainInfo:
+                 failmsg += ', error=%i' % int(self.domid)
+             raise VmError(failmsg)
+ 
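++        # move the new domain into the cpupool it was configured for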
++        try:
++            xc.cpupool_movedomain(pool_id, self.domid)
++        except Exception, e:
++            raise VmError('Moving domain to target pool failed')
++
+         self.dompath = GetDomainPath(self.domid)
+ 
+         self._recreateDom()
+@@ -3613,6 +3632,11 @@ class XendDomainInfo:
  
          retval = xc.sched_credit_domain_get(self.getDomid())
          return retval
@@ -1586,10 +1560,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
      def get_power_state(self):
          return XEN_API_VM_POWER_STATE[self._stateGet()]
      def get_platform(self):
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendError.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendError.py
+--- a/tools/python/xen/xend/XendError.py
++++ b/tools/python/xen/xend/XendError.py
 @@ -18,6 +18,7 @@
  
  from xmlrpclib import Fault
@@ -1625,10 +1597,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
  class VDIError(XendAPIError):
      def __init__(self, error, vdi):
          XendAPIError.__init__(self)
-Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
+--- a/tools/python/xen/xend/XendNode.py
++++ b/tools/python/xen/xend/XendNode.py
 @@ -43,6 +43,7 @@ from XendStateStore import XendStateStor
  from XendMonitor import XendMonitor
  from XendPPCI import XendPPCI
@@ -1702,10 +1672,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
                        'max_free_memory',
                        'max_para_memory',
                        'max_hvm_memory',
-Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
+--- a/tools/python/xen/xend/server/SrvServer.py
++++ b/tools/python/xen/xend/server/SrvServer.py
 @@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
  from xen.xend.XendLogging import log
  from xen.xend.XendClient import XEN_API_SOCKET
@@ -1727,10 +1695,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
              # Reaching this point means we can auto start domains
              try:
                  xenddomain().autostart_domains()
-Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xend/server/XMLRPCServer.py
-+++ xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
+--- a/tools/python/xen/xend/server/XMLRPCServer.py
++++ b/tools/python/xen/xend/server/XMLRPCServer.py
 @@ -33,6 +33,7 @@ from xen.xend.XendClient import XML_RPC_
  from xen.xend.XendConstants import DOM_STATE_RUNNING
  from xen.xend.XendLogging import log
@@ -1762,10 +1728,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
          # Functions in XendNode and XendDmesg
          for type, lst, n in [(XendNode,
                                ['info', 'pciinfo', 'send_debug_keys',
-Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xm/create.dtd
-+++ xen-4.0.0-testing/tools/python/xen/xm/create.dtd
+--- a/tools/python/xen/xm/create.dtd
++++ b/tools/python/xen/xm/create.dtd
 @@ -50,6 +50,7 @@
                   s3_integrity           CDATA #REQUIRED
                   vcpus_max              CDATA #REQUIRED
@@ -1774,10 +1738,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
                   actions_after_shutdown %NORMAL_EXIT; #REQUIRED 
                   actions_after_reboot   %NORMAL_EXIT; #REQUIRED
                   actions_after_crash    %CRASH_BEHAVIOUR; #REQUIRED
-Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py
-+++ xen-4.0.0-testing/tools/python/xen/xm/create.py
+--- a/tools/python/xen/xm/create.py
++++ b/tools/python/xen/xm/create.py
 @@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
            fn=set_bool, default=None,
            use="""Do not inject spurious page faults into this guest""")
@@ -1798,10 +1760,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
  
      config_image = configure_image(vals)
      if vals.bootloader:
-Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
-+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
+--- a/tools/python/xen/xm/main.py
++++ b/tools/python/xen/xm/main.py
 @@ -56,6 +56,7 @@ from xen.util.xmlrpcclient import Server
  import xen.util.xsm.xsm as security
  from xen.util.xsm.xsm import XSMError
@@ -1982,7 +1942,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
 +    if len(refs) > 0:
 +        return refs[0]
 +    else:
-+        err('unkown pool name')
++        err('unknown pool name')
 +        sys.exit(1)
 +
 +def xm_pool_start(args):
@@ -2110,7 +2070,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
 +        cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
 +                                  if c_rec['number'] == args[1] ]
 +        if len(cpu_ref) == 0:
-+            err('cpu number unkown')
++            err('cpu number unknown')
 +        else:
 +            server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
 +    else:
@@ -2124,7 +2084,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
 +        cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
 +                                  if c_rec['number'] ==  args[1] ]
 +        if len(cpu_ref) == 0:
-+            err('cpu number unkown')
++            err('cpu number unknown')
 +        else:
 +            server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
 +    else:
@@ -2167,10 +2127,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
      ]
  
  for c in IMPORTED_COMMANDS:
-Index: xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
-===================================================================
 --- /dev/null
-+++ xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
++++ b/tools/python/xen/xm/pool-create.py
 @@ -0,0 +1,51 @@
 +#============================================================================
 +# This library is free software; you can redistribute it and/or
@@ -2223,10 +2181,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
 +if __name__ == '__main__':
 +    main(sys.argv)
 +
-Index: xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
-===================================================================
 --- /dev/null
-+++ xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
++++ b/tools/python/xen/xm/pool-new.py
 @@ -0,0 +1,50 @@
 +#============================================================================
 +# This library is free software; you can redistribute it and/or
@@ -2278,10 +2234,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
 +if __name__ == '__main__':
 +    main(sys.argv)
 +
-Index: xen-4.0.0-testing/tools/python/xen/xm/pool.py
-===================================================================
 --- /dev/null
-+++ xen-4.0.0-testing/tools/python/xen/xm/pool.py
++++ b/tools/python/xen/xm/pool.py
 @@ -0,0 +1,236 @@
 +#============================================================================
 +# This library is free software; you can redistribute it and/or
@@ -2519,10 +2473,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/pool.py
 +def help():
 +    return str(GOPTS)
 +
-Index: xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
-===================================================================
---- xen-4.0.0-testing.orig/tools/python/xen/xm/xenapi_create.py
-+++ xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
+--- a/tools/python/xen/xm/xenapi_create.py
++++ b/tools/python/xen/xm/xenapi_create.py
 @@ -310,6 +310,8 @@ class xenapi_create:
                  get_child_nodes_as_dict(vm, "platform", "key", "value"),
              "other_config":
diff --git a/cpupools-core-fixup.patch b/cpupools-core-fixup.patch
index 5cc48e1..fde77c6 100644
--- a/cpupools-core-fixup.patch
+++ b/cpupools-core-fixup.patch
@@ -36,7 +36,7 @@
  static struct csched_private *csched_priv0 = NULL;
  
  static void csched_tick(void *_cpu);
-@@ -1524,11 +1523,13 @@ static void csched_tick_resume(struct sc
+@@ -1517,11 +1516,13 @@ static void csched_tick_resume(struct sc
      }
  }
  
diff --git a/cpupools-core.patch b/cpupools-core.patch
index 8ee7b80..5dbe123 100644
--- a/cpupools-core.patch
+++ b/cpupools-core.patch
@@ -483,7 +483,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  obj-y += event_channel.o
 --- /dev/null
 +++ b/xen/common/cpupool.c
-@@ -0,0 +1,570 @@
+@@ -0,0 +1,585 @@
 +/******************************************************************************
 + * cpupool.c
 + *
@@ -515,6 +515,9 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +static int cpupool0_max_cpus;
 +integer_param("pool0_max_cpus", cpupool0_max_cpus);
 +
++static int cpupool_moving_cpu = -1;
++static struct cpupool *cpupool_cpu_moving = NULL;
++
 +/* cpupool lock: be careful, this lock is sometimes released on a different
 + *               cpu than the one it was obtained on!
 + */
@@ -590,7 +593,6 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    }
 +    *q = c;
 +    c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
-+    c->cpu_in_transit = -1;
 +    if ( schedule_init_global(sched, &(c->sched)) )
 +    {
 +        spin_unlock(&cpupool_lock);
@@ -637,16 +639,20 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 + * assign a specific cpu to a cpupool
 + * cpupool_lock must be held
 + */
-+static void cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
++static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
 +{
-+    printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
-+        c->cpupool_id, cpu);
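++    /* a cpu in transit may only be assigned to the pool it is moving to */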
++    if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
++        return -EBUSY;
 +    per_cpu(cpupool, cpu) = c;
 +    schedule_cpu_switch(cpu, c);
 +    cpu_clear(cpu, cpupool_free_cpus);
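++    /* assigning the cpu in transit completes its pending move */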
++    if ( cpupool_moving_cpu == cpu )
++    {
++        cpupool_moving_cpu = -1;
++        cpupool_cpu_moving = NULL;
++    }
 +    cpu_set(cpu, c->cpu_valid);
-+    printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ready\n",
-+        c->cpupool_id, cpu);
++    return 0;
 +}
 +
 +/*
@@ -663,8 +669,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    spin_lock(&cpupool_lock);
 +    for_each_cpu_mask(i, cpupool_free_cpus)
 +    {
-+        cpupool_assign_cpu_locked(c, i);
-+        n++;
++        if ( cpupool_assign_cpu_locked(c, i) == 0 )
++            n++;
 +        if ( n == ncpu )
 +            break;
 +    }
@@ -674,43 +680,25 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    return n;
 +}
 +
-+static void cpupool_unassign_cpu_locked_1(struct cpupool *c, unsigned int cpu)
-+{
-+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
-+        c->cpupool_id, cpu);
-+    c->cpu_in_transit = cpu;
-+}
-+
-+static int cpupool_unassign_cpu_locked_2(struct cpupool *c)
-+{
-+    int cpu = c->cpu_in_transit;
-+    int ret;
-+
-+    c->cpu_in_transit = -1;
-+    cpu_clear(cpu, c->cpu_valid);
-+    ret = cpu_disable_scheduler(cpu, 1);
-+    if ( ret )
-+    {
-+        cpu_set(cpu, c->cpu_valid);
-+    }
-+    else
-+    {
-+        cpu_set(cpu, cpupool_free_cpus);
-+        schedule_cpu_switch(cpu, NULL);
-+        per_cpu(cpupool, cpu) = NULL;
-+    }
-+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
-+        c->cpupool_id, cpu, ret);
-+    return ret;
-+}
-+
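++/*
++ * unassign cpu worker function: runs via continue_hypercall_on_cpu() on the
++ * cpu selected in cpupool_unassign_cpu(); cpupool_lock is still held here
++ * and is released before returning
++ */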
 +static long cpupool_unassign_cpu_helper(void *hdl, void *info)
 +{
 +    struct cpupool *c = (struct cpupool *)info;
++    int cpu = cpupool_moving_cpu;
 +    long ret;
++    int cpupool_id = c->cpupool_id;
 +
-+    ret = cpupool_unassign_cpu_locked_2(c);
++    ret = cpu_disable_scheduler(cpu, 1);
++    cpu_set(cpu, cpupool_free_cpus);
++    if ( !ret )
++    {
++        schedule_cpu_switch(cpu, NULL);
++        per_cpu(cpupool, cpu) = NULL;
++        cpupool_moving_cpu = -1;
++        cpupool_cpu_moving = NULL;
++    }
 +    spin_unlock(&cpupool_lock);
++    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
++        cpupool_id, cpu, ret);
 +    return ret;
 +}
 +
@@ -728,16 +716,23 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
 +{
 +    int work_cpu;
-+    int rc = 0;
++    int ret;
 +    struct domain *d;
++    int cpupool_id = c->cpupool_id;
 +
++    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
++        cpupool_id, cpu);
 +    spin_lock(&cpupool_lock);
-+    if ( !cpu_isset(cpu, c->cpu_valid) )
-+    {
-+        spin_unlock(&cpupool_lock);
-+        return 0;
-+    }
-+    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) )
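++    /* only one cpu move may be in progress at a time */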
++    ret = -EBUSY;
++    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
++        goto out;
++
++    ret = 0;
++    if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
++        goto out;
++
++    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
++         (cpu != cpupool_moving_cpu) )
 +    {
 +        for_each_domain(d)
 +        {
@@ -745,27 +740,24 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +                continue;
 +            if ( !d->is_dying )
 +            {
-+                rc = -EBUSY;
++                ret = -EBUSY;
 +                break;
 +            }
-+            printk(XENLOG_DEBUG "moving dying domain %d to pool0\n",
-+                d->domain_id);
 +            c->n_dom--;
-+            rc = sched_move_domain(d, cpupool0);
-+            if ( rc )
++            ret = sched_move_domain(d, cpupool0);
++            if ( ret )
 +            {
 +                c->n_dom++;
 +                break;
 +            }
 +            cpupool0->n_dom++;
 +        }
-+        if ( rc )
-+        {
-+            spin_unlock(&cpupool_lock);
-+            return rc;
-+        }
++        if ( ret )
++            goto out;
 +    }
-+    cpupool_unassign_cpu_locked_1(c, cpu);
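++    /* mark the cpu as moving; the helper below completes the removal */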
++    cpupool_moving_cpu = cpu;
++    cpupool_cpu_moving = c;
++    cpu_clear(cpu, c->cpu_valid);
 +    work_cpu = smp_processor_id();
 +    if ( work_cpu == cpu )
 +    {
@@ -775,6 +767,12 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    }
 +    return continue_hypercall_on_cpu(work_cpu, NULL,
 +                                     cpupool_unassign_cpu_helper, c);
++
++out:
++    spin_unlock(&cpupool_lock);
++    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
++        cpupool_id, cpu, ret);
++    return ret;
 +}
 +
 +/*
@@ -802,6 +800,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +{
 +    struct cpupool *c;
 +    int rc = 1;
++    int n_dom;
 +
 +    if ( poolid == CPUPOOLID_NONE )
 +        return 0;
@@ -810,12 +809,14 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    if ( (c != NULL) && cpus_weight(c->cpu_valid) )
 +    {
 +        c->n_dom++;
++        n_dom = c->n_dom;
 +        d->cpupool = c;
-+        printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
-+            d->domain_id, poolid, c->n_dom);
 +        rc = 0;
 +    }
 +    spin_unlock(&cpupool_lock);
++    if ( !rc )
++        printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
++            d->domain_id, poolid, n_dom);
 +    return rc;
 +}
 +
@@ -824,14 +825,19 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 + */
 +void cpupool_rm_domain(struct domain *d)
 +{
++    int cpupool_id;
++    int n_dom;
++
 +    if ( d->cpupool == NULL )
 +        return;
 +    spin_lock(&cpupool_lock);
++    cpupool_id = d->cpupool->cpupool_id;
 +    d->cpupool->n_dom--;
-+    printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
-+        d->domain_id, d->cpupool->cpupool_id, d->cpupool->n_dom);
++    n_dom = d->cpupool->n_dom;
 +    d->cpupool = NULL;
 +    spin_unlock(&cpupool_lock);
++    printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
++        d->domain_id, cpupool_id, n_dom);
 +    return;
 +}
 +
@@ -845,7 +851,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +        return;
 +    spin_lock(&cpupool_lock);
 +    cpu_set(cpu, cpupool_free_cpus);
-+    cpupool_assign_cpu_locked(cpupool0, cpu);
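++    /* ignore the result: a newly onlined cpu cannot be in transit */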
++    (void)cpupool_assign_cpu_locked(cpupool0, cpu);
 +    spin_unlock(&cpupool_lock);
 +    return;
 +}
@@ -914,6 +920,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +        unsigned cpu;
 +
 +        cpu = op->cpu;
++        printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
++            op->cpupool_id, cpu);
 +        spin_lock(&cpupool_lock);
 +        if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
 +            cpu = first_cpu(cpupool_free_cpus);
@@ -927,10 +935,11 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +        ret = -ENOENT;
 +        if ( c == NULL )
 +            goto addcpu_out;
-+        cpupool_assign_cpu_locked(c, cpu);
-+        ret = 0;
++        ret = cpupool_assign_cpu_locked(c, cpu);
 +addcpu_out:
 +        spin_unlock(&cpupool_lock);
++        printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
++            op->cpupool_id, cpu, ret);
 +    }
 +    break;
 +
@@ -974,23 +983,29 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +            rcu_unlock_domain(d);
 +            break;
 +        }
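++        /* a move into the domain's current pool is a no-op */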
++        if ( op->cpupool_id == d->cpupool->cpupool_id )
++        {
++            ret = 0;
++            rcu_unlock_domain(d);
++            break;
++        }
++        printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
++            d->domain_id, op->cpupool_id);
 +        ret = -ENOENT;
 +        spin_lock(&cpupool_lock);
 +        c = cpupool_find_by_id(op->cpupool_id, 1);
 +        if ( (c != NULL) && cpus_weight(c->cpu_valid) )
 +        {
-+            printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
-+                d->domain_id, c->cpupool_id);
 +            d->cpupool->n_dom--;
 +            ret = sched_move_domain(d, c);
 +            if ( ret )
 +                d->cpupool->n_dom++;
 +            else
 +                c->n_dom++;
-+            printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
-+                d->domain_id, c->cpupool_id, ret);
 +        }
 +        spin_unlock(&cpupool_lock);
++        printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
++            d->domain_id, op->cpupool_id, ret);
 +        rcu_unlock_domain(d);
 +    }
 +    break;
@@ -1117,7 +1132,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      {
          if ( cpu_isset(i, cpu_exclude_map) )
              continue;
-@@ -388,12 +391,14 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+@@ -388,6 +391,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
          domid_t        dom;
          static domid_t rover = 0;
          unsigned int domcr_flags;
@@ -1125,24 +1140,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
          ret = -EINVAL;
          if ( supervisor_mode_kernel ||
-              (op->u.createdomain.flags &
-              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap |
--               XEN_DOMCTL_CDF_s3_integrity | XEN_DOMCTL_CDF_oos_off)) )
-+               XEN_DOMCTL_CDF_s3_integrity | XEN_DOMCTL_CDF_oos_off |
-+               XEN_DOMCTL_CDF_pool)) )
-             break;
- 
-         dom = op->domain;
-@@ -429,9 +434,15 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
-             domcr_flags |= DOMCRF_s3_integrity;
-         if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off )
+@@ -431,7 +435,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
              domcr_flags |= DOMCRF_oos_off;
-+        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_pool )
-+            pool = op->u.createdomain.cpupool;
-+
-+        ret = -EINVAL;
-+        if ( pool == CPUPOOLID_NONE )
-+            break;
  
          ret = -ENOMEM;
 -        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
@@ -1150,7 +1149,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
          if ( d == NULL )
              break;
  
-@@ -450,6 +461,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+@@ -450,6 +454,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
      {
          struct domain *d;
          unsigned int i, max = op->u.max_vcpus.max, cpu;
@@ -1158,7 +1157,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
          ret = -ESRCH;
          if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
-@@ -498,6 +510,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+@@ -498,6 +503,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
              goto maxvcpu_out;
  
          ret = -ENOMEM;
@@ -1166,7 +1165,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
          if ( max > d->max_vcpus )
          {
              struct vcpu **vcpus;
-@@ -521,8 +534,8 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+@@ -521,8 +527,8 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
                  continue;
  
              cpu = (i == 0) ?
@@ -1177,7 +1176,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
              if ( alloc_vcpu(d, i, cpu) == NULL )
                  goto maxvcpu_out;
-@@ -961,6 +974,14 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
+@@ -961,6 +967,14 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
      }
      break;
  
@@ -1470,7 +1469,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 -static int
 -csched_vcpu_init(struct vcpu *vc)
 +static void *
-+csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc)
++csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc, void *dd)
  {
 -    struct domain * const dom = vc->domain;
 -    struct csched_dom *sdom = CSCHED_DOM(dom);
@@ -1488,7 +1487,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      INIT_LIST_HEAD(&svc->runq_elem);
      INIT_LIST_HEAD(&svc->active_vcpu_elem);
 -    svc->sdom = sdom;
-+    svc->sdom = CSCHED_DOM(vc->domain);
++    svc->sdom = dd;
      svc->vcpu = vc;
      atomic_set(&svc->credit, 0);
      svc->flags = 0U;
@@ -1608,7 +1607,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
              }
              sdom->weight = op->u.credit.weight;
          }
-@@ -709,14 +773,14 @@ csched_dom_cntl(
+@@ -709,25 +773,20 @@ csched_dom_cntl(
          if ( op->u.credit.cap != (uint16_t)~0U )
              sdom->cap = op->u.credit.cap;
  
@@ -1619,22 +1618,69 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      return 0;
  }
  
- static int
+-static int
 -csched_dom_init(struct domain *dom)
-+csched_dom_init(struct scheduler *ops, struct domain *dom)
++static void *
++csched_alloc_domdata(struct scheduler *ops, struct domain *dom)
  {
      struct csched_dom *sdom;
  
-@@ -743,7 +807,7 @@ csched_dom_init(struct domain *dom)
+-    CSCHED_STAT_CRANK(dom_init);
+-
+-    if ( is_idle_domain(dom) )
+-        return 0;
+-
+     sdom = xmalloc(struct csched_dom);
+     if ( sdom == NULL )
+-        return -ENOMEM;
++        return NULL;
+     memset(sdom, 0, sizeof(*sdom));
+ 
+     /* Initialize credit and weight */
+@@ -737,16 +796,40 @@ csched_dom_init(struct domain *dom)
+     sdom->dom = dom;
+     sdom->weight = CSCHED_DEFAULT_WEIGHT;
+     sdom->cap = 0U;
++
++    return (void *)sdom;
++}
++
++static int
++csched_dom_init(struct scheduler *ops, struct domain *dom)
++{
++    struct csched_dom *sdom;
++
++    CSCHED_STAT_CRANK(dom_init);
++
++    if ( is_idle_domain(dom) )
++        return 0;
++
++    sdom = csched_alloc_domdata(ops, dom);
++    if ( sdom == NULL )
++        return -ENOMEM;
++
+     dom->sched_priv = sdom;
+ 
+     return 0;
  }
  
  static void
 -csched_dom_destroy(struct domain *dom)
++csched_free_domdata(struct scheduler *ops, void *data)
++{
++    xfree(data);
++}
++
++static void
 +csched_dom_destroy(struct scheduler *ops, struct domain *dom)
  {
      CSCHED_STAT_CRANK(dom_destroy);
-     xfree(CSCHED_DOM(dom));
-@@ -757,7 +821,7 @@ csched_dom_destroy(struct domain *dom)
+-    xfree(CSCHED_DOM(dom));
++    csched_free_domdata(ops, CSCHED_DOM(dom));
+ }
+ 
+ /*
+@@ -757,7 +840,7 @@ csched_dom_destroy(struct domain *dom)
   * remember the last UNDER to make the move up operation O(1).
   */
  static void
@@ -1643,7 +1689,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
      struct list_head *runq, *elem, *next, *last_under;
-@@ -765,7 +829,7 @@ csched_runq_sort(unsigned int cpu)
+@@ -765,7 +848,7 @@ csched_runq_sort(unsigned int cpu)
      unsigned long flags;
      int sort_epoch;
  
@@ -1652,7 +1698,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      if ( sort_epoch == spc->runq_sort_last )
          return;
  
-@@ -802,6 +866,7 @@ csched_runq_sort(unsigned int cpu)
+@@ -802,6 +885,7 @@ csched_runq_sort(unsigned int cpu)
  static void
  csched_acct(void* dummy)
  {
@@ -1660,7 +1706,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      unsigned long flags;
      struct list_head *iter_vcpu, *next_vcpu;
      struct list_head *iter_sdom, *next_sdom;
-@@ -818,22 +883,22 @@ csched_acct(void* dummy)
+@@ -818,22 +902,22 @@ csched_acct(void* dummy)
      int credit;
  
  
@@ -1690,7 +1736,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
          CSCHED_STAT_CRANK(acct_no_work);
          goto out;
      }
-@@ -845,7 +910,7 @@ csched_acct(void* dummy)
+@@ -845,7 +929,7 @@ csched_acct(void* dummy)
      credit_xtra = 0;
      credit_cap = 0U;
  
@@ -1699,7 +1745,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      {
          sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
  
-@@ -865,9 +930,9 @@ csched_acct(void* dummy)
+@@ -865,9 +949,9 @@ csched_acct(void* dummy)
           * only when the system-wide credit balance is negative.
           */
          credit_peak = sdom->active_vcpu_count * CSCHED_CREDITS_PER_ACCT;
@@ -1711,7 +1757,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
                               (weight_total - 1)
                             ) / weight_total;
          }
-@@ -909,7 +974,7 @@ csched_acct(void* dummy)
+@@ -909,7 +993,7 @@ csched_acct(void* dummy)
                   */
                  CSCHED_STAT_CRANK(acct_reorder);
                  list_del(&sdom->active_sdom_elem);
@@ -1720,7 +1766,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
              }
  
              credit_fair = credit_peak;
-@@ -975,7 +1040,7 @@ csched_acct(void* dummy)
+@@ -975,7 +1059,7 @@ csched_acct(void* dummy)
                  /* Upper bound on credits means VCPU stops earning */
                  if ( credit > CSCHED_CREDITS_PER_TSLICE )
                  {
@@ -1729,7 +1775,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
                      credit = 0;
                      atomic_set(&svc->credit, credit);
                  }
-@@ -987,15 +1052,15 @@ csched_acct(void* dummy)
+@@ -987,15 +1071,15 @@ csched_acct(void* dummy)
          }
      }
  
@@ -1749,7 +1795,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
              MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
  }
  
-@@ -1004,6 +1069,7 @@ csched_tick(void *_cpu)
+@@ -1004,6 +1088,7 @@ csched_tick(void *_cpu)
  {
      unsigned int cpu = (unsigned long)_cpu;
      struct csched_pcpu *spc = CSCHED_PCPU(cpu);
@@ -1757,7 +1803,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
      spc->tick++;
  
-@@ -1011,7 +1077,7 @@ csched_tick(void *_cpu)
+@@ -1011,7 +1096,7 @@ csched_tick(void *_cpu)
       * Accounting for running VCPU
       */
      if ( !is_idle_vcpu(current) )
@@ -1766,7 +1812,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
      /*
       * Check if runq needs to be sorted
-@@ -1020,7 +1086,7 @@ csched_tick(void *_cpu)
+@@ -1020,7 +1105,7 @@ csched_tick(void *_cpu)
       * modified priorities. This is a special O(n) sort and runs at most
       * once per accounting period (currently 30 milliseconds).
       */
@@ -1775,7 +1821,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
      set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
  }
-@@ -1073,10 +1139,12 @@ csched_runq_steal(int peer_cpu, int cpu,
+@@ -1073,16 +1158,19 @@ csched_runq_steal(int peer_cpu, int cpu,
  }
  
  static struct csched_vcpu *
@@ -1789,17 +1835,24 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      int peer_cpu;
  
      BUG_ON( cpu != snext->vcpu->processor );
-@@ -1096,7 +1164,8 @@ csched_load_balance(int cpu, struct csch
++    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
+ 
+     /* If this CPU is going offline we shouldn't steal work. */
+-    if ( unlikely(!cpu_online(cpu)) )
++    if ( unlikely(!cpu_isset(cpu, *online)) )
+         goto out;
+ 
+     if ( snext->pri == CSCHED_PRI_IDLE )
+@@ -1096,7 +1184,7 @@ csched_load_balance(int cpu, struct csch
       * Peek at non-idling CPUs in the system, starting with our
       * immediate neighbour.
       */
 -    cpus_andnot(workers, cpu_online_map, csched_priv.idlers);
-+    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
 +    cpus_andnot(workers, *online, prv->idlers);
      cpu_clear(cpu, workers);
      peer_cpu = cpu;
  
-@@ -1138,17 +1207,43 @@ csched_load_balance(int cpu, struct csch
+@@ -1138,11 +1226,12 @@ csched_load_balance(int cpu, struct csch
   * fast for the common case.
   */
  static struct task_slice
@@ -1813,48 +1866,16 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      struct csched_vcpu *snext;
      struct task_slice ret;
  
-     CSCHED_STAT_CRANK(schedule);
-     CSCHED_VCPU_CHECK(current);
- 
-+    if ( unlikely(!cpu_isset(cpu, *CSCHED_CPUONLINE(per_cpu(cpupool, cpu)))) )
-+    {
-+        /* race with switching cpu between pools: when cpu is leaving the
-+           pool try to schedule idle vcpu */
-+
-+        struct list_head * iter;
-+
-+        snext = scurr;
-+        if (is_idle_vcpu(current))
-+            goto out;
-+
-+        if ( vcpu_runnable(current) )
-+            __runq_insert(cpu, scurr);
-+
-+        list_for_each(iter, runq)
-+        {
-+            snext = __runq_elem(iter);
-+            if ( snext->pri == CSCHED_PRI_IDLE )
-+                break;
-+        }
-+        BUG_ON( snext->pri != CSCHED_PRI_IDLE );
-+        __runq_remove(snext);
-+        goto out;
-+    }
-+
-     /* Update credits */
-     if ( !is_idle_vcpu(scurr->vcpu) )
-     {
-@@ -1177,20 +1272,21 @@ csched_schedule(s_time_t now)
+@@ -1177,7 +1266,7 @@ csched_schedule(s_time_t now)
      if ( snext->pri > CSCHED_PRI_TS_OVER )
          __runq_remove(snext);
      else
 -        snext = csched_load_balance(cpu, snext);
 +        snext = csched_load_balance(prv, cpu, snext);
  
-+out:
      /*
       * Update idlers mask if necessary. When we're idling, other CPUs
-      * will tickle us when they get extra work.
+@@ -1185,12 +1274,12 @@ csched_schedule(s_time_t now)
       */
      if ( snext->pri == CSCHED_PRI_IDLE )
      {
@@ -1871,7 +1892,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      }
  
      if ( !is_idle_vcpu(snext->vcpu) )
-@@ -1237,7 +1333,7 @@ csched_dump_vcpu(struct csched_vcpu *svc
+@@ -1237,7 +1326,7 @@ csched_dump_vcpu(struct csched_vcpu *svc
  }
  
  static void
@@ -1880,7 +1901,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      struct list_head *runq, *iter;
      struct csched_pcpu *spc;
-@@ -1275,9 +1371,10 @@ csched_dump_pcpu(int cpu)
+@@ -1275,9 +1364,10 @@ csched_dump_pcpu(int cpu)
  }
  
  static void
@@ -1892,7 +1913,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      int loop;
  #define idlers_buf keyhandler_scratch
  
-@@ -1294,12 +1391,12 @@ csched_dump(void)
+@@ -1294,12 +1384,12 @@ csched_dump(void)
             "\tticks per tslice   = %d\n"
             "\tticks per acct     = %d\n"
             "\tmigration delay    = %uus\n",
@@ -1911,7 +1932,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
             CSCHED_DEFAULT_WEIGHT,
             CSCHED_MSECS_PER_TICK,
             CSCHED_CREDITS_PER_MSEC,
-@@ -1307,12 +1404,12 @@ csched_dump(void)
+@@ -1307,12 +1397,12 @@ csched_dump(void)
             CSCHED_TICKS_PER_ACCT,
             vcpu_migration_delay);
  
@@ -1926,7 +1947,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      {
          struct csched_dom *sdom;
          sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
-@@ -1329,18 +1426,30 @@ csched_dump(void)
+@@ -1329,18 +1419,30 @@ csched_dump(void)
  #undef idlers_buf
  }
  
@@ -1968,7 +1989,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  }
  
  /* Tickers cannot be kicked until SMP subsystem is alive. */
-@@ -1350,54 +1459,81 @@ static __init int csched_start_tickers(v
+@@ -1350,54 +1452,81 @@ static __init int csched_start_tickers(v
      unsigned int cpu;
  
      /* Is the credit scheduler initialised? */
@@ -2060,7 +2081,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      .destroy_vcpu   = csched_vcpu_destroy,
  
      .sleep          = csched_vcpu_sleep,
-@@ -1411,6 +1547,11 @@ const struct scheduler sched_credit_def 
+@@ -1411,6 +1540,13 @@ const struct scheduler sched_credit_def 
      .dump_cpu_state = csched_dump_pcpu,
      .dump_settings  = csched_dump,
      .init           = csched_init,
@@ -2069,6 +2090,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    .free_vdata     = csched_free_vdata,
 +    .alloc_pdata    = csched_alloc_pdata,
 +    .free_pdata     = csched_free_pdata,
++    .alloc_domdata  = csched_alloc_domdata,
++    .free_domdata   = csched_free_domdata,
  
      .tick_suspend   = csched_tick_suspend,
      .tick_resume    = csched_tick_resume,
@@ -2098,7 +2121,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
  
 -static int sedf_init_vcpu(struct vcpu *v)
-+static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v)
++static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v, void *dd)
  {
      struct sedf_vcpu_info *inf;
  
@@ -2130,7 +2153,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      /* Every VCPU gets an equal share of extratime by default. */
      inf->deadl_abs   = 0;
      inf->latency     = 0;
-@@ -383,19 +373,49 @@ static int sedf_init_vcpu(struct vcpu *v
+@@ -383,39 +373,88 @@ static int sedf_init_vcpu(struct vcpu *v
      }
      else
      {
@@ -2158,42 +2181,66 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    INIT_LIST_HEAD(&spc->extraq[EXTRA_UTIL_Q]);
 +
 +    return (void *)spc;
- }
- 
--static void sedf_destroy_vcpu(struct vcpu *v)
++}
++
 +static void
 +sedf_free_pdata(struct scheduler *ops, void *spc, int cpu)
- {
--    xfree(v->sched_priv);
++{
 +    if ( spc == NULL )
 +        return;
 +
 +    xfree(spc);
- }
- 
--static int sedf_init_domain(struct domain *d)
++}
++
 +static void sedf_free_vdata(struct scheduler *ops, void *priv)
 +{
 +    xfree(priv);
-+}
-+
+ }
+ 
+-static void sedf_destroy_vcpu(struct vcpu *v)
 +static void sedf_destroy_vcpu(struct scheduler *ops, struct vcpu *v)
-+{
+ {
+-    xfree(v->sched_priv);
 +    sedf_free_vdata(ops, v->sched_priv);
+ }
+ 
+-static int sedf_init_domain(struct domain *d)
++static void *
++sedf_alloc_domdata(struct scheduler *ops, struct domain *d)
+ {
+-    d->sched_priv = xmalloc(struct sedf_dom_info);
++    void *mem;
++
++    mem = xmalloc(struct sedf_dom_info);
++    if ( mem == NULL )
++        return NULL;
++
++    memset(mem, 0, sizeof(struct sedf_dom_info));
++
++    return mem;
 +}
 +
 +static int sedf_init_domain(struct scheduler *ops, struct domain *d)
- {
-     d->sched_priv = xmalloc(struct sedf_dom_info);
++{
++    d->sched_priv = sedf_alloc_domdata(ops, d);
      if ( d->sched_priv == NULL )
-@@ -406,16 +426,18 @@ static int sedf_init_domain(struct domai
+         return -ENOMEM;
+ 
+-    memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
+-
      return 0;
  }
  
 -static void sedf_destroy_domain(struct domain *d)
++static void sedf_free_domdata(struct scheduler *ops, void *data)
++{
++    xfree(data);
++}
++
 +static void sedf_destroy_domain(struct scheduler *ops, struct domain *d)
  {
-     xfree(d->sched_priv);
+-    xfree(d->sched_priv);
++    sedf_free_domdata(ops, d->sched_priv);
  }
  
 -static int sedf_pick_cpu(struct vcpu *v)
@@ -2208,7 +2255,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      return first_cpu(online_affinity);
  }
  
-@@ -751,7 +773,7 @@ static struct task_slice sedf_do_extra_s
+@@ -751,7 +790,7 @@ static struct task_slice sedf_do_extra_s
     -timeslice for the current period used up
   -domain on waitqueue has started its period
     -and various others ;) in general: determine which domain to run next*/
@@ -2217,7 +2264,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      int                   cpu      = smp_processor_id();
      struct list_head     *runq     = RUNQ(cpu);
-@@ -786,6 +808,13 @@ static struct task_slice sedf_do_schedul
+@@ -786,6 +825,13 @@ static struct task_slice sedf_do_schedul
      }
   check_waitq:
      update_queues(now, runq, waitq);
@@ -2231,7 +2278,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
   
      /*now simply pick the first domain from the runqueue, which has the
        earliest deadline, because the list is sorted*/
-@@ -848,7 +877,7 @@ static struct task_slice sedf_do_schedul
+@@ -848,7 +894,7 @@ static struct task_slice sedf_do_schedul
  }
  
  
@@ -2240,7 +2287,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",
            d->domain->domain_id, d->vcpu_id);
-@@ -1067,7 +1096,7 @@ static inline int should_switch(struct v
+@@ -1067,7 +1113,7 @@ static inline int should_switch(struct v
      return 1;
  }
  
@@ -2249,7 +2296,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      s_time_t              now = NOW();
      struct sedf_vcpu_info* inf = EDOM_INFO(d);
-@@ -1220,8 +1249,8 @@ static void sedf_dump_domain(struct vcpu
+@@ -1220,8 +1266,8 @@ static void sedf_dump_domain(struct vcpu
  }
  
  
@@ -2260,7 +2307,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      struct list_head      *list, *queue, *tmp;
      struct sedf_vcpu_info *d_inf;
-@@ -1294,7 +1323,7 @@ static void sedf_dump_cpu_state(int i)
+@@ -1294,7 +1340,7 @@ static void sedf_dump_cpu_state(int i)
  
  
  /* Adjusts periods and slices of the domains accordingly to their weights. */
@@ -2269,7 +2316,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      struct vcpu *p;
      struct domain      *d;
-@@ -1315,6 +1344,8 @@ static int sedf_adjust_weights(struct xe
+@@ -1315,6 +1361,8 @@ static int sedf_adjust_weights(struct xe
      rcu_read_lock(&domlist_read_lock);
      for_each_domain( d )
      {
@@ -2278,7 +2325,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
          for_each_vcpu( d, p )
          {
              if ( EDOM_INFO(p)->weight )
-@@ -1366,7 +1397,7 @@ static int sedf_adjust_weights(struct xe
+@@ -1366,7 +1414,7 @@ static int sedf_adjust_weights(struct xe
  
  
  /* set or fetch domain scheduling parameters */
@@ -2287,7 +2334,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  {
      struct vcpu *v;
      int rc;
-@@ -1376,9 +1407,6 @@ static int sedf_adjust(struct domain *p,
+@@ -1376,9 +1424,6 @@ static int sedf_adjust(struct domain *p,
            p->domain_id, op->u.sedf.period, op->u.sedf.slice,
            op->u.sedf.latency, (op->u.sedf.extratime)?"yes":"no");
  
@@ -2297,7 +2344,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
      {
          /* Check for sane parameters. */
-@@ -1428,7 +1456,7 @@ static int sedf_adjust(struct domain *p,
+@@ -1428,7 +1473,7 @@ static int sedf_adjust(struct domain *p,
              }
          }
  
@@ -2306,7 +2353,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
          if ( rc )
              return rc;
  
-@@ -1456,7 +1484,7 @@ static int sedf_adjust(struct domain *p,
+@@ -1456,7 +1501,7 @@ static int sedf_adjust(struct domain *p,
      return 0;
  }
  
@@ -2315,7 +2362,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      .name     = "Simple EDF Scheduler",
      .opt_name = "sedf",
      .sched_id = XEN_SCHEDULER_SEDF,
-@@ -1464,9 +1492,13 @@ const struct scheduler sched_sedf_def = 
+@@ -1464,9 +1509,15 @@ const struct scheduler sched_sedf_def = 
      .init_domain    = sedf_init_domain,
      .destroy_domain = sedf_destroy_domain,
  
@@ -2326,6 +2373,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    .free_vdata     = sedf_free_vdata,
 +    .alloc_pdata    = sedf_alloc_pdata,
 +    .free_pdata     = sedf_free_pdata,
++    .alloc_domdata  = sedf_alloc_domdata,
++    .free_domdata   = sedf_free_domdata,
 +
      .do_schedule    = sedf_do_schedule,
      .pick_cpu       = sedf_pick_cpu,
@@ -2366,7 +2415,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
  static inline void trace_runstate_change(struct vcpu *v, int new_state)
  {
-@@ -207,7 +214,74 @@ int sched_init_vcpu(struct vcpu *v, unsi
+@@ -207,7 +214,86 @@ int sched_init_vcpu(struct vcpu *v, unsi
  
      TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
  
@@ -2379,7 +2428,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +            return 1;
 +    }
 +
-+    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v);
++    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
 +    if ( v->sched_priv == NULL )
 +        return 1;
 +
@@ -2394,14 +2443,23 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    struct vcpu *v;
 +    unsigned int new_p;
 +    void **vcpu_priv;
++    void *domdata;
++
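++    /* allocate all new scheduler data first so an allocation failure
++     * leaves the domain untouched in its old pool
++     */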
++    domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
++    if ( domdata == NULL )
++        return -ENOMEM;
 +
 +    vcpu_priv = xmalloc_array(void *, d->max_vcpus);
 +    if ( vcpu_priv == NULL )
++    {
++        SCHED_OP(&(c->sched), free_domdata, domdata);
 +        return -ENOMEM;
++    }
++
 +    memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
 +    for_each_vcpu ( d, v )
 +    {
-+        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v);
++        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
 +        if ( vcpu_priv[v->vcpu_id] == NULL )
 +        {
 +            for_each_vcpu ( d, v )
@@ -2410,6 +2468,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +                    xfree(vcpu_priv[v->vcpu_id]);
 +            }
 +            xfree(vcpu_priv);
++            SCHED_OP(&(c->sched), free_domdata, domdata);
 +            return -ENOMEM;
 +        }
 +    }
@@ -2433,6 +2492,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    }
 +
 +    d->cpupool = c;
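++    /* free the per-domain data of the old pool and install the new */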
++    SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
++    d->sched_priv = domdata;
 +
 +    domain_unpause(d);
 +
@@ -2442,7 +2503,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  }
  
  void sched_destroy_vcpu(struct vcpu *v)
-@@ -217,17 +291,17 @@ void sched_destroy_vcpu(struct vcpu *v)
+@@ -217,17 +303,17 @@ void sched_destroy_vcpu(struct vcpu *v)
      kill_timer(&v->poll_timer);
      if ( test_and_clear_bool(v->is_urgent) )
          atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
@@ -2463,7 +2524,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  }
  
  void vcpu_sleep_nosync(struct vcpu *v)
-@@ -241,7 +315,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
+@@ -241,7 +327,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
          if ( v->runstate.state == RUNSTATE_runnable )
              vcpu_runstate_change(v, RUNSTATE_offline, NOW());
  
@@ -2472,7 +2533,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      }
  
      vcpu_schedule_unlock_irqrestore(v, flags);
-@@ -269,7 +343,7 @@ void vcpu_wake(struct vcpu *v)
+@@ -269,7 +355,7 @@ void vcpu_wake(struct vcpu *v)
      {
          if ( v->runstate.state >= RUNSTATE_blocked )
              vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
@@ -2481,7 +2542,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      }
      else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
      {
-@@ -324,7 +398,7 @@ static void vcpu_migrate(struct vcpu *v)
+@@ -324,7 +410,7 @@ static void vcpu_migrate(struct vcpu *v)
  
      /* Select new CPU. */
      old_cpu = v->processor;
@@ -2490,7 +2551,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
      /*
       * Transfer urgency status to new CPU before switching CPUs, as once
-@@ -367,22 +441,32 @@ void vcpu_force_reschedule(struct vcpu *
+@@ -367,22 +453,32 @@ void vcpu_force_reschedule(struct vcpu *
  }
  
  /*
@@ -2527,7 +2588,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
              if ( (cpus_weight(v->cpu_affinity) == 1) &&
                   cpu_isset(cpu, v->cpu_affinity) )
              {
-@@ -396,39 +480,51 @@ void cpu_disable_scheduler(void)
+@@ -396,39 +492,51 @@ void cpu_disable_scheduler(void)
               * be chosen when the timer is next re-set.
               */
              if ( v->singleshot_timer.cpu == cpu )
@@ -2593,7 +2654,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      old_affinity = v->cpu_affinity;
      v->cpu_affinity = *affinity;
      *affinity = old_affinity;
-@@ -446,36 +542,6 @@ static int __vcpu_set_affinity(
+@@ -446,36 +554,6 @@ static int __vcpu_set_affinity(
      return 0;
  }
  
@@ -2630,7 +2691,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  /* Block the currently-executing domain until a pertinent event occurs. */
  static long do_block(void)
  {
-@@ -762,7 +828,7 @@ long sched_adjust(struct domain *d, stru
+@@ -762,7 +840,7 @@ long sched_adjust(struct domain *d, stru
      struct vcpu *v;
      long ret;
      
@@ -2639,7 +2700,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
           ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
            (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
          return -EINVAL;
-@@ -789,7 +855,7 @@ long sched_adjust(struct domain *d, stru
+@@ -789,7 +867,7 @@ long sched_adjust(struct domain *d, stru
      if ( d == current->domain )
          vcpu_schedule_lock_irq(current);
  
@@ -2648,7 +2709,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
          TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
  
      if ( d == current->domain )
-@@ -836,6 +902,7 @@ static void schedule(void)
+@@ -836,6 +914,7 @@ static void schedule(void)
  {
      struct vcpu          *prev = current, *next = NULL;
      s_time_t              now = NOW();
@@ -2656,7 +2717,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      struct schedule_data *sd;
      struct task_slice     next_slice;
  
-@@ -851,7 +918,7 @@ static void schedule(void)
+@@ -851,7 +930,7 @@ static void schedule(void)
      stop_timer(&sd->s_timer);
      
      /* get policy-specific decision on scheduling... */
@@ -2665,7 +2726,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
      next = next_slice.task;
  
-@@ -951,6 +1018,19 @@ static void poll_timer_fn(void *data)
+@@ -951,6 +1030,19 @@ static void poll_timer_fn(void *data)
          vcpu_unblock(v);
  }
  
@@ -2685,7 +2746,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  /* Initialise the data structures. */
  void __init scheduler_init(void)
  {
-@@ -958,12 +1038,6 @@ void __init scheduler_init(void)
+@@ -958,12 +1050,6 @@ void __init scheduler_init(void)
  
      open_softirq(SCHEDULE_SOFTIRQ, schedule);
  
@@ -2698,7 +2759,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
      for ( i = 0; schedulers[i] != NULL; i++ )
      {
          ops = *schedulers[i];
-@@ -977,43 +1051,123 @@ void __init scheduler_init(void)
+@@ -977,43 +1063,123 @@ void __init scheduler_init(void)
          ops = *schedulers[0];
      }
  
@@ -2734,16 +2795,10 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    v = per_cpu(schedule_data, cpu).idle;
 +    ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
 +    if ( c != NULL )
-+        vpriv = SCHED_OP(new_ops, alloc_vdata, v);
- 
--    local_irq_save(flags);
++        vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);
++
 +    spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
- 
--    printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
--    SCHED_OP(dump_settings);
--    printk("sched_smt_power_savings: %s\n",
--            sched_smt_power_savings? "enabled":"disabled");
--    printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now);
++
 +    if ( c == NULL )
 +    {
 +        vpriv = v->sched_priv;
@@ -2767,8 +2822,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +        SCHED_OP(old_ops, free_vdata, vpriv);
 +    SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 +}
- 
--    for_each_online_cpu ( i )
++
 +/* init scheduler global data */
 +int schedule_init_global(char *name, struct scheduler *sched)
 +{
@@ -2787,13 +2841,19 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    memcpy(sched, data, sizeof(*sched));
 +    return SCHED_OP(sched, init);
 +}
-+
+ 
+-    local_irq_save(flags);
 +/* deinitialize scheduler global data */
 +void schedule_deinit_global(struct scheduler *sched)
 +{
 +    SCHED_OP(sched, deinit);
 +}
-+
+ 
+-    printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
+-    SCHED_OP(dump_settings);
+-    printk("sched_smt_power_savings: %s\n",
+-            sched_smt_power_savings? "enabled":"disabled");
+-    printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now);
 +void schedule_dump(struct cpupool *c)
 +{
 +    int               i;
@@ -2804,7 +2864,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
 +    printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
 +    SCHED_OP(sched, dump_settings);
-+
+ 
+-    for_each_online_cpu ( i )
 +    for_each_cpu_mask (i, *cpus)
      {
          spin_lock(&per_cpu(schedule_data, i).schedule_lock);
@@ -2957,7 +3018,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  extern int cpu_down(unsigned int cpu);
 --- a/xen/include/public/domctl.h
 +++ b/xen/include/public/domctl.h
-@@ -60,10 +60,14 @@ struct xen_domctl_createdomain {
+@@ -60,10 +60,10 @@ struct xen_domctl_createdomain {
   /* Should domain memory integrity be verifed by tboot during Sx? */
  #define _XEN_DOMCTL_CDF_s3_integrity  2
  #define XEN_DOMCTL_CDF_s3_integrity   (1U<<_XEN_DOMCTL_CDF_s3_integrity)
@@ -2965,15 +3026,11 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
   /* Disable out-of-sync shadow page tables? */
  #define _XEN_DOMCTL_CDF_oos_off       3
  #define XEN_DOMCTL_CDF_oos_off        (1U<<_XEN_DOMCTL_CDF_oos_off)
-+ /* cpupool is specified (0 otherwise) */
-+#define _XEN_DOMCTL_CDF_pool          4
-+#define XEN_DOMCTL_CDF_pool           (1U<<_XEN_DOMCTL_CDF_pool)
 +    uint32_t flags;
-+    uint32_t cpupool;
  };
  typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
  DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
-@@ -106,6 +110,7 @@ struct xen_domctl_getdomaininfo {
+@@ -106,6 +106,7 @@ struct xen_domctl_getdomaininfo {
      uint32_t max_vcpu_id;        /* Maximum VCPUID in use by this domain. */
      uint32_t ssidref;
      xen_domain_handle_t handle;
@@ -2981,7 +3038,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  };
  typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
  DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
-@@ -781,6 +786,30 @@ struct xen_domctl_mem_sharing_op {
+@@ -781,6 +782,30 @@ struct xen_domctl_mem_sharing_op {
  typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
  DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
  
@@ -3012,7 +3069,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
  struct xen_domctl {
      uint32_t cmd;
-@@ -842,6 +871,7 @@ struct xen_domctl {
+@@ -842,6 +867,7 @@ struct xen_domctl {
  #define XEN_DOMCTL_gettscinfo                    59
  #define XEN_DOMCTL_settscinfo                    60
  #define XEN_DOMCTL_getpageframeinfo3             61
@@ -3020,7 +3077,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  #define XEN_DOMCTL_gdbsx_guestmemio            1000
  #define XEN_DOMCTL_gdbsx_pausevcpu             1001
  #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
-@@ -890,6 +920,7 @@ struct xen_domctl {
+@@ -890,6 +916,7 @@ struct xen_domctl {
          struct xen_domctl_debug_op          debug_op;
          struct xen_domctl_mem_event_op      mem_event_op;
          struct xen_domctl_mem_sharing_op    mem_sharing_op;
@@ -3060,7 +3117,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
  static inline void vcpu_schedule_lock(struct vcpu *v)
  {
-@@ -59,28 +72,47 @@ struct scheduler {
+@@ -59,28 +72,49 @@ struct scheduler {
      char *name;             /* full name for this scheduler      */
      char *opt_name;         /* option name for this scheduler    */
      unsigned int sched_id;  /* ID for this scheduler             */
@@ -3071,9 +3128,12 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
  
 -    void         (*init)           (void);
 +    void         (*free_vdata)     (struct scheduler *, void *);
-+    void *       (*alloc_vdata)    (struct scheduler *, struct vcpu *);
++    void *       (*alloc_vdata)    (struct scheduler *, struct vcpu *,
++                                    void *);
 +    void         (*free_pdata)     (struct scheduler *, void *, int);
 +    void *       (*alloc_pdata)    (struct scheduler *, int);
++    void         (*free_domdata)   (struct scheduler *, void *);
++    void *       (*alloc_domdata)  (struct scheduler *, struct domain *);
  
 -    int          (*init_domain)    (struct domain *);
 -    void         (*destroy_domain) (struct domain *);
@@ -3115,7 +3175,6 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
 +    cpumask_t        cpu_valid;      /* all cpus assigned to pool */
 +    struct cpupool   *next;
 +    unsigned int     n_dom;
-+    int              cpu_in_transit; /* used for adding/removing cpus */
 +    struct scheduler sched;
 +};
 +
diff --git a/hv_tools.patch b/hv_tools.patch
index 392a299..95021d3 100644
--- a/hv_tools.patch
+++ b/hv_tools.patch
@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
 ===================================================================
 --- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
 +++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
-@@ -946,16 +946,16 @@ static PyObject *pyxc_hvm_build(XcObject
+@@ -944,16 +944,16 @@ static PyObject *pyxc_hvm_build(XcObject
  #endif
      int i;
      char *image;
@@ -24,7 +24,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
          return NULL;
  
      memset(vcpu_avail, 0, sizeof(vcpu_avail));
-@@ -1007,6 +1007,7 @@ static PyObject *pyxc_hvm_build(XcObject
+@@ -1005,6 +1005,7 @@ static PyObject *pyxc_hvm_build(XcObject
      va_hvm->checksum -= sum;
      munmap(va_map, XC_PAGE_SIZE);
  #endif
diff --git a/init.xendomains b/init.xendomains
index 144e8dc..d146c63 100644
--- a/init.xendomains
+++ b/init.xendomains
@@ -8,7 +8,7 @@
 ### BEGIN INIT INFO
 # Provides:          xendomains
 # Required-Start:    $syslog $remote_fs xend
-# Should-Start:      iscsi o2cb ocfs2 openais
+# Should-Start:      iscsi o2cb ocfs2
 # Required-Stop:     $syslog $remote_fs xend
 # Should-Stop:       iscsi
 # Default-Start:     3 5
diff --git a/xen.changes b/xen.changes
index 4f94c79..96a0076 100644
--- a/xen.changes
+++ b/xen.changes
@@ -1,3 +1,15 @@
+-------------------------------------------------------------------
+Wed Apr 21 21:15:04 MDT 2010 - jfehlig@novell.com
+
+- bnc#596442 - Preserve device config on domain start failure
+  xend-preserve-devs.patch
+
+-------------------------------------------------------------------
+Tue Apr 20 15:18:31 MDT 2010 - jfehlig@novell.com
+
+- bnc#597770 - insserv reports a loop between xendomains and
+  openais.  Remove openais from Should-Start in xendomains script.
+
 -------------------------------------------------------------------
 Fri Apr 16 15:00:52 MDT 2010 - jfehlig@novell.com
 
diff --git a/xen.spec b/xen.spec
index d80ce34..511c184 100644
--- a/xen.spec
+++ b/xen.spec
@@ -26,11 +26,7 @@ ExclusiveArch:  %ix86 x86_64
 %define xen_build_dir xen-4.0.0-testing
 %define with_kmp 1
 BuildRequires:  LibVNCServer-devel SDL-devel acpica automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
-%if %suse_version >= 1030
 BuildRequires:  texlive texlive-latex
-%else
-BuildRequires:  te_ams te_latex tetex
-%endif
 %ifarch x86_64
 BuildRequires:  glibc-32bit glibc-devel-32bit
 %define max_cpus 256
@@ -44,7 +40,7 @@ BuildRequires:  kernel-source kernel-syms module-init-tools xorg-x11
 %endif
 Version:        4.0.0_21091_01
 Release:        1
-License:        GPLv2+
+License:        GPLv2
 Group:          System/Kernel
 AutoReqProv:    on
 PreReq:         %insserv_prereq %fillup_prereq
@@ -83,8 +79,11 @@ Patch1:         21109-x86-cpu-hotplug.patch
 Patch2:         21128-domain-save-flush.patch
 Patch3:         21150-shadow-race.patch
 Patch4:         21160-sysctl-debug-keys.patch
-Patch5:         blktap-script.patch
-Patch6:         ioemu-subtype.patch
+Patch5:         21189-x86-emulate-clflush.patch
+Patch6:         21193-blktap-script.patch
+Patch7:         21194-ioemu-subtype.patch
+Patch8:         21225-conring-iommu.patch
+Patch9:         xend-preserve-devs.patch
 # Our patches
 Patch300:       xen-config.diff
 Patch301:       xend-config.diff
@@ -145,7 +144,6 @@ Patch366:       cpu-pools-python.patch
 Patch367:       cpu-pools-libxen.patch
 Patch368:       cpu-pools-xmtest.patch
 Patch369:       cpu-pools-docs.patch
-Patch370:       cpu-pools-fixes.patch
 # Patches for snapshot support
 Patch400:       snapshot-ioemu-save.patch
 Patch401:       snapshot-ioemu-restore.patch
@@ -161,14 +159,14 @@ Patch415:       tapdisk-ioemu-shutdown-fix.patch
 Patch420:       blktapctrl-default-to-ioemu.patch
 Patch421:       ioemu-blktap-barriers.patch
 # Other bug fixes or features
-Patch422:       bdrv_open2_fix_flags.patch 
-Patch423:       bdrv_open2_flags_2.patch
-Patch424:       ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
-Patch425:       ioemu-bdrv-open-CACHE_WB.patch
-Patch426:       xen-ioemu-hvm-pv-support.diff
-Patch427:       qemu-dm-segfault.patch
-Patch428:       hibernate.patch
-Patch429:       del_usb_xend_entry.patch
+Patch423:       bdrv_open2_fix_flags.patch
+Patch424:       bdrv_open2_flags_2.patch
+Patch425:       ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
+Patch426:       ioemu-bdrv-open-CACHE_WB.patch
+Patch427:       xen-ioemu-hvm-pv-support.diff
+Patch428:       qemu-dm-segfault.patch
+Patch429:       hibernate.patch
+Patch430:       del_usb_xend_entry.patch
 # Jim's domain lock patch
 Patch450:       xend-domain-lock.patch
 # Hypervisor and PV driver Patches
@@ -534,6 +532,9 @@ Authors:
 %patch4 -p1
 %patch5 -p1
 %patch6 -p1
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
 %patch300 -p1
 %patch301 -p1
 %patch302 -p1
@@ -592,7 +593,6 @@ Authors:
 %patch367 -p1
 %patch368 -p1
 %patch369 -p1
-%patch370 -p1
 %patch400 -p1
 %patch401 -p1
 %patch402 -p1
@@ -605,7 +605,6 @@ Authors:
 %patch415 -p1
 %patch420 -p1
 %patch421 -p1
-%patch422 -p1
 %patch423 -p1
 %patch424 -p1
 %patch425 -p1
@@ -613,6 +612,7 @@ Authors:
 %patch427 -p1
 %patch428 -p1
 %patch429 -p1
+%patch430 -p1
 %patch450 -p1
 %patch500 -p1
 %patch501 -p1
diff --git a/xend-domain-lock.patch b/xend-domain-lock.patch
index 609d8a3..dd87ea2 100644
--- a/xend-domain-lock.patch
+++ b/xend-domain-lock.patch
@@ -23,7 +23,7 @@ Index: xen-4.0.0-testing/tools/examples/xend-config.sxp
 +#        Lock is placed in /<xend-domain-lock-path>/<domain-uuid>.
 +# Default is /var/lib/xen/images/vm_locks/
 +#
-+#(xend-domain-lock-path /var/lib/xend/domains)
++#(xend-domain-lock-path /var/lib/xen/images/vm_locks)
 +
 +# External locking utility called by xend for acquiring/releasing
 +# domain lock.  By default /etc/xen/scripts/domain-lock will be used
@@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
                  XendTask.log_progress(0, 30, self._constructDomain)
                  XendTask.log_progress(31, 60, self._initDomain)
                  
-@@ -2979,6 +2981,11 @@ class XendDomainInfo:
+@@ -2983,6 +2985,11 @@ class XendDomainInfo:
  
              self._stateSet(DOM_STATE_HALTED)
              self.domid = None  # Do not push into _stateSet()!
@@ -103,7 +103,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
          finally:
              self.refresh_shutdown_lock.release()
  
-@@ -4485,6 +4492,74 @@ class XendDomainInfo:
+@@ -4489,6 +4496,74 @@ class XendDomainInfo:
      def has_device(self, dev_class, dev_uuid):
          return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
  
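
The comment block above describes the locking scheme: one lock file per domain, /<xend-domain-lock-path>/<domain-uuid>, taken and dropped by an external utility. A C analogue of what such a utility can do; this is a sketch only, not the implementation of the shipped /etc/xen/scripts/domain-lock helper, which is a shell script:

    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>

    /* Returns an fd holding an exclusive advisory lock, or -1 if the
     * lock is already held (e.g. the domain is running on another
     * host that shares the lock directory). */
    static int acquire_domain_lock(const char *lockfile)
    {
        int fd = open(lockfile, O_RDWR | O_CREAT, 0644);

        if ( fd < 0 )
            return -1;
        if ( flock(fd, LOCK_EX | LOCK_NB) < 0 )
        {
            close(fd);
            return -1;
        }
        return fd;   /* hold the fd open for the domain's lifetime */
    }

Release is the reverse: flock(fd, LOCK_UN) followed by close(). The path passed in would be the per-domain lock file under xend-domain-lock-path as described above.
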
diff --git a/xend-preserve-devs.patch b/xend-preserve-devs.patch
new file mode 100644
index 0000000..1a671fd
--- /dev/null
+++ b/xend-preserve-devs.patch
@@ -0,0 +1,14 @@
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+@@ -1166,8 +1166,8 @@ class XendConfig(dict):
+                                     config.append(['VDI', dev_cfg.get('VDI', '')])
+ 
+                                 sxpr.append(['device', config])
++                                found = True
+ 
+-                            found = True
+                         except:
+                             log.exception("dumping sxp from device controllers")
+                             pass
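
The one-line move above is the whole fix: found is now set only on the path where a device stanza was actually appended to the s-expression, so an exception in the device-dump loop no longer leaves the flag claiming success. A minimal C analogue of the same control-flow point (illustrative only; the real change is the Python shown above):

    /* Sketch: the success flag belongs next to the successful emit,
     * not after code that an error path may skip. */
    #include <stdbool.h>
    #include <stdio.h>

    struct device { int valid; const char *name; };

    static bool dump_devices(const struct device *devs, int n)
    {
        bool found = false;

        for ( int i = 0; i < n; i++ )
        {
            if ( !devs[i].valid )
                continue;            /* analogue of the except: path */
            printf("(device %s)\n", devs[i].name);
            found = true;            /* set only after a real emit */
        }
        return found;
    }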