- Fix bnc#466899 - NUMA-enabled Xen fails to start/create VMs
  adjust_vcpuaffinity_more_cpu.patch

- Update to changeset 21022, Xen 4.0.0 RC6.

- bnc#586510 - cpupool fixes
  cpu-pools-update.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=35
Charles Arnold 2010-03-19 17:16:56 +00:00 committed by Git OBS Bridge
parent 5e5f163f43
commit a9bb3bc2f5
21 changed files with 866 additions and 141 deletions


@@ -673,10 +673,10 @@ Index: xen-4.0.0-testing/tools/blktap/drivers/Makefile
MEMSHRLIBS += $(MEMSHR_DIR)/libmemshr.a
endif
-LDFLAGS_blktapctrl := $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm
-LDFLAGS_blktapctrl := $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm -lpthread
-LDFLAGS_img := $(LIBAIO_DIR)/libaio.a $(CRYPT_LIB) -lpthread -lz
+LDFLAGS_xen := $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore)
+LDFLAGS_blktapctrl := $(LDFLAGS_xen) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm
+LDFLAGS_blktapctrl := $(LDFLAGS_xen) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm -lpthread
+LDFLAGS_img := $(LIBAIO_DIR)/libaio.a $(CRYPT_LIB) -lpthread -lz $(LDFLAGS_xen)
BLK-OBJS-y := block-aio.o


@@ -2,9 +2,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -241,6 +241,9 @@ op_start () {
return
fi
@@ -245,6 +245,9 @@ op_start () {
claim_lock "network-bridge"
+ local bonded=""
+ [ -e /sys/class/net/${netdev}/bonding ] && bonded="yes"
@@ -12,7 +12,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
vlans=$(find_active_vlans "${netdev}")
for vlan in $vlans ; do ifdown $vlan ; done
@@ -258,18 +261,32 @@ op_start () {
@@ -262,18 +265,32 @@ op_start () {
ip link set ${netdev} down
ip addr flush ${netdev}
fi


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -262,18 +262,18 @@ op_stop () {
@@ -270,19 +270,19 @@ op_stop () {
transfer_addrs ${bridge} ${pdev}
if ! ifdown ${bridge}; then
get_ip_info ${bridge}
@@ -24,8 +24,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
+ ip link set ${pdev} down
ip link set ${pdev} name ${netdev}
do_ifup ${netdev}
-
- brctl delbr ${tdev}
-
release_lock "network-bridge"
}
# adds $dev to $bridge but waits for $dev to be in running state first


@@ -1,8 +1,8 @@
Index: xen-3.5.0-testing/tools/hotplug/Linux/network-bridge
Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-3.5.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-3.5.0-testing/tools/hotplug/Linux/network-bridge
@@ -249,6 +249,11 @@ op_start () {
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -253,6 +253,11 @@ op_start () {
create_bridge ${tdev}
@@ -14,7 +14,7 @@ Index: xen-3.5.0-testing/tools/hotplug/Linux/network-bridge
preiftransfer ${netdev}
transfer_addrs ${netdev} ${tdev}
# Remember slaves for bonding interface.
@@ -322,6 +327,13 @@ op_stop () {
@@ -330,6 +335,13 @@ op_stop () {
ip link set ${pdev} name ${netdev}
do_ifup ${netdev}
@@ -26,5 +26,5 @@ Index: xen-3.5.0-testing/tools/hotplug/Linux/network-bridge
+ fi
+
for vlan in $vlans ; do ifup $vlan ; done
}
release_lock "network-bridge"


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -191,6 +191,28 @@ antispoofing () {
@@ -193,6 +193,28 @@ antispoofing () {
iptables -A FORWARD -m physdev --physdev-in ${pdev} -j ACCEPT
}
@@ -31,9 +31,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
# Usage: show_status dev bridge
# Print ifconfig and routes.
show_status () {
@@ -219,6 +241,9 @@ op_start () {
return
fi
@@ -223,6 +245,9 @@ op_start () {
claim_lock "network-bridge"
+ vlans=$(find_active_vlans "${netdev}")
+ for vlan in $vlans ; do ifdown $vlan ; done
@@ -41,7 +41,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
create_bridge ${tdev}
preiftransfer ${netdev}
@@ -246,6 +271,8 @@ op_start () {
@@ -250,6 +275,8 @@ op_start () {
add_to_bridge2 ${bridge} ${pdev}
do_ifup ${bridge}
@@ -50,9 +50,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
if [ ${antispoof} = 'yes' ] ; then
antispoofing
fi
@@ -259,6 +286,9 @@ op_stop () {
return
fi
@@ -267,6 +294,9 @@ op_stop () {
claim_lock "network-bridge"
+ vlans=$(find_active_vlans "${netdev}")
+ for vlan in $vlans ; do ifdown $vlan ; done
@@ -60,12 +60,12 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
transfer_addrs ${bridge} ${pdev}
if ! ifdown ${bridge}; then
get_ip_info ${bridge}
@@ -274,6 +304,8 @@ op_stop () {
ip link set ${pdev} down
@@ -283,6 +313,8 @@ op_stop () {
ip link set ${pdev} name ${netdev}
do_ifup ${netdev}
+
+ for vlan in $vlans ; do ifup $vlan ; done
+
release_lock "network-bridge"
}
# adds $dev to $bridge but waits for $dev to be in running state first


@@ -60,7 +60,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/configure
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/configure
+++ xen-4.0.0-testing/tools/ioemu-remote/configure
@@ -1508,7 +1508,7 @@ bsd)
@@ -1511,7 +1511,7 @@ bsd)
;;
esac
@@ -121,7 +121,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/tapdisk-ioemu.c
typedef struct IOHandlerRecord {
int fd;
IOCanRWHandler *fd_read_poll;
@@ -103,7 +81,6 @@ int main(void)
logfile = stderr;
bdrv_init();
@@ -129,7 +129,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/tapdisk-ioemu.c
init_blktap();
/* Daemonize */
@@ -115,8 +92,6 @@ int main(void)
* completed aio operations.
*/
while (1) {

cpu-pools-fixes.patch (new file)

@@ -0,0 +1,773 @@
Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
@@ -54,7 +54,7 @@
Added definitions of new classes cpu\_pool. Updated the table
and the diagram representing relationships between classes.
Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
- host\_cpu.cpu\_pool.
+ host\_cpu.cpu\_pool.\tabularnewline
\hline
\end{tabular}
\end{center}
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendCPUPool.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -547,7 +547,7 @@ class XendCPUPool(XendBase):
def pool_start(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.activate()
except XendAPIError, ex:
@@ -566,8 +566,12 @@ class XendCPUPool(XendBase):
for cpu_ref in pool_vals['host_CPUs'] ]
cpus.sort()
pool_vals['host_CPU_numbers'] = cpus
- vm_names = [ xd.get_vm_by_uuid(uuid).getName()
- for uuid in pool_vals['started_VMs'] ]
+ # query VMs names. Take in account, that a VM
+ # returned by get_all_records could be destroy, now
+ vm_names = [ vm.getName()
+ for vm in map(xd.get_vm_by_uuid,
+ pool_vals['started_VMs'])
+ if vm ]
pool_vals['started_VM_names'] = vm_names
pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
sxprs += [[pool_uuid] + map2sxp(pool_vals)]
@@ -578,7 +582,7 @@ class XendCPUPool(XendBase):
def pool_destroy(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.deactivate()
if not pool.is_managed():
@@ -589,7 +593,7 @@ class XendCPUPool(XendBase):
def pool_delete(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.destroy()
except XendAPIError, ex:
@@ -598,28 +602,28 @@ class XendCPUPool(XendBase):
def pool_cpu_add(cls, poolname, cpu):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
cpu_ref = cls._cpu_number_to_ref(int(cpu))
if cpu_ref:
pool.add_host_CPU_live(cpu_ref)
else:
raise PoolError(XEND_ERROR_INVALID_CPU,
- 'CPU unkown')
+ 'CPU unknown')
except XendAPIError, ex:
raise VmError(ex.get_api_error())
def pool_cpu_remove(cls, poolname, cpu):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
cpu_ref = cls._cpu_number_to_ref(int(cpu))
if cpu_ref:
pool.remove_host_CPU_live(cpu_ref)
else:
raise PoolError(XEND_ERROR_INVALID_CPU,
- 'CPU unkown')
+ 'CPU unknown')
except XendAPIError, ex:
raise VmError(ex.get_api_error())
@@ -627,10 +631,10 @@ class XendCPUPool(XendBase):
dom = XendDomain.instance()
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
dominfo = dom.domain_lookup_nr(domname)
if not dominfo:
- raise VmError('unkown domain %s' % domname)
+ raise VmError('unknown domain %s' % domname)
domid = dominfo.getDomid()
if domid is not None:
if domid == 0:
@@ -860,8 +864,11 @@ class XendCPUPool(XendBase):
pool_uuid = None
try:
pool_id = int(id_or_name)
- # pool id given
+ # pool id given ?
pool_uuid = cls.query_pool_ref(pool_id)
+ if not pool_uuid:
+ # not found -> search name
+ pool_uuid = cls.get_by_name_label(id_or_name)
except ValueError:
# pool name given
pool_uuid = cls.get_by_name_label(id_or_name)
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2574,7 +2574,7 @@ class XendDomainInfo:
pool = XendCPUPool.lookup_pool(pool_name)
if pool is None:
- raise VmError("unkown pool %s" % pool_name)
+ raise VmError("unknown pool %s" % pool_name)
pool_id = pool.query_pool_id()
if pool_id is None:
raise VmError("pool %s not activated" % pool_name)
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -3515,7 +3515,7 @@ def get_pool_ref(name):
if len(refs) > 0:
return refs[0]
else:
- err('unkown pool name')
+ err('unknown pool name')
sys.exit(1)
def xm_pool_start(args):
@@ -3643,7 +3643,7 @@ def xm_pool_cpu_add(args):
cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
if c_rec['number'] == args[1] ]
if len(cpu_ref) == 0:
- err('cpu number unkown')
+ err('cpu number unknown')
else:
server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
else:
@@ -3657,7 +3657,7 @@ def xm_pool_cpu_remove(args):
cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
if c_rec['number'] == args[1] ]
if len(cpu_ref) == 0:
- err('cpu number unkown')
+ err('cpu number unknown')
else:
server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
else:
Index: xen-4.0.0-testing/xen/common/cpupool.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/cpupool.c
+++ xen-4.0.0-testing/xen/common/cpupool.c
@@ -29,6 +29,9 @@ static struct cpupool *cpupool_list;
static int cpupool0_max_cpus;
integer_param("pool0_max_cpus", cpupool0_max_cpus);
+static int cpupool_moving_cpu = -1;
+static struct cpupool *cpupool_cpu_moving = NULL;
+
/* cpupool lock: be carefull, this lock is sometimes released on another cpu
* as it was obtained!
*/
@@ -104,7 +107,6 @@ struct cpupool *cpupool_create(int pooli
}
*q = c;
c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
- c->cpu_in_transit = -1;
if ( schedule_init_global(sched, &(c->sched)) )
{
spin_unlock(&cpupool_lock);
@@ -151,16 +153,20 @@ int cpupool_destroy(struct cpupool *c)
* assign a specific cpu to a cpupool
* cpupool_lock must be held
*/
-static void cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
+static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
- printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
- c->cpupool_id, cpu);
+ if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
+ return -EBUSY;
per_cpu(cpupool, cpu) = c;
schedule_cpu_switch(cpu, c);
cpu_clear(cpu, cpupool_free_cpus);
+ if (cpupool_moving_cpu == cpu)
+ {
+ cpupool_moving_cpu = -1;
+ cpupool_cpu_moving = NULL;
+ }
cpu_set(cpu, c->cpu_valid);
- printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ready\n",
- c->cpupool_id, cpu);
+ return 0;
}
/*
@@ -177,8 +183,8 @@ int cpupool_assign_ncpu(struct cpupool *
spin_lock(&cpupool_lock);
for_each_cpu_mask(i, cpupool_free_cpus)
{
- cpupool_assign_cpu_locked(c, i);
- n++;
+ if ( cpupool_assign_cpu_locked(c, i) == 0 )
+ n++;
if ( n == ncpu )
break;
}
@@ -188,43 +194,25 @@ int cpupool_assign_ncpu(struct cpupool *
return n;
}
-static void cpupool_unassign_cpu_locked_1(struct cpupool *c, unsigned int cpu)
-{
- printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
- c->cpupool_id, cpu);
- c->cpu_in_transit = cpu;
-}
-
-static int cpupool_unassign_cpu_locked_2(struct cpupool *c)
+static long cpupool_unassign_cpu_helper(void *hdl, void *info)
{
- int cpu = c->cpu_in_transit;
- int ret;
+ struct cpupool *c = (struct cpupool *)info;
+ int cpu = cpupool_moving_cpu;
+ long ret;
+ int cpupool_id = c->cpupool_id;
- c->cpu_in_transit = -1;
- cpu_clear(cpu, c->cpu_valid);
ret = cpu_disable_scheduler(cpu, 1);
- if ( ret )
- {
- cpu_set(cpu, c->cpu_valid);
- }
- else
+ cpu_set(cpu, cpupool_free_cpus);
+ if ( !ret )
{
- cpu_set(cpu, cpupool_free_cpus);
schedule_cpu_switch(cpu, NULL);
per_cpu(cpupool, cpu) = NULL;
+ cpupool_moving_cpu = -1;
+ cpupool_cpu_moving = NULL;
}
- printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
- c->cpupool_id, cpu, ret);
- return ret;
-}
-
-static long cpupool_unassign_cpu_helper(void *hdl, void *info)
-{
- struct cpupool *c = (struct cpupool *)info;
- long ret;
-
- ret = cpupool_unassign_cpu_locked_2(c);
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
+ cpupool_id, cpu, ret);
return ret;
}
@@ -242,16 +230,23 @@ static long cpupool_unassign_cpu_helper(
int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
{
int work_cpu;
- int rc = 0;
+ int ret;
struct domain *d;
+ int cpupool_id = c->cpupool_id;
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ cpupool_id, cpu);
spin_lock(&cpupool_lock);
- if ( !cpu_isset(cpu, c->cpu_valid) )
- {
- spin_unlock(&cpupool_lock);
- return 0;
- }
- if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) )
+ ret = -EBUSY;
+ if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
+ goto out;
+
+ ret = 0;
+ if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+ goto out;
+
+ if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+ (cpu != cpupool_moving_cpu) )
{
for_each_domain(d)
{
@@ -259,27 +254,24 @@ int cpupool_unassign_cpu(struct cpupool
continue;
if ( !d->is_dying )
{
- rc = -EBUSY;
+ ret = -EBUSY;
break;
}
- printk(XENLOG_DEBUG "moving dying domain %d to pool0\n",
- d->domain_id);
c->n_dom--;
- rc = sched_move_domain(d, cpupool0);
- if ( rc )
+ ret = sched_move_domain(d, cpupool0);
+ if ( ret )
{
c->n_dom++;
break;
}
cpupool0->n_dom++;
}
- if ( rc )
- {
- spin_unlock(&cpupool_lock);
- return rc;
- }
+ if ( ret )
+ goto out;
}
- cpupool_unassign_cpu_locked_1(c, cpu);
+ cpupool_moving_cpu = cpu;
+ cpupool_cpu_moving = c;
+ cpu_clear(cpu, c->cpu_valid);
work_cpu = smp_processor_id();
if ( work_cpu == cpu )
{
@@ -289,6 +281,12 @@ int cpupool_unassign_cpu(struct cpupool
}
return continue_hypercall_on_cpu(work_cpu, NULL,
cpupool_unassign_cpu_helper, c);
+
+out:
+ spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+ cpupool_id, cpu, ret);
+ return ret;
}
/*
@@ -316,6 +314,7 @@ int cpupool_add_domain(struct domain *d,
{
struct cpupool *c;
int rc = 1;
+ int n_dom;
if ( poolid == CPUPOOLID_NONE )
return 0;
@@ -324,12 +323,14 @@ int cpupool_add_domain(struct domain *d,
if ( (c != NULL) && cpus_weight(c->cpu_valid) )
{
c->n_dom++;
+ n_dom = c->n_dom;
d->cpupool = c;
- printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
- d->domain_id, poolid, c->n_dom);
rc = 0;
}
spin_unlock(&cpupool_lock);
+ if (!rc)
+ printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
+ d->domain_id, poolid, n_dom);
return rc;
}
@@ -338,14 +339,19 @@ int cpupool_add_domain(struct domain *d,
*/
void cpupool_rm_domain(struct domain *d)
{
+ int cpupool_id;
+ int n_dom;
+
if ( d->cpupool == NULL )
return;
spin_lock(&cpupool_lock);
+ cpupool_id = d->cpupool->cpupool_id;
d->cpupool->n_dom--;
- printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
- d->domain_id, d->cpupool->cpupool_id, d->cpupool->n_dom);
+ n_dom = d->cpupool->n_dom;
d->cpupool = NULL;
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+ d->domain_id, cpupool_id, n_dom);
return;
}
@@ -359,7 +365,7 @@ void cpupool_cpu_add(unsigned int cpu)
return;
spin_lock(&cpupool_lock);
cpu_set(cpu, cpupool_free_cpus);
- cpupool_assign_cpu_locked(cpupool0, cpu);
+ (void)cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
return;
}
@@ -428,6 +434,8 @@ int cpupool_do_domctl(struct xen_domctl_
unsigned cpu;
cpu = op->cpu;
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+ op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
cpu = first_cpu(cpupool_free_cpus);
@@ -441,10 +449,11 @@ int cpupool_do_domctl(struct xen_domctl_
ret = -ENOENT;
if ( c == NULL )
goto addcpu_out;
- cpupool_assign_cpu_locked(c, cpu);
- ret = 0;
+ ret = cpupool_assign_cpu_locked(c, cpu);
addcpu_out:
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+ op->cpupool_id, cpu, ret);
}
break;
@@ -488,23 +497,23 @@ addcpu_out:
rcu_unlock_domain(d);
break;
}
+ printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
+ d->domain_id, op->cpupool_id);
ret = -ENOENT;
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 1);
if ( (c != NULL) && cpus_weight(c->cpu_valid) )
{
- printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
- d->domain_id, c->cpupool_id);
d->cpupool->n_dom--;
ret = sched_move_domain(d, c);
if ( ret )
d->cpupool->n_dom++;
else
c->n_dom++;
- printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
- d->domain_id, c->cpupool_id, ret);
}
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+ d->domain_id, op->cpupool_id, ret);
rcu_unlock_domain(d);
}
break;
Index: xen-4.0.0-testing/xen/common/sched_credit.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_credit.c
+++ xen-4.0.0-testing/xen/common/sched_credit.c
@@ -602,7 +602,7 @@ csched_vcpu_acct(struct csched_private *
}
static void *
-csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc)
+csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc, void *dd)
{
struct csched_vcpu *svc;
@@ -614,7 +614,7 @@ csched_alloc_vdata(struct scheduler *ops
INIT_LIST_HEAD(&svc->runq_elem);
INIT_LIST_HEAD(&svc->active_vcpu_elem);
- svc->sdom = CSCHED_DOM(vc->domain);
+ svc->sdom = dd;
svc->vcpu = vc;
atomic_set(&svc->credit, 0);
svc->flags = 0U;
@@ -778,19 +778,14 @@ csched_dom_cntl(
return 0;
}
-static int
-csched_dom_init(struct scheduler *ops, struct domain *dom)
+static void *
+csched_alloc_domdata(struct scheduler *ops, struct domain *dom)
{
struct csched_dom *sdom;
- CSCHED_STAT_CRANK(dom_init);
-
- if ( is_idle_domain(dom) )
- return 0;
-
sdom = xmalloc(struct csched_dom);
if ( sdom == NULL )
- return -ENOMEM;
+ return NULL;
memset(sdom, 0, sizeof(*sdom));
/* Initialize credit and weight */
@@ -800,16 +795,40 @@ csched_dom_init(struct scheduler *ops, s
sdom->dom = dom;
sdom->weight = CSCHED_DEFAULT_WEIGHT;
sdom->cap = 0U;
+
+ return (void *)sdom;
+}
+
+static int
+csched_dom_init(struct scheduler *ops, struct domain *dom)
+{
+ struct csched_dom *sdom;
+
+ CSCHED_STAT_CRANK(dom_init);
+
+ if ( is_idle_domain(dom) )
+ return 0;
+
+ sdom = csched_alloc_domdata(ops, dom);
+ if ( sdom == NULL )
+ return -ENOMEM;
+
dom->sched_priv = sdom;
return 0;
}
static void
+csched_free_domdata(struct scheduler *ops, void *data)
+{
+ xfree(data);
+}
+
+static void
csched_dom_destroy(struct scheduler *ops, struct domain *dom)
{
CSCHED_STAT_CRANK(dom_destroy);
- xfree(CSCHED_DOM(dom));
+ csched_free_domdata(ops, CSCHED_DOM(dom));
}
/*
@@ -1147,9 +1166,10 @@ csched_load_balance(struct csched_privat
int peer_cpu;
BUG_ON( cpu != snext->vcpu->processor );
+ online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
/* If this CPU is going offline we shouldn't steal work. */
- if ( unlikely(!cpu_online(cpu)) )
+ if ( unlikely(!cpu_isset(cpu, *online)) )
goto out;
if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1163,7 +1183,6 @@ csched_load_balance(struct csched_privat
* Peek at non-idling CPUs in the system, starting with our
* immediate neighbour.
*/
- online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
cpus_andnot(workers, *online, prv->idlers);
cpu_clear(cpu, workers);
peer_cpu = cpu;
@@ -1218,31 +1237,6 @@ csched_schedule(struct scheduler *ops, s
CSCHED_STAT_CRANK(schedule);
CSCHED_VCPU_CHECK(current);
- if ( unlikely(!cpu_isset(cpu, *CSCHED_CPUONLINE(per_cpu(cpupool, cpu)))) )
- {
- /* race with switching cpu between pools: when cpu is leaving the
- pool try to schedule idle vcpu */
-
- struct list_head * iter;
-
- snext = scurr;
- if (is_idle_vcpu(current))
- goto out;
-
- if ( vcpu_runnable(current) )
- __runq_insert(cpu, scurr);
-
- list_for_each(iter, runq)
- {
- snext = __runq_elem(iter);
- if ( snext->pri == CSCHED_PRI_IDLE )
- break;
- }
- BUG_ON( snext->pri != CSCHED_PRI_IDLE );
- __runq_remove(snext);
- goto out;
- }
-
/* Update credits */
if ( !is_idle_vcpu(scurr->vcpu) )
{
@@ -1273,7 +1267,6 @@ csched_schedule(struct scheduler *ops, s
else
snext = csched_load_balance(prv, cpu, snext);
-out:
/*
* Update idlers mask if necessary. When we're idling, other CPUs
* will tickle us when they get extra work.
@@ -1553,6 +1546,8 @@ struct scheduler sched_credit_def = {
.free_vdata = csched_free_vdata,
.alloc_pdata = csched_alloc_pdata,
.free_pdata = csched_free_pdata,
+ .alloc_domdata = csched_alloc_domdata,
+ .free_domdata = csched_free_domdata,
.tick_suspend = csched_tick_suspend,
.tick_resume = csched_tick_resume,
Index: xen-4.0.0-testing/xen/common/sched_sedf.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_sedf.c
+++ xen-4.0.0-testing/xen/common/sched_sedf.c
@@ -332,7 +332,7 @@ static inline void __add_to_runqueue_sor
}
-static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v)
+static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v, void *dd)
{
struct sedf_vcpu_info *inf;
@@ -415,20 +415,37 @@ static void sedf_destroy_vcpu(struct sch
sedf_free_vdata(ops, v->sched_priv);
}
+static void *
+sedf_alloc_domdata(struct scheduler *ops, struct domain *d)
+{
+ void *mem;
+
+ mem = xmalloc(struct sedf_dom_info);
+ if ( mem == NULL )
+ return NULL;
+
+ memset(mem, 0, sizeof(struct sedf_dom_info));
+
+ return mem;
+}
+
static int sedf_init_domain(struct scheduler *ops, struct domain *d)
{
- d->sched_priv = xmalloc(struct sedf_dom_info);
+ d->sched_priv = sedf_alloc_domdata(ops, d);
if ( d->sched_priv == NULL )
return -ENOMEM;
- memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
-
return 0;
}
+static void sedf_free_domdata(struct scheduler *ops, void *data)
+{
+ xfree(data);
+}
+
static void sedf_destroy_domain(struct scheduler *ops, struct domain *d)
{
- xfree(d->sched_priv);
+ sedf_free_domdata(ops, d->sched_priv);
}
static int sedf_pick_cpu(struct scheduler *ops, struct vcpu *v)
@@ -1498,6 +1515,8 @@ struct scheduler sched_sedf_def = {
.free_vdata = sedf_free_vdata,
.alloc_pdata = sedf_alloc_pdata,
.free_pdata = sedf_free_pdata,
+ .alloc_domdata = sedf_alloc_domdata,
+ .free_domdata = sedf_free_domdata,
.do_schedule = sedf_do_schedule,
.pick_cpu = sedf_pick_cpu,
Index: xen-4.0.0-testing/xen/common/schedule.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/schedule.c
+++ xen-4.0.0-testing/xen/common/schedule.c
@@ -222,7 +222,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
return 1;
}
- v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v);
+ v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
if ( v->sched_priv == NULL )
return 1;
@@ -237,14 +237,23 @@ int sched_move_domain(struct domain *d,
struct vcpu *v;
unsigned int new_p;
void **vcpu_priv;
+ void *domdata;
+
+ domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+ if ( domdata == NULL )
+ return -ENOMEM;
vcpu_priv = xmalloc_array(void *, d->max_vcpus);
if ( vcpu_priv == NULL )
+ {
+ SCHED_OP(&(c->sched), free_domdata, domdata);
return -ENOMEM;
+ }
+
memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
for_each_vcpu ( d, v )
{
- vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v);
+ vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
if ( vcpu_priv[v->vcpu_id] == NULL )
{
for_each_vcpu ( d, v )
@@ -253,6 +262,7 @@ int sched_move_domain(struct domain *d,
xfree(vcpu_priv[v->vcpu_id]);
}
xfree(vcpu_priv);
+ SCHED_OP(&(c->sched), free_domdata, domdata);
return -ENOMEM;
}
}
@@ -276,6 +286,8 @@ int sched_move_domain(struct domain *d,
}
d->cpupool = c;
+ SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
+ d->sched_priv = domdata;
domain_unpause(d);
@@ -1079,7 +1091,7 @@ void schedule_cpu_switch(unsigned int cp
v = per_cpu(schedule_data, cpu).idle;
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
if ( c != NULL )
- vpriv = SCHED_OP(new_ops, alloc_vdata, v);
+ vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);
spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
Index: xen-4.0.0-testing/xen/include/xen/sched-if.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/xen/sched-if.h
+++ xen-4.0.0-testing/xen/include/xen/sched-if.h
@@ -78,9 +78,12 @@ struct scheduler {
void (*deinit) (struct scheduler *);
void (*free_vdata) (struct scheduler *, void *);
- void * (*alloc_vdata) (struct scheduler *, struct vcpu *);
+ void * (*alloc_vdata) (struct scheduler *, struct vcpu *,
+ void *);
void (*free_pdata) (struct scheduler *, void *, int);
void * (*alloc_pdata) (struct scheduler *, int);
+ void (*free_domdata) (struct scheduler *, void *);
+ void * (*alloc_domdata) (struct scheduler *, struct domain *);
int (*init_domain) (struct scheduler *, struct domain *);
void (*destroy_domain) (struct scheduler *, struct domain *);
@@ -109,7 +112,6 @@ struct cpupool
cpumask_t cpu_valid; /* all cpus assigned to pool */
struct cpupool *next;
unsigned int n_dom;
- int cpu_in_transit; /* used for adding/removing cpus */
struct scheduler sched;
};

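A note on the cpupool rework above: the old two-phase unassign (cpupool_unassign_cpu_locked_1/_2 plus a per-pool cpu_in_transit field) is replaced by a single global in-transit record (cpupool_moving_cpu / cpupool_cpu_moving) that both the assign and unassign paths check. The toy user-space model below sketches just that locking protocol; it is not Xen code. The cpu_disable_scheduler()/schedule_cpu_switch() tail that really completes the move (run in cpupool_unassign_cpu_helper() via continue_hypercall_on_cpu()) is elided, so in this toy a moving cpu can only return to its source pool.

    #include <errno.h>
    #include <stdio.h>

    struct cpupool {
        const char    *name;
        unsigned long  cpu_valid;   /* bitmask of cpus owned by this pool */
    };

    static int             cpupool_moving_cpu = -1;   /* cpu in transit, or -1 */
    static struct cpupool *cpupool_cpu_moving = NULL; /* pool it is leaving */

    /* Mirrors cpupool_assign_cpu_locked(): a cpu still in transit may only
     * be (re)assigned to the pool it is leaving. */
    static int assign_cpu(struct cpupool *c, int cpu)
    {
        if (cpupool_moving_cpu == cpu && c != cpupool_cpu_moving)
            return -EBUSY;
        if (cpupool_moving_cpu == cpu) {   /* returning home ends the transit */
            cpupool_moving_cpu = -1;
            cpupool_cpu_moving = NULL;
        }
        c->cpu_valid |= 1UL << cpu;
        return 0;
    }

    /* Mirrors the front half of cpupool_unassign_cpu(): record the transit
     * and hide the cpu from the pool.  In Xen the rest happens later in
     * cpupool_unassign_cpu_helper() on another cpu, which also clears the
     * transit state so the cpu becomes free for any pool. */
    static int unassign_cpu(struct cpupool *c, int cpu)
    {
        if (cpupool_moving_cpu != -1 && cpu != cpupool_moving_cpu)
            return -EBUSY;                 /* only one move at a time */
        cpupool_moving_cpu = cpu;
        cpupool_cpu_moving = c;
        c->cpu_valid &= ~(1UL << cpu);
        return 0;
    }

    int main(void)
    {
        struct cpupool pool0 = { "Pool-0", 0x3 };  /* owns cpus 0 and 1 */
        struct cpupool pool1 = { "Pool-1", 0x0 };

        printf("unassign cpu1 from Pool-0: %d\n", unassign_cpu(&pool0, 1)); /* 0 */
        printf("assign cpu1 to Pool-1:     %d\n", assign_cpu(&pool1, 1));   /* -EBUSY */
        printf("assign cpu1 back:          %d\n", assign_cpu(&pool0, 1));   /* 0 */
        return 0;
    }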

@@ -204,7 +204,7 @@ Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
return err;
@@ -206,6 +213,7 @@ int xc_domain_getinfo(int xc_handle,
@@ -220,6 +227,7 @@ int xc_domain_getinfo(int xc_handle,
info->cpu_time = domctl.u.getdomaininfo.cpu_time;
info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
@@ -257,7 +257,7 @@ Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
/* Functions to produce a dump of a given domain
@@ -500,6 +501,100 @@ int xc_domain_setdebugging(int xc_handle
@@ -502,6 +503,100 @@ int xc_domain_setdebugging(int xc_handle
unsigned int enable);
/*


@@ -36,7 +36,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
return pyxc_error_to_exception();
if ( target )
@@ -316,7 +318,7 @@ static PyObject *pyxc_domain_getinfo(XcO
@@ -332,7 +334,7 @@ static PyObject *pyxc_domain_getinfo(XcO
{
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
@@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
"domid", (int)info[i].domid,
"online_vcpus", info[i].nr_online_vcpus,
"max_vcpu_id", info[i].max_vcpu_id,
@@ -331,7 +333,8 @@ static PyObject *pyxc_domain_getinfo(XcO
@@ -347,7 +349,8 @@ static PyObject *pyxc_domain_getinfo(XcO
"cpu_time", (long long)info[i].cpu_time,
"maxmem_kb", (long long)info[i].max_memkb,
"ssidref", (int)info[i].ssidref,
@@ -55,7 +55,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
pyhandle = PyList_New(sizeof(xen_domain_handle_t));
if ( (pyhandle == NULL) || (info_dict == NULL) )
{
@@ -1697,6 +1700,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
@@ -1755,6 +1758,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
return zero;
}
@@ -235,7 +235,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
static PyMethodDef pyxc_methods[] = {
{ "handle",
@@ -1812,7 +1988,8 @@ static PyMethodDef pyxc_methods[] = {
@@ -1870,7 +2046,8 @@ static PyMethodDef pyxc_methods[] = {
" maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
" cpu_time [long]: CPU time consumed, in nanoseconds\n"
" shutdown_reason [int]: Numeric code from guest OS, explaining "
@@ -245,7 +245,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
{ "vcpu_getinfo",
(PyCFunction)pyxc_vcpu_getinfo,
@@ -2210,6 +2387,66 @@ static PyMethodDef pyxc_methods[] = {
@@ -2268,6 +2445,66 @@ static PyMethodDef pyxc_methods[] = {
" enable [int,0|1]: Disable or enable?\n"
"Returns: [int] 0 on success; -1 on error.\n" },
@@ -1489,15 +1489,15 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
}
LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
@@ -233,6 +234,7 @@ XENAPI_CFG_TYPES = {
's3_integrity' : int,
@@ -234,6 +235,7 @@ XENAPI_CFG_TYPES = {
'superpages' : int,
'memory_sharing': int,
'Description': str,
+ 'pool_name' : str,
}
# List of legacy configuration keys that have no equivalent in the
@@ -278,6 +280,7 @@ LEGACY_CFG_TYPES = {
@@ -279,6 +281,7 @@ LEGACY_CFG_TYPES = {
'bootloader': str,
'bootloader_args': str,
'description': str,
@@ -1505,7 +1505,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
}
# Values that should be stored in xenstore's /vm/<uuid> that is used
@@ -299,6 +302,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
@@ -300,6 +303,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
'on_xend_stop',
'bootloader',
'bootloader_args',
@@ -1513,7 +1513,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
]
##
@@ -407,6 +411,7 @@ class XendConfig(dict):
@@ -408,6 +412,7 @@ class XendConfig(dict):
'other_config': {},
'platform': {},
'target': 0,


@@ -6,12 +6,13 @@
- move and rename csched_priv to make sure eventual backported
upstream patches using the variable get correctly adjusted (i.e.
missing adjustments get detected at build time)
- remove boot time per-CPU pool assignment messages (bnc#572146)
Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -1573,6 +1573,7 @@ int continue_hypercall_on_cpu(int cpu, v
@@ -1580,6 +1580,7 @@ int continue_hypercall_on_cpu(int cpu, v
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
@@ -19,7 +20,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
}
else
{
@@ -1583,7 +1584,6 @@ int continue_hypercall_on_cpu(int cpu, v
@@ -1590,7 +1591,6 @@ int continue_hypercall_on_cpu(int cpu, v
info->func = func;
info->data = data;


@@ -4,7 +4,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/acpi/power.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/acpi/power.c
+++ xen-4.0.0-testing/xen/arch/x86/acpi/power.c
@@ -229,7 +229,7 @@ static int enter_state(u32 state)
@@ -231,7 +231,7 @@ static int enter_state(u32 state)
return error;
}
@@ -13,7 +13,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/acpi/power.c
{
struct acpi_sleep_info *sinfo = (struct acpi_sleep_info *)data;
return enter_state(sinfo->sleep_state);
@@ -260,7 +260,7 @@ int acpi_enter_sleep(struct xenpf_enter_
@@ -262,7 +262,7 @@ int acpi_enter_sleep(struct xenpf_enter_
acpi_sinfo.pm1b_cnt_val = sleep->pm1b_cnt_val;
acpi_sinfo.sleep_state = sleep->sleep_state;
@@ -26,7 +26,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -1510,42 +1510,52 @@ void sync_vcpu_execstate(struct vcpu *v)
@@ -1517,42 +1517,52 @@ void sync_vcpu_execstate(struct vcpu *v)
}
struct migrate_info {
@@ -96,7 +96,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
info = v->arch.continue_info;
if ( info == NULL )
@@ -1554,16 +1564,12 @@ int continue_hypercall_on_cpu(int cpu, l
@@ -1561,16 +1571,12 @@ int continue_hypercall_on_cpu(int cpu, l
if ( info == NULL )
return -ENOMEM;
@@ -117,7 +117,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
@@ -1571,17 +1577,17 @@ int continue_hypercall_on_cpu(int cpu, l
@@ -1578,17 +1584,17 @@ int continue_hypercall_on_cpu(int cpu, l
else
{
BUG_ON(info->nest != 0);
@@ -160,7 +160,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain_build.c
if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
opt_dom0_max_vcpus = MAX_VIRT_CPUS;
@@ -248,7 +249,7 @@ int __init construct_dom0(
@@ -277,7 +278,7 @@ int __init construct_dom0(
unsigned long _initrd_start, unsigned long initrd_len,
char *cmdline)
{
@@ -169,7 +169,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain_build.c
struct cpu_user_regs *regs;
unsigned long pfn, mfn;
unsigned long nr_pages;
@@ -757,8 +758,12 @@ int __init construct_dom0(
@@ -776,8 +777,12 @@ int __init construct_dom0(
printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
@@ -372,7 +372,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
*/
int alloc_cpu_id(void)
{
@@ -984,10 +984,10 @@ static int __devinit do_boot_cpu(int api
@@ -985,10 +985,10 @@ static int __devinit do_boot_cpu(int api
cpucount--;
/* Mark the CPU as non-present */
@@ -430,7 +430,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
if (cpu_online(cpu)) {
printk("Bring up a online cpu. Bogus!\n");
err = -EBUSY;
@@ -1398,7 +1400,7 @@ int cpu_up(unsigned int cpu)
@@ -1400,7 +1402,7 @@ int cpu_up(unsigned int cpu)
out:
if (!err)
send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
@@ -439,7 +439,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
return err;
}
@@ -1479,13 +1481,13 @@ int cpu_add(uint32_t apic_id, uint32_t a
@@ -1481,13 +1483,13 @@ int cpu_add(uint32_t apic_id, uint32_t a
if ( physid_isset(apic_id, phys_cpu_present_map) )
return -EEXIST;
@@ -455,7 +455,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
return cpu;
}
@@ -1502,7 +1504,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
@@ -1504,7 +1506,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
"Setup node failed for pxm %x\n", pxm);
x86_acpiid_to_apicid[acpi_id] = 0xff;
mp_unregister_lapic(apic_id, cpu);
@@ -464,7 +464,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
return node;
}
apicid_to_node[apic_id] = node;
@@ -1510,7 +1512,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
@@ -1512,7 +1514,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
srat_detect_node(cpu);
numa_add_cpu(cpu);
@@ -473,7 +473,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
dprintk(XENLOG_INFO, "Add CPU %x with index %x\n", apic_id, cpu);
return cpu;
}
@@ -1554,6 +1556,7 @@ int __devinit __cpu_up(unsigned int cpu)
@@ -1556,6 +1558,7 @@ int __devinit __cpu_up(unsigned int cpu)
process_pending_softirqs();
}


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -143,7 +143,7 @@ void dump_pageframe_info(struct domain *
@@ -144,7 +144,7 @@ void dump_pageframe_info(struct domain *
printk("Memory pages belonging to domain %u:\n", d->domain_id);
@@ -11,7 +11,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
{
printk(" DomPage list too long to display\n");
}
@@ -151,6 +151,15 @@ void dump_pageframe_info(struct domain *
@@ -152,6 +152,15 @@ void dump_pageframe_info(struct domain *
{
page_list_for_each ( page, &d->page_list )
{


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -917,16 +917,16 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -950,16 +950,16 @@ static PyObject *pyxc_hvm_build(XcObject
#endif
int i;
char *image;
@@ -24,7 +24,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
return NULL;
memset(vcpu_avail, 0, sizeof(vcpu_avail));
@@ -978,6 +978,7 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -1011,6 +1011,7 @@ static PyObject *pyxc_hvm_build(XcObject
va_hvm->checksum -= sum;
munmap(va_map, XC_PAGE_SIZE);
#endif


@@ -1,66 +0,0 @@
In domain_create, previously we reserve 1M memory for domain creation (as
described in xend comment), and these memory SHOULD NOT related with vcpu
number. And later, shadow_mem_control() will modify the shadow size to 256
pages per vcpu (also plus some other values related with guest memory size...).
Therefore the C/S 20389 which modifies 1M to 4M to fit more vcpu number is
wrong. I'm sorry for that.
Following is the reason why currently 1M doesn't work for big number vcpus,
as we mentioned, it caused Xen crash.
Each time when sh_set_allocation() is called, it checks whether
shadow_min_acceptable_pages() has been allocated, if not, it will allocate
them. That is to say, it is 128 pages per vcpu. But before we define
d->max_vcpu, guest vcpu hasn't been initialized, so
shadow_min_acceptable_pages() always returns 0. Therefore we only allocated 1M
shadow memory for domain_create, and didn't satisfy 128 pages per vcpu for
alloc_vcpu().
As we know, vcpu allocation is done in the hypercall of
XEN_DOMCTL_max_vcpus. However, at this point we haven't called
shadow_mem_control() and are still using the pre-allocated 1M shadow memory to
allocate so many vcpus. So it should be a BUG. Therefore when vcpu number
increases, 1M is not enough and causes Xen crash. C/S 20389 exposes this issue.
So I think the right process should be, after d->max_vcpu is set and before
alloc_vcpu(), we should call sh_set_allocation() to satisfy 128 pages per vcpu.
The following patch does this work. Is it work for you? Thanks!
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Index: xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/mm/shadow/common.c
+++ xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
@@ -41,6 +41,9 @@
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
+static unsigned int sh_set_allocation(struct domain *d,
+ unsigned int pages,
+ int *preempted);
/* Set up the shadow-specific parts of a domain struct at start of day.
* Called for every domain from arch_domain_create() */
void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
@@ -82,6 +85,12 @@ void shadow_vcpu_init(struct vcpu *v)
}
#endif
+ if ( !is_idle_domain(v->domain) )
+ {
+ shadow_lock(v->domain);
+ sh_set_allocation(v->domain, 128, NULL);
+ shadow_unlock(v->domain);
+ }
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
}
@@ -3100,7 +3109,7 @@ int shadow_enable(struct domain *d, u32
{
unsigned int r;
shadow_lock(d);
- r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
+ r = sh_set_allocation(d, 256, NULL); /* Use at least 1MB */
if ( r != 0 )
{
sh_set_allocation(d, 0, NULL);

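For reference, the arithmetic behind the shadow.patch text above (the file itself is dropped from the package in this commit), assuming 4 KiB x86 pages:

    128 pages/vcpu * 4 KiB = 512 KiB   minimum shadow memory per vcpu
    256 pages      * 4 KiB = 1 MiB     the pre-allocation the patch restores
    1024 pages     * 4 KiB = 4 MiB     the C/S 20389 value the patch reverts

So a flat 1 MiB pre-allocation covers the 128-page minimum for only two vcpus, which is why the per-vcpu reservation had to move into shadow_vcpu_init(), where it runs once as each vcpu is created.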

@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/blktap/drivers/blktapctrl.c
===================================================================
--- xen-4.0.0-testing.orig/tools/blktap/drivers/blktapctrl.c
+++ xen-4.0.0-testing/tools/blktap/drivers/blktapctrl.c
@@ -347,6 +347,7 @@ static int write_msg(int fd, int msgtype
@@ -348,6 +348,7 @@ static int write_msg(int fd, int msgtype
msg_dev = (msg_newdev_t *)(buf + sizeof(msg_hdr_t));
msg_dev->devnum = blkif->minor;
msg_dev->domid = blkif->domid;


@@ -325,9 +325,9 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -235,6 +235,7 @@ XENAPI_CFG_TYPES = {
'superpages' : int,
@@ -236,6 +236,7 @@ XENAPI_CFG_TYPES = {
'memory_sharing': int,
'Description': str,
'pool_name' : str,
+ 'snapshotname': str,
}


@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93bf258a35751639f506e7a46ef6056f17bdea18c0b31823da3f562523c7d44a
size 23112538
oid sha256:ecdd97da2f63df66d5e965f8ba11481422332cd01f70521d67bcf62d207f5d61
size 23203635


@@ -9,7 +9,7 @@ Index: xen-4.0.0-testing/Config.mk
-CONFIG_QEMU ?= $(QEMU_REMOTE)
+CONFIG_QEMU ?= ioemu-remote
QEMU_TAG := xen-4.0.0-rc5
QEMU_TAG := xen-4.0.0-rc6
#QEMU_TAG ?= e5d14857cd67490bf956d97c8888c0be95ed3f78
@@ -167,9 +167,9 @@ CONFIG_OCAML_XENSTORED ?= n
# Optional components


@@ -165,7 +165,7 @@ Index: xen-4.0.0-testing/tools/blktap2/drivers/block-remus.c
===================================================================
--- xen-4.0.0-testing.orig/tools/blktap2/drivers/block-remus.c
+++ xen-4.0.0-testing/tools/blktap2/drivers/block-remus.c
@@ -1578,7 +1578,7 @@ static int tdremus_open(td_driver_t *dri
@@ -1579,7 +1579,7 @@ static int tdremus_open(td_driver_t *dri
td_flag_t flags)
{
struct tdremus_state *s = (struct tdremus_state *)driver->data;


@@ -1,3 +1,19 @@
-------------------------------------------------------------------
Wed Mar 17 16:42:20 CST 2010 - jsong@novell.com
-Fix bnc#466899 - numa enabled xen fails to start/create vms
adjust_vcpuaffinity_more_cpu.patch
-------------------------------------------------------------------
Tue Mar 9 16:28:59 MST 2010 - carnold@novell.com
- Update to changeset 21022 Xen 4.0.0 RC6.
-------------------------------------------------------------------
Tue Mar 9 10:43:27 MST 2010 - carnold@novell.com
- bnc#586510 - cpupool fixes
cpu-pools-update.patch
-------------------------------------------------------------------
Fri Mar 5 09:04:18 MST 2010 - carnold@novell.com


@@ -1,5 +1,5 @@
#
# spec file for package xen (Version 4.0.0_21010_01)
# spec file for package xen (Version 4.0.0_21046_01)
#
# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
@@ -22,7 +22,7 @@ Name: xen
ExclusiveArch: %ix86 x86_64
%define xvers 4.0
%define xvermaj 4
%define changeset 21010
%define changeset 21046
%define xen_build_dir xen-4.0.0-testing
%define with_kmp 0
BuildRequires: LibVNCServer-devel SDL-devel acpica automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
@@ -37,7 +37,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit
%if %{?with_kmp}0
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_21010_01
Version: 4.0.0_21046_01
Release: 1
License: GPLv2
Group: System/Kernel
@@ -91,6 +91,7 @@ Patch313: xen-xm-top-needs-root.diff
Patch314: xen-max-free-mem.diff
Patch315: xen-disable-libxl.diff
Patch316: xen-disable-xenpaging.diff
Patch317: xen-extra-fixes.patch
Patch320: block-losetup-retry.diff
Patch321: block-flags.diff
Patch322: bridge-opensuse.patch
@@ -132,7 +133,7 @@ Patch366: cpu-pools-python.patch
Patch367: cpu-pools-libxen.patch
Patch368: cpu-pools-xmtest.patch
Patch369: cpu-pools-docs.patch
Patch370: xen-extra-fixes.patch
Patch370: cpu-pools-fixes.patch
# Patches for snapshot support
Patch400: snapshot-ioemu-save.patch
Patch401: snapshot-ioemu-restore.patch
@@ -154,8 +155,7 @@ Patch424: ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
Patch425: ioemu-bdrv-open-CACHE_WB.patch
Patch426: xen-ioemu-hvm-pv-support.diff
Patch427: qemu-dm-segfault.patch
Patch428: shadow.patch
Patch429: hibernate.patch
Patch428: hibernate.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
# Hypervisor and PV driver Patches
@@ -531,6 +531,7 @@ Authors:
%patch314 -p1
%patch315 -p1
%patch316 -p1
%patch317 -p1
#%patch320 -p1
#%patch321 -p1
%patch322 -p1
@@ -591,7 +592,6 @@ Authors:
%patch426 -p1
%patch427 -p1
%patch428 -p1
%patch429 -p1
%patch450 -p1
%patch500 -p1
%patch501 -p1