Accepting request 35384 from Virtualization

checked in (request 35384)

OBS-URL: https://build.opensuse.org/request/show/35384
OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=36
OBS User autobuild 2010-03-21 10:57:54 +00:00 committed by Git OBS Bridge
parent a9bb3bc2f5
commit daed39e878
27 changed files with 311 additions and 930 deletions


@@ -673,10 +673,10 @@ Index: xen-4.0.0-testing/tools/blktap/drivers/Makefile
MEMSHRLIBS += $(MEMSHR_DIR)/libmemshr.a
endif
-LDFLAGS_blktapctrl := $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm -lpthread
-LDFLAGS_blktapctrl := $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm
-LDFLAGS_img := $(LIBAIO_DIR)/libaio.a $(CRYPT_LIB) -lpthread -lz
+LDFLAGS_xen := $(LDFLAGS_libxenctrl) $(LDFLAGS_libxenstore)
+LDFLAGS_blktapctrl := $(LDFLAGS_xen) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm -lpthread
+LDFLAGS_blktapctrl := $(LDFLAGS_xen) $(MEMSHRLIBS) -L../lib -lblktap -lrt -lm
+LDFLAGS_img := $(LIBAIO_DIR)/libaio.a $(CRYPT_LIB) -lpthread -lz $(LDFLAGS_xen)
BLK-OBJS-y := block-aio.o


@@ -2,9 +2,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -245,6 +245,9 @@ op_start () {
claim_lock "network-bridge"
@@ -241,6 +241,9 @@ op_start () {
return
fi
+ local bonded=""
+ [ -e /sys/class/net/${netdev}/bonding ] && bonded="yes"
@@ -12,7 +12,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
vlans=$(find_active_vlans "${netdev}")
for vlan in $vlans ; do ifdown $vlan ; done
@@ -262,18 +265,32 @@ op_start () {
@@ -258,18 +261,32 @@ op_start () {
ip link set ${netdev} down
ip addr flush ${netdev}
fi


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -270,19 +270,19 @@ op_stop () {
@@ -262,18 +262,18 @@ op_stop () {
transfer_addrs ${bridge} ${pdev}
if ! ifdown ${bridge}; then
get_ip_info ${bridge}
@@ -24,9 +24,8 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
+ ip link set ${pdev} down
ip link set ${pdev} name ${netdev}
do_ifup ${netdev}
- brctl delbr ${tdev}
-
release_lock "network-bridge"
- brctl delbr ${tdev}
}
# adds $dev to $bridge but waits for $dev to be in running state first


@@ -1,8 +1,8 @@
Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
Index: xen-3.5.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -253,6 +253,11 @@ op_start () {
--- xen-3.5.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-3.5.0-testing/tools/hotplug/Linux/network-bridge
@@ -249,6 +249,11 @@ op_start () {
create_bridge ${tdev}
@@ -14,7 +14,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
preiftransfer ${netdev}
transfer_addrs ${netdev} ${tdev}
# Remember slaves for bonding interface.
@@ -330,6 +335,13 @@ op_stop () {
@@ -322,6 +327,13 @@ op_stop () {
ip link set ${pdev} name ${netdev}
do_ifup ${netdev}
@@ -26,5 +26,5 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
+ fi
+
for vlan in $vlans ; do ifup $vlan ; done
}
release_lock "network-bridge"


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
===================================================================
--- xen-4.0.0-testing.orig/tools/hotplug/Linux/network-bridge
+++ xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
@@ -193,6 +193,28 @@ antispoofing () {
@@ -191,6 +191,28 @@ antispoofing () {
iptables -A FORWARD -m physdev --physdev-in ${pdev} -j ACCEPT
}
@@ -31,9 +31,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
# Usage: show_status dev bridge
# Print ifconfig and routes.
show_status () {
@@ -223,6 +245,9 @@ op_start () {
claim_lock "network-bridge"
@@ -219,6 +241,9 @@ op_start () {
return
fi
+ vlans=$(find_active_vlans "${netdev}")
+ for vlan in $vlans ; do ifdown $vlan ; done
@@ -41,7 +41,7 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
create_bridge ${tdev}
preiftransfer ${netdev}
@@ -250,6 +275,8 @@ op_start () {
@@ -246,6 +271,8 @@ op_start () {
add_to_bridge2 ${bridge} ${pdev}
do_ifup ${bridge}
@@ -50,9 +50,9 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
if [ ${antispoof} = 'yes' ] ; then
antispoofing
fi
@@ -267,6 +294,9 @@ op_stop () {
claim_lock "network-bridge"
@@ -259,6 +286,9 @@ op_stop () {
return
fi
+ vlans=$(find_active_vlans "${netdev}")
+ for vlan in $vlans ; do ifdown $vlan ; done
@@ -60,12 +60,12 @@ Index: xen-4.0.0-testing/tools/hotplug/Linux/network-bridge
transfer_addrs ${bridge} ${pdev}
if ! ifdown ${bridge}; then
get_ip_info ${bridge}
@@ -283,6 +313,8 @@ op_stop () {
@@ -274,6 +304,8 @@ op_stop () {
ip link set ${pdev} down
ip link set ${pdev} name ${netdev}
do_ifup ${netdev}
+ for vlan in $vlans ; do ifup $vlan ; done
+
release_lock "network-bridge"
+ for vlan in $vlans ; do ifup $vlan ; done
}
# adds $dev to $bridge but waits for $dev to be in running state first


@@ -60,7 +60,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/configure
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/configure
+++ xen-4.0.0-testing/tools/ioemu-remote/configure
@@ -1511,7 +1511,7 @@ bsd)
@@ -1508,7 +1508,7 @@ bsd)
;;
esac


@@ -1,773 +0,0 @@
Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
@@ -54,7 +54,7 @@
Added definitions of new classes cpu\_pool. Updated the table
and the diagram representing relationships between classes.
Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
- host\_cpu.cpu\_pool.
+ host\_cpu.cpu\_pool.\tabularnewline
\hline
\end{tabular}
\end{center}
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendCPUPool.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -547,7 +547,7 @@ class XendCPUPool(XendBase):
def pool_start(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.activate()
except XendAPIError, ex:
@@ -566,8 +566,12 @@ class XendCPUPool(XendBase):
for cpu_ref in pool_vals['host_CPUs'] ]
cpus.sort()
pool_vals['host_CPU_numbers'] = cpus
- vm_names = [ xd.get_vm_by_uuid(uuid).getName()
- for uuid in pool_vals['started_VMs'] ]
+ # query VMs names. Take in account, that a VM
+ # returned by get_all_records could be destroy, now
+ vm_names = [ vm.getName()
+ for vm in map(xd.get_vm_by_uuid,
+ pool_vals['started_VMs'])
+ if vm ]
pool_vals['started_VM_names'] = vm_names
pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
sxprs += [[pool_uuid] + map2sxp(pool_vals)]
@@ -578,7 +582,7 @@ class XendCPUPool(XendBase):
def pool_destroy(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.deactivate()
if not pool.is_managed():
@@ -589,7 +593,7 @@ class XendCPUPool(XendBase):
def pool_delete(cls, poolname):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
pool.destroy()
except XendAPIError, ex:
@@ -598,28 +602,28 @@ class XendCPUPool(XendBase):
def pool_cpu_add(cls, poolname, cpu):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
cpu_ref = cls._cpu_number_to_ref(int(cpu))
if cpu_ref:
pool.add_host_CPU_live(cpu_ref)
else:
raise PoolError(XEND_ERROR_INVALID_CPU,
- 'CPU unkown')
+ 'CPU unknown')
except XendAPIError, ex:
raise VmError(ex.get_api_error())
def pool_cpu_remove(cls, poolname, cpu):
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
try:
cpu_ref = cls._cpu_number_to_ref(int(cpu))
if cpu_ref:
pool.remove_host_CPU_live(cpu_ref)
else:
raise PoolError(XEND_ERROR_INVALID_CPU,
- 'CPU unkown')
+ 'CPU unknown')
except XendAPIError, ex:
raise VmError(ex.get_api_error())
@@ -627,10 +631,10 @@ class XendCPUPool(XendBase):
dom = XendDomain.instance()
pool = cls.lookup_pool(poolname)
if not pool:
- raise VmError('unkown pool %s' % poolname)
+ raise VmError('unknown pool %s' % poolname)
dominfo = dom.domain_lookup_nr(domname)
if not dominfo:
- raise VmError('unkown domain %s' % domname)
+ raise VmError('unknown domain %s' % domname)
domid = dominfo.getDomid()
if domid is not None:
if domid == 0:
@@ -860,8 +864,11 @@ class XendCPUPool(XendBase):
pool_uuid = None
try:
pool_id = int(id_or_name)
- # pool id given
+ # pool id given ?
pool_uuid = cls.query_pool_ref(pool_id)
+ if not pool_uuid:
+ # not found -> search name
+ pool_uuid = cls.get_by_name_label(id_or_name)
except ValueError:
# pool name given
pool_uuid = cls.get_by_name_label(id_or_name)
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2574,7 +2574,7 @@ class XendDomainInfo:
pool = XendCPUPool.lookup_pool(pool_name)
if pool is None:
- raise VmError("unkown pool %s" % pool_name)
+ raise VmError("unknown pool %s" % pool_name)
pool_id = pool.query_pool_id()
if pool_id is None:
raise VmError("pool %s not activated" % pool_name)
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -3515,7 +3515,7 @@ def get_pool_ref(name):
if len(refs) > 0:
return refs[0]
else:
- err('unkown pool name')
+ err('unknown pool name')
sys.exit(1)
def xm_pool_start(args):
@@ -3643,7 +3643,7 @@ def xm_pool_cpu_add(args):
cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
if c_rec['number'] == args[1] ]
if len(cpu_ref) == 0:
- err('cpu number unkown')
+ err('cpu number unknown')
else:
server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
else:
@@ -3657,7 +3657,7 @@ def xm_pool_cpu_remove(args):
cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
if c_rec['number'] == args[1] ]
if len(cpu_ref) == 0:
- err('cpu number unkown')
+ err('cpu number unknown')
else:
server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
else:
Index: xen-4.0.0-testing/xen/common/cpupool.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/cpupool.c
+++ xen-4.0.0-testing/xen/common/cpupool.c
@@ -29,6 +29,9 @@ static struct cpupool *cpupool_list;
static int cpupool0_max_cpus;
integer_param("pool0_max_cpus", cpupool0_max_cpus);
+static int cpupool_moving_cpu = -1;
+static struct cpupool *cpupool_cpu_moving = NULL;
+
/* cpupool lock: be carefull, this lock is sometimes released on another cpu
* as it was obtained!
*/
@@ -104,7 +107,6 @@ struct cpupool *cpupool_create(int pooli
}
*q = c;
c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
- c->cpu_in_transit = -1;
if ( schedule_init_global(sched, &(c->sched)) )
{
spin_unlock(&cpupool_lock);
@@ -151,16 +153,20 @@ int cpupool_destroy(struct cpupool *c)
* assign a specific cpu to a cpupool
* cpupool_lock must be held
*/
-static void cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
+static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
- printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
- c->cpupool_id, cpu);
+ if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
+ return -EBUSY;
per_cpu(cpupool, cpu) = c;
schedule_cpu_switch(cpu, c);
cpu_clear(cpu, cpupool_free_cpus);
+ if (cpupool_moving_cpu == cpu)
+ {
+ cpupool_moving_cpu = -1;
+ cpupool_cpu_moving = NULL;
+ }
cpu_set(cpu, c->cpu_valid);
- printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ready\n",
- c->cpupool_id, cpu);
+ return 0;
}
/*
@@ -177,8 +183,8 @@ int cpupool_assign_ncpu(struct cpupool *
spin_lock(&cpupool_lock);
for_each_cpu_mask(i, cpupool_free_cpus)
{
- cpupool_assign_cpu_locked(c, i);
- n++;
+ if ( cpupool_assign_cpu_locked(c, i) == 0 )
+ n++;
if ( n == ncpu )
break;
}
@@ -188,43 +194,25 @@ int cpupool_assign_ncpu(struct cpupool *
return n;
}
-static void cpupool_unassign_cpu_locked_1(struct cpupool *c, unsigned int cpu)
-{
- printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
- c->cpupool_id, cpu);
- c->cpu_in_transit = cpu;
-}
-
-static int cpupool_unassign_cpu_locked_2(struct cpupool *c)
+static long cpupool_unassign_cpu_helper(void *hdl, void *info)
{
- int cpu = c->cpu_in_transit;
- int ret;
+ struct cpupool *c = (struct cpupool *)info;
+ int cpu = cpupool_moving_cpu;
+ long ret;
+ int cpupool_id = c->cpupool_id;
- c->cpu_in_transit = -1;
- cpu_clear(cpu, c->cpu_valid);
ret = cpu_disable_scheduler(cpu, 1);
- if ( ret )
- {
- cpu_set(cpu, c->cpu_valid);
- }
- else
+ cpu_set(cpu, cpupool_free_cpus);
+ if ( !ret )
{
- cpu_set(cpu, cpupool_free_cpus);
schedule_cpu_switch(cpu, NULL);
per_cpu(cpupool, cpu) = NULL;
+ cpupool_moving_cpu = -1;
+ cpupool_cpu_moving = NULL;
}
- printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
- c->cpupool_id, cpu, ret);
- return ret;
-}
-
-static long cpupool_unassign_cpu_helper(void *hdl, void *info)
-{
- struct cpupool *c = (struct cpupool *)info;
- long ret;
-
- ret = cpupool_unassign_cpu_locked_2(c);
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
+ cpupool_id, cpu, ret);
return ret;
}
@@ -242,16 +230,23 @@ static long cpupool_unassign_cpu_helper(
int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
{
int work_cpu;
- int rc = 0;
+ int ret;
struct domain *d;
+ int cpupool_id = c->cpupool_id;
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ cpupool_id, cpu);
spin_lock(&cpupool_lock);
- if ( !cpu_isset(cpu, c->cpu_valid) )
- {
- spin_unlock(&cpupool_lock);
- return 0;
- }
- if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) )
+ ret = -EBUSY;
+ if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
+ goto out;
+
+ ret = 0;
+ if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+ goto out;
+
+ if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+ (cpu != cpupool_moving_cpu) )
{
for_each_domain(d)
{
@@ -259,27 +254,24 @@ int cpupool_unassign_cpu(struct cpupool
continue;
if ( !d->is_dying )
{
- rc = -EBUSY;
+ ret = -EBUSY;
break;
}
- printk(XENLOG_DEBUG "moving dying domain %d to pool0\n",
- d->domain_id);
c->n_dom--;
- rc = sched_move_domain(d, cpupool0);
- if ( rc )
+ ret = sched_move_domain(d, cpupool0);
+ if ( ret )
{
c->n_dom++;
break;
}
cpupool0->n_dom++;
}
- if ( rc )
- {
- spin_unlock(&cpupool_lock);
- return rc;
- }
+ if ( ret )
+ goto out;
}
- cpupool_unassign_cpu_locked_1(c, cpu);
+ cpupool_moving_cpu = cpu;
+ cpupool_cpu_moving = c;
+ cpu_clear(cpu, c->cpu_valid);
work_cpu = smp_processor_id();
if ( work_cpu == cpu )
{
@@ -289,6 +281,12 @@ int cpupool_unassign_cpu(struct cpupool
}
return continue_hypercall_on_cpu(work_cpu, NULL,
cpupool_unassign_cpu_helper, c);
+
+out:
+ spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+ cpupool_id, cpu, ret);
+ return ret;
}
/*
@@ -316,6 +314,7 @@ int cpupool_add_domain(struct domain *d,
{
struct cpupool *c;
int rc = 1;
+ int n_dom;
if ( poolid == CPUPOOLID_NONE )
return 0;
@@ -324,12 +323,14 @@ int cpupool_add_domain(struct domain *d,
if ( (c != NULL) && cpus_weight(c->cpu_valid) )
{
c->n_dom++;
+ n_dom = c->n_dom;
d->cpupool = c;
- printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
- d->domain_id, poolid, c->n_dom);
rc = 0;
}
spin_unlock(&cpupool_lock);
+ if (!rc)
+ printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
+ d->domain_id, poolid, n_dom);
return rc;
}
@@ -338,14 +339,19 @@ int cpupool_add_domain(struct domain *d,
*/
void cpupool_rm_domain(struct domain *d)
{
+ int cpupool_id;
+ int n_dom;
+
if ( d->cpupool == NULL )
return;
spin_lock(&cpupool_lock);
+ cpupool_id = d->cpupool->cpupool_id;
d->cpupool->n_dom--;
- printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
- d->domain_id, d->cpupool->cpupool_id, d->cpupool->n_dom);
+ n_dom = d->cpupool->n_dom;
d->cpupool = NULL;
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+ d->domain_id, cpupool_id, n_dom);
return;
}
@@ -359,7 +365,7 @@ void cpupool_cpu_add(unsigned int cpu)
return;
spin_lock(&cpupool_lock);
cpu_set(cpu, cpupool_free_cpus);
- cpupool_assign_cpu_locked(cpupool0, cpu);
+ (void)cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
return;
}
@@ -428,6 +434,8 @@ int cpupool_do_domctl(struct xen_domctl_
unsigned cpu;
cpu = op->cpu;
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+ op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
cpu = first_cpu(cpupool_free_cpus);
@@ -441,10 +449,11 @@ int cpupool_do_domctl(struct xen_domctl_
ret = -ENOENT;
if ( c == NULL )
goto addcpu_out;
- cpupool_assign_cpu_locked(c, cpu);
- ret = 0;
+ ret = cpupool_assign_cpu_locked(c, cpu);
addcpu_out:
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+ op->cpupool_id, cpu, ret);
}
break;
@@ -488,23 +497,23 @@ addcpu_out:
rcu_unlock_domain(d);
break;
}
+ printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
+ d->domain_id, op->cpupool_id);
ret = -ENOENT;
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 1);
if ( (c != NULL) && cpus_weight(c->cpu_valid) )
{
- printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
- d->domain_id, c->cpupool_id);
d->cpupool->n_dom--;
ret = sched_move_domain(d, c);
if ( ret )
d->cpupool->n_dom++;
else
c->n_dom++;
- printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
- d->domain_id, c->cpupool_id, ret);
}
spin_unlock(&cpupool_lock);
+ printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+ d->domain_id, op->cpupool_id, ret);
rcu_unlock_domain(d);
}
break;
Index: xen-4.0.0-testing/xen/common/sched_credit.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_credit.c
+++ xen-4.0.0-testing/xen/common/sched_credit.c
@@ -602,7 +602,7 @@ csched_vcpu_acct(struct csched_private *
}
static void *
-csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc)
+csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc, void *dd)
{
struct csched_vcpu *svc;
@@ -614,7 +614,7 @@ csched_alloc_vdata(struct scheduler *ops
INIT_LIST_HEAD(&svc->runq_elem);
INIT_LIST_HEAD(&svc->active_vcpu_elem);
- svc->sdom = CSCHED_DOM(vc->domain);
+ svc->sdom = dd;
svc->vcpu = vc;
atomic_set(&svc->credit, 0);
svc->flags = 0U;
@@ -778,19 +778,14 @@ csched_dom_cntl(
return 0;
}
-static int
-csched_dom_init(struct scheduler *ops, struct domain *dom)
+static void *
+csched_alloc_domdata(struct scheduler *ops, struct domain *dom)
{
struct csched_dom *sdom;
- CSCHED_STAT_CRANK(dom_init);
-
- if ( is_idle_domain(dom) )
- return 0;
-
sdom = xmalloc(struct csched_dom);
if ( sdom == NULL )
- return -ENOMEM;
+ return NULL;
memset(sdom, 0, sizeof(*sdom));
/* Initialize credit and weight */
@@ -800,16 +795,40 @@ csched_dom_init(struct scheduler *ops, s
sdom->dom = dom;
sdom->weight = CSCHED_DEFAULT_WEIGHT;
sdom->cap = 0U;
+
+ return (void *)sdom;
+}
+
+static int
+csched_dom_init(struct scheduler *ops, struct domain *dom)
+{
+ struct csched_dom *sdom;
+
+ CSCHED_STAT_CRANK(dom_init);
+
+ if ( is_idle_domain(dom) )
+ return 0;
+
+ sdom = csched_alloc_domdata(ops, dom);
+ if ( sdom == NULL )
+ return -ENOMEM;
+
dom->sched_priv = sdom;
return 0;
}
static void
+csched_free_domdata(struct scheduler *ops, void *data)
+{
+ xfree(data);
+}
+
+static void
csched_dom_destroy(struct scheduler *ops, struct domain *dom)
{
CSCHED_STAT_CRANK(dom_destroy);
- xfree(CSCHED_DOM(dom));
+ csched_free_domdata(ops, CSCHED_DOM(dom));
}
/*
@@ -1147,9 +1166,10 @@ csched_load_balance(struct csched_privat
int peer_cpu;
BUG_ON( cpu != snext->vcpu->processor );
+ online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
/* If this CPU is going offline we shouldn't steal work. */
- if ( unlikely(!cpu_online(cpu)) )
+ if ( unlikely(!cpu_isset(cpu, *online)) )
goto out;
if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1163,7 +1183,6 @@ csched_load_balance(struct csched_privat
* Peek at non-idling CPUs in the system, starting with our
* immediate neighbour.
*/
- online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
cpus_andnot(workers, *online, prv->idlers);
cpu_clear(cpu, workers);
peer_cpu = cpu;
@@ -1218,31 +1237,6 @@ csched_schedule(struct scheduler *ops, s
CSCHED_STAT_CRANK(schedule);
CSCHED_VCPU_CHECK(current);
- if ( unlikely(!cpu_isset(cpu, *CSCHED_CPUONLINE(per_cpu(cpupool, cpu)))) )
- {
- /* race with switching cpu between pools: when cpu is leaving the
- pool try to schedule idle vcpu */
-
- struct list_head * iter;
-
- snext = scurr;
- if (is_idle_vcpu(current))
- goto out;
-
- if ( vcpu_runnable(current) )
- __runq_insert(cpu, scurr);
-
- list_for_each(iter, runq)
- {
- snext = __runq_elem(iter);
- if ( snext->pri == CSCHED_PRI_IDLE )
- break;
- }
- BUG_ON( snext->pri != CSCHED_PRI_IDLE );
- __runq_remove(snext);
- goto out;
- }
-
/* Update credits */
if ( !is_idle_vcpu(scurr->vcpu) )
{
@@ -1273,7 +1267,6 @@ csched_schedule(struct scheduler *ops, s
else
snext = csched_load_balance(prv, cpu, snext);
-out:
/*
* Update idlers mask if necessary. When we're idling, other CPUs
* will tickle us when they get extra work.
@@ -1553,6 +1546,8 @@ struct scheduler sched_credit_def = {
.free_vdata = csched_free_vdata,
.alloc_pdata = csched_alloc_pdata,
.free_pdata = csched_free_pdata,
+ .alloc_domdata = csched_alloc_domdata,
+ .free_domdata = csched_free_domdata,
.tick_suspend = csched_tick_suspend,
.tick_resume = csched_tick_resume,
Index: xen-4.0.0-testing/xen/common/sched_sedf.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_sedf.c
+++ xen-4.0.0-testing/xen/common/sched_sedf.c
@@ -332,7 +332,7 @@ static inline void __add_to_runqueue_sor
}
-static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v)
+static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v, void *dd)
{
struct sedf_vcpu_info *inf;
@@ -415,20 +415,37 @@ static void sedf_destroy_vcpu(struct sch
sedf_free_vdata(ops, v->sched_priv);
}
+static void *
+sedf_alloc_domdata(struct scheduler *ops, struct domain *d)
+{
+ void *mem;
+
+ mem = xmalloc(struct sedf_dom_info);
+ if ( mem == NULL )
+ return NULL;
+
+ memset(mem, 0, sizeof(struct sedf_dom_info));
+
+ return mem;
+}
+
static int sedf_init_domain(struct scheduler *ops, struct domain *d)
{
- d->sched_priv = xmalloc(struct sedf_dom_info);
+ d->sched_priv = sedf_alloc_domdata(ops, d);
if ( d->sched_priv == NULL )
return -ENOMEM;
- memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
-
return 0;
}
+static void sedf_free_domdata(struct scheduler *ops, void *data)
+{
+ xfree(data);
+}
+
static void sedf_destroy_domain(struct scheduler *ops, struct domain *d)
{
- xfree(d->sched_priv);
+ sedf_free_domdata(ops, d->sched_priv);
}
static int sedf_pick_cpu(struct scheduler *ops, struct vcpu *v)
@@ -1498,6 +1515,8 @@ struct scheduler sched_sedf_def = {
.free_vdata = sedf_free_vdata,
.alloc_pdata = sedf_alloc_pdata,
.free_pdata = sedf_free_pdata,
+ .alloc_domdata = sedf_alloc_domdata,
+ .free_domdata = sedf_free_domdata,
.do_schedule = sedf_do_schedule,
.pick_cpu = sedf_pick_cpu,
Index: xen-4.0.0-testing/xen/common/schedule.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/schedule.c
+++ xen-4.0.0-testing/xen/common/schedule.c
@@ -222,7 +222,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
return 1;
}
- v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v);
+ v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
if ( v->sched_priv == NULL )
return 1;
@@ -237,14 +237,23 @@ int sched_move_domain(struct domain *d,
struct vcpu *v;
unsigned int new_p;
void **vcpu_priv;
+ void *domdata;
+
+ domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+ if ( domdata == NULL )
+ return -ENOMEM;
vcpu_priv = xmalloc_array(void *, d->max_vcpus);
if ( vcpu_priv == NULL )
+ {
+ SCHED_OP(&(c->sched), free_domdata, domdata);
return -ENOMEM;
+ }
+
memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
for_each_vcpu ( d, v )
{
- vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v);
+ vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
if ( vcpu_priv[v->vcpu_id] == NULL )
{
for_each_vcpu ( d, v )
@@ -253,6 +262,7 @@ int sched_move_domain(struct domain *d,
xfree(vcpu_priv[v->vcpu_id]);
}
xfree(vcpu_priv);
+ SCHED_OP(&(c->sched), free_domdata, domdata);
return -ENOMEM;
}
}
@@ -276,6 +286,8 @@ int sched_move_domain(struct domain *d,
}
d->cpupool = c;
+ SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
+ d->sched_priv = domdata;
domain_unpause(d);
@@ -1079,7 +1091,7 @@ void schedule_cpu_switch(unsigned int cp
v = per_cpu(schedule_data, cpu).idle;
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
if ( c != NULL )
- vpriv = SCHED_OP(new_ops, alloc_vdata, v);
+ vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);
spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
Index: xen-4.0.0-testing/xen/include/xen/sched-if.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/xen/sched-if.h
+++ xen-4.0.0-testing/xen/include/xen/sched-if.h
@@ -78,9 +78,12 @@ struct scheduler {
void (*deinit) (struct scheduler *);
void (*free_vdata) (struct scheduler *, void *);
- void * (*alloc_vdata) (struct scheduler *, struct vcpu *);
+ void * (*alloc_vdata) (struct scheduler *, struct vcpu *,
+ void *);
void (*free_pdata) (struct scheduler *, void *, int);
void * (*alloc_pdata) (struct scheduler *, int);
+ void (*free_domdata) (struct scheduler *, void *);
+ void * (*alloc_domdata) (struct scheduler *, struct domain *);
int (*init_domain) (struct scheduler *, struct domain *);
void (*destroy_domain) (struct scheduler *, struct domain *);
@@ -109,7 +112,6 @@ struct cpupool
cpumask_t cpu_valid; /* all cpus assigned to pool */
struct cpupool *next;
unsigned int n_dom;
- int cpu_in_transit; /* used for adding/removing cpus */
struct scheduler sched;
};
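To summarize the interface change in this diff (my reading of the hunks above, not authoritative Xen documentation): per-domain scheduler data is now allocated once via alloc_domdata and handed to every alloc_vdata call, which lets sched_move_domain() build the target pool's vcpu data before the domain is switched over. A hypothetical outline, with the names taken from the hunks and all error handling elided:

    /* moving domain d into pool c */
    domdata = SCHED_OP(&(c->sched), alloc_domdata, d);       /* target pool's data */
    for each vcpu v of d:
        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
    /* ...domain_pause(d), move each vcpu into pool c, install vcpu_priv... */
    SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);        /* release old data */
    d->sched_priv = domdata;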


@@ -204,7 +204,7 @@ Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
return err;
@@ -220,6 +227,7 @@ int xc_domain_getinfo(int xc_handle,
@@ -206,6 +213,7 @@ int xc_domain_getinfo(int xc_handle,
info->cpu_time = domctl.u.getdomaininfo.cpu_time;
info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
@@ -257,7 +257,7 @@ Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
/* Functions to produce a dump of a given domain
@@ -502,6 +503,100 @@ int xc_domain_setdebugging(int xc_handle
@@ -500,6 +501,100 @@ int xc_domain_setdebugging(int xc_handle
unsigned int enable);
/*


@@ -36,7 +36,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
return pyxc_error_to_exception();
if ( target )
@@ -332,7 +334,7 @@ static PyObject *pyxc_domain_getinfo(XcO
@@ -316,7 +318,7 @@ static PyObject *pyxc_domain_getinfo(XcO
{
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
@@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
"domid", (int)info[i].domid,
"online_vcpus", info[i].nr_online_vcpus,
"max_vcpu_id", info[i].max_vcpu_id,
@@ -347,7 +349,8 @@ static PyObject *pyxc_domain_getinfo(XcO
@@ -331,7 +333,8 @@ static PyObject *pyxc_domain_getinfo(XcO
"cpu_time", (long long)info[i].cpu_time,
"maxmem_kb", (long long)info[i].max_memkb,
"ssidref", (int)info[i].ssidref,
@@ -55,7 +55,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
pyhandle = PyList_New(sizeof(xen_domain_handle_t));
if ( (pyhandle == NULL) || (info_dict == NULL) )
{
@@ -1755,6 +1758,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
@@ -1697,6 +1700,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
return zero;
}
@@ -235,7 +235,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
static PyMethodDef pyxc_methods[] = {
{ "handle",
@@ -1870,7 +2046,8 @@ static PyMethodDef pyxc_methods[] = {
@@ -1812,7 +1988,8 @@ static PyMethodDef pyxc_methods[] = {
" maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
" cpu_time [long]: CPU time consumed, in nanoseconds\n"
" shutdown_reason [int]: Numeric code from guest OS, explaining "
@@ -245,7 +245,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
{ "vcpu_getinfo",
(PyCFunction)pyxc_vcpu_getinfo,
@@ -2268,6 +2445,66 @@ static PyMethodDef pyxc_methods[] = {
@@ -2210,6 +2387,66 @@ static PyMethodDef pyxc_methods[] = {
" enable [int,0|1]: Disable or enable?\n"
"Returns: [int] 0 on success; -1 on error.\n" },
@@ -1489,15 +1489,15 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
}
LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
@@ -234,6 +235,7 @@ XENAPI_CFG_TYPES = {
@@ -233,6 +234,7 @@ XENAPI_CFG_TYPES = {
's3_integrity' : int,
'superpages' : int,
'memory_sharing': int,
'Description': str,
+ 'pool_name' : str,
}
# List of legacy configuration keys that have no equivalent in the
@@ -279,6 +281,7 @@ LEGACY_CFG_TYPES = {
@@ -278,6 +280,7 @@ LEGACY_CFG_TYPES = {
'bootloader': str,
'bootloader_args': str,
'description': str,
@@ -1505,7 +1505,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
}
# Values that should be stored in xenstore's /vm/<uuid> that is used
@@ -300,6 +303,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
@@ -299,6 +302,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
'on_xend_stop',
'bootloader',
'bootloader_args',
@@ -1513,7 +1513,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
]
##
@@ -408,6 +412,7 @@ class XendConfig(dict):
@@ -407,6 +411,7 @@ class XendConfig(dict):
'other_config': {},
'platform': {},
'target': 0,
@@ -1646,7 +1646,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
def _init_networks(self):
# Initialise networks
@@ -361,6 +364,18 @@ class XendNode:
@@ -357,6 +360,18 @@ class XendNode:
for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
@@ -1665,7 +1665,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
def add_network(self, interface):
# TODO
@@ -581,6 +596,7 @@ class XendNode:
@@ -577,6 +592,7 @@ class XendNode:
self.save_PPCIs()
self.save_PSCSIs()
self.save_PSCSI_HBAs()
@@ -1673,7 +1673,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
def save_PIFs(self):
pif_records = dict([(pif_uuid, XendAPIStore.get(
@@ -623,6 +639,12 @@ class XendNode:
@@ -619,6 +635,12 @@ class XendNode:
for pscsi_HBA_uuid in XendPSCSI_HBA.get_all()])
self.state_store.save_state('pscsi_HBA', pscsi_HBA_records)
@@ -1686,7 +1686,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
def shutdown(self):
return 0
@@ -934,6 +956,7 @@ class XendNode:
@@ -930,6 +952,7 @@ class XendNode:
self.format_node_to_memory(info, 'node_to_memory')
info['node_to_dma32_mem'] = \
self.format_node_to_memory(info, 'node_to_dma32_mem')
@@ -1694,7 +1694,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
# FIXME: These are hard-coded to be the inverse of the getXenMemory
# functions in image.py. Find a cleaner way.
@@ -953,6 +976,7 @@ class XendNode:
@@ -949,6 +972,7 @@ class XendNode:
'virt_caps',
'total_memory',
'free_memory',


@@ -6,13 +6,12 @@
- move and rename csched_priv to make sure eventual backported
upstream patches using the variable get correctly adjusted (i.e.
missing adjustments get detected at build time)
- remove boot time per-CPU pool assignment messages (bnc#572146)
Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -1580,6 +1580,7 @@ int continue_hypercall_on_cpu(int cpu, v
@@ -1573,6 +1573,7 @@ int continue_hypercall_on_cpu(int cpu, v
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
@@ -20,7 +19,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
}
else
{
@@ -1590,7 +1591,6 @@ int continue_hypercall_on_cpu(int cpu, v
@@ -1583,7 +1584,6 @@ int continue_hypercall_on_cpu(int cpu, v
info->func = func;
info->data = data;


@@ -4,7 +4,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/acpi/power.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/acpi/power.c
+++ xen-4.0.0-testing/xen/arch/x86/acpi/power.c
@@ -231,7 +231,7 @@ static int enter_state(u32 state)
@@ -229,7 +229,7 @@ static int enter_state(u32 state)
return error;
}
@@ -13,7 +13,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/acpi/power.c
{
struct acpi_sleep_info *sinfo = (struct acpi_sleep_info *)data;
return enter_state(sinfo->sleep_state);
@@ -262,7 +262,7 @@ int acpi_enter_sleep(struct xenpf_enter_
@@ -260,7 +260,7 @@ int acpi_enter_sleep(struct xenpf_enter_
acpi_sinfo.pm1b_cnt_val = sleep->pm1b_cnt_val;
acpi_sinfo.sleep_state = sleep->sleep_state;
@@ -26,7 +26,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -1517,42 +1517,52 @@ void sync_vcpu_execstate(struct vcpu *v)
@@ -1510,42 +1510,52 @@ void sync_vcpu_execstate(struct vcpu *v)
}
struct migrate_info {
@@ -96,7 +96,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
info = v->arch.continue_info;
if ( info == NULL )
@@ -1561,16 +1571,12 @@ int continue_hypercall_on_cpu(int cpu, l
@@ -1554,16 +1564,12 @@ int continue_hypercall_on_cpu(int cpu, l
if ( info == NULL )
return -ENOMEM;
@@ -117,7 +117,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
@@ -1578,17 +1584,17 @@ int continue_hypercall_on_cpu(int cpu, l
@@ -1571,17 +1577,17 @@ int continue_hypercall_on_cpu(int cpu, l
else
{
BUG_ON(info->nest != 0);
@@ -160,7 +160,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain_build.c
if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
opt_dom0_max_vcpus = MAX_VIRT_CPUS;
@@ -277,7 +278,7 @@ int __init construct_dom0(
@@ -248,7 +249,7 @@ int __init construct_dom0(
unsigned long _initrd_start, unsigned long initrd_len,
char *cmdline)
{
@@ -169,7 +169,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain_build.c
struct cpu_user_regs *regs;
unsigned long pfn, mfn;
unsigned long nr_pages;
@@ -776,8 +777,12 @@ int __init construct_dom0(
@@ -757,8 +758,12 @@ int __init construct_dom0(
printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
@@ -372,7 +372,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
*/
int alloc_cpu_id(void)
{
@@ -985,10 +985,10 @@ static int __devinit do_boot_cpu(int api
@@ -984,10 +984,10 @@ static int __devinit do_boot_cpu(int api
cpucount--;
/* Mark the CPU as non-present */
@@ -430,7 +430,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
if (cpu_online(cpu)) {
printk("Bring up a online cpu. Bogus!\n");
err = -EBUSY;
@@ -1400,7 +1402,7 @@ int cpu_up(unsigned int cpu)
@@ -1398,7 +1400,7 @@ int cpu_up(unsigned int cpu)
out:
if (!err)
send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
@@ -439,7 +439,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
return err;
}
@@ -1481,13 +1483,13 @@ int cpu_add(uint32_t apic_id, uint32_t a
@@ -1479,13 +1481,13 @@ int cpu_add(uint32_t apic_id, uint32_t a
if ( physid_isset(apic_id, phys_cpu_present_map) )
return -EEXIST;
@@ -455,7 +455,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
return cpu;
}
@@ -1504,7 +1506,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
@@ -1502,7 +1504,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
"Setup node failed for pxm %x\n", pxm);
x86_acpiid_to_apicid[acpi_id] = 0xff;
mp_unregister_lapic(apic_id, cpu);
@@ -464,7 +464,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
return node;
}
apicid_to_node[apic_id] = node;
@@ -1512,7 +1514,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
@@ -1510,7 +1512,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
srat_detect_node(cpu);
numa_add_cpu(cpu);
@@ -473,7 +473,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/smpboot.c
dprintk(XENLOG_INFO, "Add CPU %x with index %x\n", apic_id, cpu);
return cpu;
}
@@ -1556,6 +1558,7 @@ int __devinit __cpu_up(unsigned int cpu)
@@ -1554,6 +1556,7 @@ int __devinit __cpu_up(unsigned int cpu)
process_pending_softirqs();
}


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/domain.c
+++ xen-4.0.0-testing/xen/arch/x86/domain.c
@@ -144,7 +144,7 @@ void dump_pageframe_info(struct domain *
@@ -143,7 +143,7 @@ void dump_pageframe_info(struct domain *
printk("Memory pages belonging to domain %u:\n", d->domain_id);
@@ -11,7 +11,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/domain.c
{
printk(" DomPage list too long to display\n");
}
@@ -152,6 +152,15 @@ void dump_pageframe_info(struct domain *
@@ -151,6 +151,15 @@ void dump_pageframe_info(struct domain *
{
page_list_for_each ( page, &d->page_list )
{


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -950,16 +950,16 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -917,16 +917,16 @@ static PyObject *pyxc_hvm_build(XcObject
#endif
int i;
char *image;
@@ -24,7 +24,7 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
return NULL;
memset(vcpu_avail, 0, sizeof(vcpu_avail));
@@ -1011,6 +1011,7 @@ static PyObject *pyxc_hvm_build(XcObject
@@ -978,6 +978,7 @@ static PyObject *pyxc_hvm_build(XcObject
va_hvm->checksum -= sum;
munmap(va_map, XC_PAGE_SIZE);
#endif

qemu-console-retry.patch (new file)

@@ -0,0 +1,25 @@
Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_console.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_console.c
+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_console.c
@@ -182,6 +182,7 @@ static int con_init(struct XenDevice *xe
{
struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
char *type, *dom;
+ int retries = 0;
/* setup */
dom = xs_get_domain_path(xenstore, con->xendev.dom);
@@ -191,7 +192,11 @@ static int con_init(struct XenDevice *xe
snprintf(con->console, sizeof(con->console), "%s/device/console/%d", dom, xendev->dev);
free(dom);
- type = xenstore_read_str(con->console, "type");
+ while (!(type = xenstore_read_str(con->console, "type")) && retries < 5) {
+ usleep(250000);
+ retries++;
+ }
+
if (!type || 0 != strcmp(type, "ioemu")) {
xen_be_printf(xendev, 1, "not for me (type=%s)\n", type);
if (type)
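Net effect of this hunk (my reading): con_init() now polls the console's xenstore "type" node up to 5 times at 250 ms intervals -- roughly 1.25 s in total -- instead of failing on the first read, so a backend that writes the node slightly late is tolerated.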

shadow.patch (new file)

@@ -0,0 +1,66 @@
In domain_create we previously reserved 1M of memory for domain creation (as
described in the xend comment), and that amount SHOULD NOT depend on the vcpu
count. Later, shadow_mem_control() adjusts the shadow size to 256 pages per
vcpu (plus other amounts related to guest memory size...). Therefore C/S 20389,
which raised the 1M to 4M to accommodate more vcpus, is wrong. I'm sorry for
that.
Here is why 1M currently doesn't work for large vcpu counts and, as we
mentioned, crashed Xen.
Each time sh_set_allocation() is called, it checks whether
shadow_min_acceptable_pages() worth of pages has been allocated and, if not,
allocates them. That works out to 128 pages per vcpu. But before d->max_vcpu
is set, no guest vcpu has been initialized, so shadow_min_acceptable_pages()
always returns 0. As a result, domain_create only allocated the 1M of shadow
memory and never satisfied the 128 pages per vcpu that alloc_vcpu() needs.
As we know, vcpu allocation happens in the XEN_DOMCTL_max_vcpus hypercall.
At that point shadow_mem_control() has not yet been called, so all those vcpus
are still allocated out of the pre-allocated 1M of shadow memory. That is a
BUG: as the vcpu count grows, 1M is no longer enough and Xen crashes.
C/S 20389 exposes this issue.
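A rough back-of-the-envelope, assuming 4 KiB shadow pages (my illustration, not figures from the patch):

    128 pages/vcpu * 4 KiB = 512 KiB of shadow memory per vcpu
    1M pre-allocation      = 256 pages, i.e. only 2 vcpus' worth
    a 32-vcpu guest needs 32 * 128 = 4096 pages = 16M before
    shadow_mem_control() ever runs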
So I think the right approach is: after d->max_vcpu is set and before
alloc_vcpu(), call sh_set_allocation() to satisfy the 128 pages per vcpu.
The following patch does this. Does it work for you? Thanks!
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Index: xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/mm/shadow/common.c
+++ xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c
@@ -41,6 +41,9 @@
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
+static unsigned int sh_set_allocation(struct domain *d,
+ unsigned int pages,
+ int *preempted);
/* Set up the shadow-specific parts of a domain struct at start of day.
* Called for every domain from arch_domain_create() */
void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
@@ -82,6 +85,12 @@ void shadow_vcpu_init(struct vcpu *v)
}
#endif
+ if ( !is_idle_domain(v->domain) )
+ {
+ shadow_lock(v->domain);
+ sh_set_allocation(v->domain, 128, NULL);
+ shadow_unlock(v->domain);
+ }
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
}
@@ -3102,7 +3111,7 @@ int shadow_enable(struct domain *d, u32
{
unsigned int r;
shadow_lock(d);
- r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
+ r = sh_set_allocation(d, 256, NULL); /* Use at least 1MB */
if ( r != 0 )
{
sh_set_allocation(d, 0, NULL);


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/blktap/drivers/blktapctrl.c
===================================================================
--- xen-4.0.0-testing.orig/tools/blktap/drivers/blktapctrl.c
+++ xen-4.0.0-testing/tools/blktap/drivers/blktapctrl.c
@@ -348,6 +348,7 @@ static int write_msg(int fd, int msgtype
@@ -347,6 +347,7 @@ static int write_msg(int fd, int msgtype
msg_dev = (msg_newdev_t *)(buf + sizeof(msg_hdr_t));
msg_dev->devnum = blkif->minor;
msg_dev->domid = blkif->domid;


@@ -325,9 +325,9 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -236,6 +236,7 @@ XENAPI_CFG_TYPES = {
@@ -235,6 +235,7 @@ XENAPI_CFG_TYPES = {
'superpages' : int,
'memory_sharing': int,
'Description': str,
'pool_name' : str,
+ 'snapshotname': str,
}


@@ -1,9 +1,7 @@
Change default IO-APIC ack mode for single IO-APIC systems to old-style.
Index: xen-4.0.0-testing/xen/arch/x86/io_apic.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/io_apic.c
+++ xen-4.0.0-testing/xen/arch/x86/io_apic.c
--- 2010-01-06.orig/xen/arch/x86/io_apic.c 2009-12-17 12:20:22.000000000 +0100
+++ 2010-01-06/xen/arch/x86/io_apic.c 2010-01-06 11:17:14.000000000 +0100
@@ -1442,7 +1442,7 @@ static unsigned int startup_level_ioapic
return 0; /* don't check for pending */
}
@@ -13,7 +11,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/io_apic.c
static void setup_ioapic_ack(char *s)
{
if ( !strcmp(s, "old") )
@@ -1946,6 +1946,8 @@ void __init setup_IO_APIC(void)
@@ -1954,6 +1954,8 @@ void __init setup_IO_APIC(void)
else
io_apic_irqs = ~PIC_IRQS;


@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ecdd97da2f63df66d5e965f8ba11481422332cd01f70521d67bcf62d207f5d61
size 23203635
oid sha256:88c2cad04e93a909e405bee6f4c3dff2c6b12ea2485b6e7f1db4813cb74f2f38
size 23155997


@@ -2,16 +2,16 @@ Index: xen-4.0.0-testing/Config.mk
===================================================================
--- xen-4.0.0-testing.orig/Config.mk
+++ xen-4.0.0-testing/Config.mk
@@ -151,7 +151,7 @@ QEMU_REMOTE=http://xenbits.xensource.com
@@ -148,7 +148,7 @@ QEMU_REMOTE=http://xenbits.xensource.com
# Specify which qemu-dm to use. This may be `ioemu' to use the old
# Mercurial in-tree version, or a local directory, or a git URL.
# CONFIG_QEMU ?= ../qemu-xen.git
-CONFIG_QEMU ?= $(QEMU_REMOTE)
+CONFIG_QEMU ?= ioemu-remote
QEMU_TAG := xen-4.0.0-rc6
#QEMU_TAG ?= e5d14857cd67490bf956d97c8888c0be95ed3f78
@@ -167,9 +167,9 @@ CONFIG_OCAML_XENSTORED ?= n
QEMU_TAG ?= e5d14857cd67490bf956d97c8888c0be95ed3f78
# Thu Feb 18 15:36:29 2010 +0000
@@ -163,9 +163,9 @@ CONFIG_OCAML_XENSTORED ?= n
# Optional components
XENSTAT_XENTOP ?= y
VTPM_TOOLS ?= n


@@ -1,27 +0,0 @@
Index: xen-4.0.0-testing/tools/ioemu-remote/block-vvfat.c
===================================================================
--- xen-4.0.0-testing.orig/tools/ioemu-remote/block-vvfat.c
+++ xen-4.0.0-testing/tools/ioemu-remote/block-vvfat.c
@@ -865,7 +865,8 @@ static int init_directories(BDRVVVFATSta
{
direntry_t* entry=array_get_next(&(s->directory));
entry->attributes=0x28; /* archive | volume label */
- snprintf((char*)entry->name,11,"QEMU VVFAT");
+ memcpy(entry->name,"QEMU VVF",8);
+ memcpy(entry->extension,"AT ",3);
}
/* Now build FAT, and write back information into directory */
Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/overrides.mk
===================================================================
--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/overrides.mk
+++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/overrides.mk
@@ -11,7 +11,7 @@ ifeq ($(ARCH),ia64)
_XEN_CPPFLAGS += -DCONFIG_VMX_GUEST
endif
-_XEN_CPPFLAGS += -include $(objtree)/include/linux/autoconf.h
+_XEN_CPPFLAGS += -include $(objtree)/include/generated/autoconf.h
EXTRA_CFLAGS += $(_XEN_CPPFLAGS)
EXTRA_AFLAGS += $(_XEN_CPPFLAGS)

xen-gcc45-fixes.patch (new file)

@@ -0,0 +1,99 @@
--- xen-4.0.0-testing/tools/xenstore/xs_lib.c.orig 2010-03-01 08:28:04.000000000 -0700
+++ xen-4.0.0-testing/tools/xenstore/xs_lib.c 2010-03-01 09:12:04.000000000 -0700
@@ -149,7 +149,7 @@ bool xs_strings_to_perms(struct xs_permi
bool xs_perm_to_string(const struct xs_permissions *perm,
char *buffer, size_t buf_len)
{
- switch (perm->perms) {
+ switch ((int)perm->perms) {
case XS_PERM_WRITE:
*buffer = 'w';
break;
--- xen-4.0.0-testing/tools/blktap/lib/blktaplib.h.orig 2010-03-01 09:24:26.000000000 -0700
+++ xen-4.0.0-testing/tools/blktap/lib/blktaplib.h 2010-03-01 09:28:16.000000000 -0700
@@ -42,7 +42,7 @@
#include <sys/types.h>
#include <unistd.h>
-#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, XC_PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, XC_PAGE_SIZE)
/* size of the extra VMA area to map in attached pages. */
#define BLKTAP_VMA_PAGES BLK_RING_SIZE
--- xen-4.0.0-testing/tools/blktap2/include/blktaplib.h.orig 2010-03-01 09:46:30.000000000 -0700
+++ xen-4.0.0-testing/tools/blktap2/include/blktaplib.h 2010-03-01 09:46:50.000000000 -0700
@@ -45,7 +45,7 @@
#define EPRINTF(_f, _a...) syslog(LOG_ERR, "tap-err:%s: " _f, __func__, ##_a)
#define PERROR(_f, _a...) EPRINTF(_f ": %s", ##_a, strerror(errno))
-#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, XC_PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, XC_PAGE_SIZE)
/* size of the extra VMA area to map in attached pages. */
#define BLKTAP_VMA_PAGES BLK_RING_SIZE
--- xen-4.0.0-testing/xen/include/xen/compat.h.orig 2010-03-01 13:22:34.000000000 -0700
+++ xen-4.0.0-testing/xen/include/xen/compat.h 2010-03-01 14:30:34.000000000 -0700
@@ -129,8 +129,8 @@
#define CHECK_TYPE(name) \
- typedef int __checkT ## name[1 - ((xen_ ## name ## _t *)0 != \
- (compat_ ## name ## _t *)0) * 2]
+ typedef int __checkT ## name[1 - (sizeof(xen_ ## name ## _t) != \
+ sizeof(compat_ ## name ## _t)) * 2]
#define CHECK_TYPE_(k, n) \
typedef int __checkT ## k ## _ ## n[1 - ((k xen_ ## n *)0 != \
(k compat_ ## n *)0) * 2]
@@ -146,26 +146,30 @@
typedef int __checkF ## t ## __ ## f[1 - (&((xen_ ## t ## _t *)0)->f != \
&((compat_ ## t ## _t *)0)->f) * 2]
#define CHECK_FIELD_(k, n, f) \
- typedef int __checkF ## k ## _ ## n ## __ ## f[1 - (&((k xen_ ## n *)0)->f != \
- &((k compat_ ## n *)0)->f) * 2]
+ typedef int __checkF ## k ## _ ## n ## __ ## f[1 - (offsetof(k xen_ ## n,f) != \
+ offsetof(k compat_ ## n,f)) * 2]
#define CHECK_SUBFIELD_1(t, f1, f2) \
typedef int __checkF1 ## t ## __ ## f1 ## __ ## f2 \
- [1 - (&((xen_ ## t ## _t *)0)->f1.f2 != \
- &((compat_ ## t ## _t *)0)->f1.f2) * 2]
+ [1 - (offsetof(xen_ ## t ## _t,f1.f2) != \
+ offsetof(compat_ ## t ## _t,f1.f2)) * 2]
#define CHECK_SUBFIELD_1_(k, n, f1, f2) \
typedef int __checkF1 ## k ## _ ## n ## __ ## f1 ## __ ## f2 \
- [1 - (&((k xen_ ## n *)0)->f1.f2 != \
- &((k compat_ ## n *)0)->f1.f2) * 2]
+ [1 - (offsetof(k xen_ ## n,f1.f2) != \
+ offsetof(k compat_ ## n,f1.f2)) * 2]
#define CHECK_SUBFIELD_2(t, f1, f2, f3) \
typedef int __checkF2 ## t ## __ ## f1 ## __ ## f2 ## __ ## f3 \
- [1 - (&((xen_ ## t ## _t *)0)->f1.f2.f3 != \
- &((compat_ ## t ## _t *)0)->f1.f2.f3) * 2]
+ [1 - (offsetof(xen_ ## t ## _t,f1.f2.f3) != \
+ offsetof(compat_ ## t ## _t,f1.f2.f3)) * 2]
+#define CHECK_SUBFIELD_2_(k, n, f1, f2, f3) \
+ typedef int __checkF2 ## k ## _ ## n ## __ ## f1 ## __ ## f2 ## __ ## f3 \
+ [1 - (offsetof(k xen_ ## n,f1.f2.f3) != \
+ offsetof(k compat_ ## n,f1.f2.f3)) * 2]
#define CHECK_SUBFIELD_2_(k, n, f1, f2, f3) \
typedef int __checkF2 ## k ## _ ## n ## __ ## f1 ## __ ## f2 ## __ ## f3 \
- [1 - (&((k xen_ ## n *)0)->f1.f2.f3 != \
- &((k compat_ ## n *)0)->f1.f2.f3) * 2]
+ [1 - (offsetof(k xen_ ## n,f1.f2.f3) != \
+ offsetof(k compat_ ## n,f1.f2.f3)) * 2]
int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...);
--- xen-4.0.0-testing/tools/ioemu-remote/block-vvfat.c.orig 2010-03-01 14:53:11.000000000 -0700
+++ xen-4.0.0-testing/tools/ioemu-remote/block-vvfat.c 2010-03-01 14:53:56.000000000 -0700
@@ -865,7 +865,8 @@ static int init_directories(BDRVVVFATSta
{
direntry_t* entry=array_get_next(&(s->directory));
entry->attributes=0x28; /* archive | volume label */
- snprintf((char*)entry->name,11,"QEMU VVFAT");
+ memcpy(entry->name,"QEMU VVF",8);
+ memcpy(entry->extension,"AT ",3);
}
/* Now build FAT, and write back information into directory */
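As an aside, a minimal standalone sketch of the compile-time assertion idiom the CHECK_* macros above rely on (my illustration, not Xen code): the typedef'd array has size 1 when the checked condition is false and a negative size -- a compile error -- when it is true. The patch swaps the pointer-member comparisons for sizeof()/offsetof() because those are integer constant expressions, which is presumably what gcc 4.5 started enforcing here.

    #include <stddef.h>

    struct a { int x; long y; };
    struct b { int x; long y; };

    /* Compiles: the offsets match, so the array size is 1 - 0*2 = 1. */
    typedef int check_x_matches[1 - (offsetof(struct a, x) !=
                                     offsetof(struct b, x)) * 2];

    /* Had the offsets diverged, the size would be -1 and the
       compiler would reject the translation unit. */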


@@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
@@ -911,15 +911,39 @@ class XendNode:
@@ -907,15 +907,39 @@ class XendNode:
info['cpu_mhz'] = info['cpu_khz'] / 1000
@@ -47,7 +47,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
ITEM_ORDER = ['nr_cpus',
'nr_nodes',
'cores_per_socket',
@@ -929,6 +953,9 @@ class XendNode:
@@ -925,6 +949,9 @@ class XendNode:
'virt_caps',
'total_memory',
'free_memory',


@@ -165,7 +165,7 @@ Index: xen-4.0.0-testing/tools/blktap2/drivers/block-remus.c
===================================================================
--- xen-4.0.0-testing.orig/tools/blktap2/drivers/block-remus.c
+++ xen-4.0.0-testing/tools/blktap2/drivers/block-remus.c
@@ -1579,7 +1579,7 @@ static int tdremus_open(td_driver_t *dri
@@ -1578,7 +1578,7 @@ static int tdremus_open(td_driver_t *dri
td_flag_t flags)
{
struct tdremus_state *s = (struct tdremus_state *)driver->data;
@@ -174,3 +174,16 @@ Index: xen-4.0.0-testing/tools/blktap2/drivers/block-remus.c
RPRINTF("opening %s\n", name);
Index: xen-4.0.0-testing/tools/libxl/Makefile
===================================================================
--- xen-4.0.0-testing.orig/tools/libxl/Makefile
+++ xen-4.0.0-testing/tools/libxl/Makefile
@@ -50,7 +50,7 @@ xl.o: $(LIBCONFIG_OUTPUT)/libconfig.so x
$(CC) $(CFLAGS) -I$(LIBCONFIG_SOURCE) -c xl.c
$(CLIENTS): xl.o libxenlight.so $(LIBCONFIG_OUTPUT)/libconfig.so
- $(CC) $(LDFLAGS) -o $@ $< $(LIBS) -L . -lxenlight -L$(LIBCONFIG_OUTPUT) -lconfig
+ $(CC) $(LDFLAGS) -o $@ $< -L . -lxenlight $(LIBS) -L$(LIBCONFIG_OUTPUT) -lconfig
.PHONY: install
install: all


@@ -1,30 +1,3 @@
-------------------------------------------------------------------
Wed Mar 17 16:42:20 CST 2010 - jsong@novell.com
-Fix bnc#466899 - numa enabled xen fails to start/create vms
adjust_vcpuaffinity_more_cpu.patch
-------------------------------------------------------------------
Tue Mar 9 16:28:59 MST 2010 - carnold@novell.com
- Update to changeset 21022 Xen 4.0.0 RC6.
-------------------------------------------------------------------
Tue Mar 9 10:43:27 MST 2010 - carnold@novell.com
- bnc#586510 - cpupool fixes
cpu-pools-update.patch
-------------------------------------------------------------------
Fri Mar 5 09:04:18 MST 2010 - carnold@novell.com
- bnc#582645 - Xen stuck, mptbase driver attempting to reset config
request
-------------------------------------------------------------------
Mon Mar 1 10:05:07 MST 2010 - carnold@novell.com
- Update to changeset 20990 Xen 4.0.0 RC5.
-------------------------------------------------------------------
Mon Feb 22 08:26:01 MST 2010 - jfehlig@novell.com


@@ -1,5 +1,5 @@
#
# spec file for package xen (Version 4.0.0_21046_01)
# spec file for package xen (Version 4.0.0_20978_01)
#
# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
@@ -22,7 +22,7 @@ Name: xen
ExclusiveArch: %ix86 x86_64
%define xvers 4.0
%define xvermaj 4
%define changeset 21046
%define changeset 20978
%define xen_build_dir xen-4.0.0-testing
%define with_kmp 0
BuildRequires: LibVNCServer-devel SDL-devel acpica automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig
@@ -37,7 +37,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit
%if %{?with_kmp}0
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_21046_01
Version: 4.0.0_20978_01
Release: 1
License: GPLv2
Group: System/Kernel
@@ -91,7 +91,6 @@ Patch313: xen-xm-top-needs-root.diff
Patch314: xen-max-free-mem.diff
Patch315: xen-disable-libxl.diff
Patch316: xen-disable-xenpaging.diff
Patch317: xen-extra-fixes.patch
Patch320: block-losetup-retry.diff
Patch321: block-flags.diff
Patch322: bridge-opensuse.patch
@@ -121,19 +120,20 @@ Patch355: tools-gdbserver-build.diff
Patch356: ioemu-vnc-resize.patch
Patch357: ioemu-debuginfo.patch
Patch358: vif-bridge-no-iptables.patch
Patch359: qemu-console-retry.patch
# Needs to go upstream
Patch359: checkpoint-rename.patch
Patch360: xm-save-check-file.patch
Patch361: xm-create-xflag.patch
Patch362: cpupools-core.patch
Patch363: cpupools-core-fixup.patch
Patch364: keyhandler-alternative.patch
Patch365: cpu-pools-libxc.patch
Patch366: cpu-pools-python.patch
Patch367: cpu-pools-libxen.patch
Patch368: cpu-pools-xmtest.patch
Patch369: cpu-pools-docs.patch
Patch370: cpu-pools-fixes.patch
Patch360: checkpoint-rename.patch
Patch361: xm-save-check-file.patch
Patch362: xm-create-xflag.patch
Patch363: cpupools-core.patch
Patch364: cpupools-core-fixup.patch
Patch365: keyhandler-alternative.patch
Patch366: cpu-pools-libxc.patch
Patch367: cpu-pools-python.patch
Patch368: cpu-pools-libxen.patch
Patch369: cpu-pools-xmtest.patch
Patch370: cpu-pools-docs.patch
Patch371: xen-gcc45-fixes.patch
# Patches for snapshot support
Patch400: snapshot-ioemu-save.patch
Patch401: snapshot-ioemu-restore.patch
@@ -155,7 +155,8 @@ Patch424: ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
Patch425: ioemu-bdrv-open-CACHE_WB.patch
Patch426: xen-ioemu-hvm-pv-support.diff
Patch427: qemu-dm-segfault.patch
Patch428: hibernate.patch
Patch428: shadow.patch
Patch429: hibernate.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
# Hypervisor and PV driver Patches
@@ -531,7 +532,6 @@ Authors:
%patch314 -p1
%patch315 -p1
%patch316 -p1
%patch317 -p1
#%patch320 -p1
#%patch321 -p1
%patch322 -p1
@@ -573,6 +573,7 @@ Authors:
%patch368 -p1
%patch369 -p1
%patch370 -p1
%patch371 -p1
%patch400 -p1
%patch401 -p1
%patch402 -p1
@@ -592,6 +593,7 @@ Authors:
%patch426 -p1
%patch427 -p1
%patch428 -p1
%patch429 -p1
%patch450 -p1
%patch500 -p1
%patch501 -p1

xen_pvdrivers (new file)

@@ -0,0 +1,7 @@
# Install the paravirtualized drivers
install libata /sbin/modprobe xen-vbd 2>&1 |:; /sbin/modprobe --ignore-install libata
install 8139cp /sbin/modprobe xen-vnif 2>&1 |:; /sbin/modprobe --ignore-install 8139cp
install 8139too /sbin/modprobe xen-vnif 2>&1 |:; /sbin/modprobe --ignore-install 8139too
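For reference, this is how such a modprobe install rule resolves (standard modprobe semantics; the file's location, e.g. under /etc/modprobe.d/, is an assumption):

    modprobe 8139too
    #  1) runs: /sbin/modprobe xen-vnif                  (failure silenced by '2>&1 |:')
    #  2) runs: /sbin/modprobe --ignore-install 8139too  (loads the real driver)

The paravirtualized driver is tried first, and --ignore-install keeps the rule from recursing when the native module itself is loaded.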