OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=2cde1443cbaf9b44fea7d2345354b1f7
Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex
===================================================================
--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex
+++ xen-4.0.0-testing/docs/xen-api/revision-history.tex
@@ -54,7 +54,7 @@
       Added definitions of new classes cpu\_pool. Updated the table
       and the diagram representing relationships between classes.
       Added fields host.resident\_cpu\_pools, VM.cpu\_pool and
-      host\_cpu.cpu\_pool.
+      host\_cpu.cpu\_pool.\tabularnewline
     \hline
   \end{tabular}
 \end{center}
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendCPUPool.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -547,7 +547,7 @@ class XendCPUPool(XendBase):
     def pool_start(cls, poolname):
         pool = cls.lookup_pool(poolname)
         if not pool:
-            raise VmError('unkown pool %s' % poolname)
+            raise VmError('unknown pool %s' % poolname)
         try:
             pool.activate()
         except XendAPIError, ex:
@@ -566,8 +566,12 @@ class XendCPUPool(XendBase):
                          for cpu_ref in pool_vals['host_CPUs'] ]
                 cpus.sort()
                 pool_vals['host_CPU_numbers'] = cpus
-                vm_names = [ xd.get_vm_by_uuid(uuid).getName()
-                             for uuid in pool_vals['started_VMs'] ]
+                # query VMs names. Take in account, that a VM
+                # returned by get_all_records could be destroy, now
+                vm_names = [ vm.getName()
+                             for vm in map(xd.get_vm_by_uuid,
+                                           pool_vals['started_VMs'])
+                             if vm ]
                 pool_vals['started_VM_names'] = vm_names
                 pool_vals['auto_power_on'] = int(pool_vals['auto_power_on'])
                 sxprs += [[pool_uuid] + map2sxp(pool_vals)]
@@ -578,7 +582,7 @@ class XendCPUPool(XendBase):
     def pool_destroy(cls, poolname):
         pool = cls.lookup_pool(poolname)
         if not pool:
-            raise VmError('unkown pool %s' % poolname)
+            raise VmError('unknown pool %s' % poolname)
         try:
             pool.deactivate()
             if not pool.is_managed():
@@ -589,7 +593,7 @@ class XendCPUPool(XendBase):
     def pool_delete(cls, poolname):
         pool = cls.lookup_pool(poolname)
         if not pool:
-            raise VmError('unkown pool %s' % poolname)
+            raise VmError('unknown pool %s' % poolname)
         try:
             pool.destroy()
         except XendAPIError, ex:
@@ -598,28 +602,28 @@ class XendCPUPool(XendBase):
     def pool_cpu_add(cls, poolname, cpu):
         pool = cls.lookup_pool(poolname)
         if not pool:
-            raise VmError('unkown pool %s' % poolname)
+            raise VmError('unknown pool %s' % poolname)
         try:
             cpu_ref = cls._cpu_number_to_ref(int(cpu))
             if cpu_ref:
                 pool.add_host_CPU_live(cpu_ref)
             else:
                 raise PoolError(XEND_ERROR_INVALID_CPU,
-                                'CPU unkown')
+                                'CPU unknown')
         except XendAPIError, ex:
             raise VmError(ex.get_api_error())

     def pool_cpu_remove(cls, poolname, cpu):
         pool = cls.lookup_pool(poolname)
         if not pool:
-            raise VmError('unkown pool %s' % poolname)
+            raise VmError('unknown pool %s' % poolname)
         try:
             cpu_ref = cls._cpu_number_to_ref(int(cpu))
             if cpu_ref:
                 pool.remove_host_CPU_live(cpu_ref)
             else:
                 raise PoolError(XEND_ERROR_INVALID_CPU,
-                                'CPU unkown')
+                                'CPU unknown')
         except XendAPIError, ex:
             raise VmError(ex.get_api_error())

@@ -627,10 +631,10 @@ class XendCPUPool(XendBase):
         dom = XendDomain.instance()
         pool = cls.lookup_pool(poolname)
         if not pool:
-            raise VmError('unkown pool %s' % poolname)
+            raise VmError('unknown pool %s' % poolname)
         dominfo = dom.domain_lookup_nr(domname)
         if not dominfo:
-            raise VmError('unkown domain %s' % domname)
+            raise VmError('unknown domain %s' % domname)
         domid = dominfo.getDomid()
         if domid is not None:
             if domid == 0:
@@ -860,8 +864,11 @@ class XendCPUPool(XendBase):
         pool_uuid = None
         try:
             pool_id = int(id_or_name)
-            # pool id given
+            # pool id given ?
             pool_uuid = cls.query_pool_ref(pool_id)
+            if not pool_uuid:
+                # not found -> search name
+                pool_uuid = cls.get_by_name_label(id_or_name)
         except ValueError:
             # pool name given
             pool_uuid = cls.get_by_name_label(id_or_name)
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -2574,7 +2574,7 @@ class XendDomainInfo:
             pool = XendCPUPool.lookup_pool(pool_name)

             if pool is None:
-                raise VmError("unkown pool %s" % pool_name)
+                raise VmError("unknown pool %s" % pool_name)
             pool_id = pool.query_pool_id()
             if pool_id is None:
                 raise VmError("pool %s not activated" % pool_name)
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -3515,7 +3515,7 @@ def get_pool_ref(name):
     if len(refs) > 0:
         return refs[0]
     else:
-        err('unkown pool name')
+        err('unknown pool name')
         sys.exit(1)

 def xm_pool_start(args):
@@ -3643,7 +3643,7 @@ def xm_pool_cpu_add(args):
         cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
                     if c_rec['number'] == args[1] ]
         if len(cpu_ref) == 0:
-            err('cpu number unkown')
+            err('cpu number unknown')
         else:
             server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
     else:
@@ -3657,7 +3657,7 @@ def xm_pool_cpu_remove(args):
         cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
                     if c_rec['number'] == args[1] ]
         if len(cpu_ref) == 0:
-            err('cpu number unkown')
+            err('cpu number unknown')
         else:
             server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
     else:
Index: xen-4.0.0-testing/xen/common/cpupool.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/cpupool.c
+++ xen-4.0.0-testing/xen/common/cpupool.c
@@ -29,6 +29,9 @@ static struct cpupool *cpupool_list;
 static int cpupool0_max_cpus;
 integer_param("pool0_max_cpus", cpupool0_max_cpus);

+static int cpupool_moving_cpu = -1;
+static struct cpupool *cpupool_cpu_moving = NULL;
+
 /* cpupool lock: be carefull, this lock is sometimes released on another cpu
  * as it was obtained!
  */
@@ -104,7 +107,6 @@ struct cpupool *cpupool_create(int pooli
     }
     *q = c;
     c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
-    c->cpu_in_transit = -1;
     if ( schedule_init_global(sched, &(c->sched)) )
     {
         spin_unlock(&cpupool_lock);
@@ -151,16 +153,20 @@ int cpupool_destroy(struct cpupool *c)
  * assign a specific cpu to a cpupool
  * cpupool_lock must be held
  */
-static void cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
+static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
 {
-    printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
-           c->cpupool_id, cpu);
+    if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
+        return -EBUSY;
     per_cpu(cpupool, cpu) = c;
     schedule_cpu_switch(cpu, c);
     cpu_clear(cpu, cpupool_free_cpus);
+    if (cpupool_moving_cpu == cpu)
+    {
+        cpupool_moving_cpu = -1;
+        cpupool_cpu_moving = NULL;
+    }
     cpu_set(cpu, c->cpu_valid);
-    printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ready\n",
-           c->cpupool_id, cpu);
+    return 0;
 }

 /*
@@ -177,8 +183,8 @@ int cpupool_assign_ncpu(struct cpupool *
     spin_lock(&cpupool_lock);
     for_each_cpu_mask(i, cpupool_free_cpus)
     {
-        cpupool_assign_cpu_locked(c, i);
-        n++;
+        if ( cpupool_assign_cpu_locked(c, i) == 0 )
+            n++;
         if ( n == ncpu )
             break;
     }
@@ -188,43 +194,25 @@
     return n;
 }

-static void cpupool_unassign_cpu_locked_1(struct cpupool *c, unsigned int cpu)
-{
-    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
-           c->cpupool_id, cpu);
-    c->cpu_in_transit = cpu;
-}
-
-static int cpupool_unassign_cpu_locked_2(struct cpupool *c)
+static long cpupool_unassign_cpu_helper(void *hdl, void *info)
 {
-    int cpu = c->cpu_in_transit;
-    int ret;
+    struct cpupool *c = (struct cpupool *)info;
+    int cpu = cpupool_moving_cpu;
+    long ret;
+    int cpupool_id = c->cpupool_id;

-    c->cpu_in_transit = -1;
-    cpu_clear(cpu, c->cpu_valid);
     ret = cpu_disable_scheduler(cpu, 1);
-    if ( ret )
-    {
-        cpu_set(cpu, c->cpu_valid);
-    }
-    else
+    cpu_set(cpu, cpupool_free_cpus);
+    if ( !ret )
     {
-        cpu_set(cpu, cpupool_free_cpus);
         schedule_cpu_switch(cpu, NULL);
         per_cpu(cpupool, cpu) = NULL;
+        cpupool_moving_cpu = -1;
+        cpupool_cpu_moving = NULL;
     }
-    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
-           c->cpupool_id, cpu, ret);
-    return ret;
-}
-
-static long cpupool_unassign_cpu_helper(void *hdl, void *info)
-{
-    struct cpupool *c = (struct cpupool *)info;
-    long ret;
-
-    ret = cpupool_unassign_cpu_locked_2(c);
     spin_unlock(&cpupool_lock);
+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
+           cpupool_id, cpu, ret);
     return ret;
 }

@@ -242,16 +230,23 @@ static long cpupool_unassign_cpu_helper(
 int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
 {
     int work_cpu;
-    int rc = 0;
+    int ret;
     struct domain *d;
+    int cpupool_id = c->cpupool_id;

+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+           cpupool_id, cpu);
     spin_lock(&cpupool_lock);
-    if ( !cpu_isset(cpu, c->cpu_valid) )
-    {
-        spin_unlock(&cpupool_lock);
-        return 0;
-    }
-    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) )
+    ret = -EBUSY;
+    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
+        goto out;
+
+    ret = 0;
+    if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+        goto out;
+
+    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+         (cpu != cpupool_moving_cpu) )
     {
         for_each_domain(d)
         {
@@ -259,27 +254,24 @@ int cpupool_unassign_cpu
                 continue;
             if ( !d->is_dying )
             {
-                rc = -EBUSY;
+                ret = -EBUSY;
                 break;
             }
-            printk(XENLOG_DEBUG "moving dying domain %d to pool0\n",
-                   d->domain_id);
             c->n_dom--;
-            rc = sched_move_domain(d, cpupool0);
-            if ( rc )
+            ret = sched_move_domain(d, cpupool0);
+            if ( ret )
             {
                 c->n_dom++;
                 break;
             }
             cpupool0->n_dom++;
         }
-        if ( rc )
-        {
-            spin_unlock(&cpupool_lock);
-            return rc;
-        }
+        if ( ret )
+            goto out;
     }
-    cpupool_unassign_cpu_locked_1(c, cpu);
+    cpupool_moving_cpu = cpu;
+    cpupool_cpu_moving = c;
+    cpu_clear(cpu, c->cpu_valid);
     work_cpu = smp_processor_id();
     if ( work_cpu == cpu )
     {
@@ -289,6 +281,12 @@ int cpupool_unassign_cpu
     }
     return continue_hypercall_on_cpu(work_cpu, NULL,
                                      cpupool_unassign_cpu_helper, c);
+
+out:
+    spin_unlock(&cpupool_lock);
+    printk(XENLOG_DEBUG "cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+           cpupool_id, cpu, ret);
+    return ret;
 }

 /*
@@ -316,6 +314,7 @@ int cpupool_add_domain(struct domain *d,
 {
     struct cpupool *c;
     int rc = 1;
+    int n_dom;

     if ( poolid == CPUPOOLID_NONE )
         return 0;
@@ -324,12 +323,14 @@ int cpupool_add_domain(struct domain *d,
     if ( (c != NULL) && cpus_weight(c->cpu_valid) )
     {
         c->n_dom++;
+        n_dom = c->n_dom;
         d->cpupool = c;
-        printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
-               d->domain_id, poolid, c->n_dom);
         rc = 0;
     }
     spin_unlock(&cpupool_lock);
+    if (!rc)
+        printk(XENLOG_DEBUG "cpupool_add_domain(dom=%d,pool=%d) n_dom %d\n",
+               d->domain_id, poolid, n_dom);
     return rc;
 }

@@ -338,14 +339,19 @@ int cpupool_add_domain(struct domain *d,
  */
 void cpupool_rm_domain(struct domain *d)
 {
+    int cpupool_id;
+    int n_dom;
+
     if ( d->cpupool == NULL )
         return;
     spin_lock(&cpupool_lock);
+    cpupool_id = d->cpupool->cpupool_id;
     d->cpupool->n_dom--;
-    printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
-           d->domain_id, d->cpupool->cpupool_id, d->cpupool->n_dom);
+    n_dom = d->cpupool->n_dom;
     d->cpupool = NULL;
     spin_unlock(&cpupool_lock);
+    printk(XENLOG_DEBUG "cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+           d->domain_id, cpupool_id, n_dom);
     return;
 }

@@ -359,7 +365,7 @@ void cpupool_cpu_add(unsigned int cpu)
         return;
     spin_lock(&cpupool_lock);
     cpu_set(cpu, cpupool_free_cpus);
-    cpupool_assign_cpu_locked(cpupool0, cpu);
+    (void)cpupool_assign_cpu_locked(cpupool0, cpu);
     spin_unlock(&cpupool_lock);
     return;
 }
@@ -428,6 +434,8 @@ int cpupool_do_domctl(struct xen_domctl_
         unsigned cpu;

         cpu = op->cpu;
+        printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+               op->cpupool_id, cpu);
         spin_lock(&cpupool_lock);
         if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
             cpu = first_cpu(cpupool_free_cpus);
@@ -441,10 +449,11 @@ int cpupool_do_domctl(struct xen_domctl_
         ret = -ENOENT;
         if ( c == NULL )
             goto addcpu_out;
-        cpupool_assign_cpu_locked(c, cpu);
-        ret = 0;
+        ret = cpupool_assign_cpu_locked(c, cpu);
 addcpu_out:
         spin_unlock(&cpupool_lock);
+        printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+               op->cpupool_id, cpu, ret);
     }
     break;

@@ -488,23 +497,23 @@ addcpu_out:
             rcu_unlock_domain(d);
             break;
         }
+        printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
+               d->domain_id, op->cpupool_id);
         ret = -ENOENT;
         spin_lock(&cpupool_lock);
         c = cpupool_find_by_id(op->cpupool_id, 1);
         if ( (c != NULL) && cpus_weight(c->cpu_valid) )
         {
-            printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d\n",
-                   d->domain_id, c->cpupool_id);
             d->cpupool->n_dom--;
             ret = sched_move_domain(d, c);
             if ( ret )
                 d->cpupool->n_dom++;
             else
                 c->n_dom++;
-            printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
-                   d->domain_id, c->cpupool_id, ret);
         }
         spin_unlock(&cpupool_lock);
+        printk(XENLOG_DEBUG "cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+               d->domain_id, op->cpupool_id, ret);
         rcu_unlock_domain(d);
     }
     break;
Index: xen-4.0.0-testing/xen/common/sched_credit.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_credit.c
+++ xen-4.0.0-testing/xen/common/sched_credit.c
@@ -602,7 +602,7 @@ csched_vcpu_acct(struct csched_private *
 }

 static void *
-csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc)
+csched_alloc_vdata(struct scheduler *ops, struct vcpu *vc, void *dd)
 {
     struct csched_vcpu *svc;

@@ -614,7 +614,7 @@ csched_alloc_vdata(struct scheduler *ops

     INIT_LIST_HEAD(&svc->runq_elem);
     INIT_LIST_HEAD(&svc->active_vcpu_elem);
-    svc->sdom = CSCHED_DOM(vc->domain);
+    svc->sdom = dd;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
     svc->flags = 0U;
@@ -778,19 +778,14 @@ csched_dom_cntl(
     return 0;
 }

-static int
-csched_dom_init(struct scheduler *ops, struct domain *dom)
+static void *
+csched_alloc_domdata(struct scheduler *ops, struct domain *dom)
 {
     struct csched_dom *sdom;

-    CSCHED_STAT_CRANK(dom_init);
-
-    if ( is_idle_domain(dom) )
-        return 0;
-
     sdom = xmalloc(struct csched_dom);
     if ( sdom == NULL )
-        return -ENOMEM;
+        return NULL;
     memset(sdom, 0, sizeof(*sdom));

     /* Initialize credit and weight */
@@ -800,16 +795,40 @@ csched_dom_init(struct scheduler *ops, s
     sdom->dom = dom;
     sdom->weight = CSCHED_DEFAULT_WEIGHT;
     sdom->cap = 0U;
+
+    return (void *)sdom;
+}
+
+static int
+csched_dom_init(struct scheduler *ops, struct domain *dom)
+{
+    struct csched_dom *sdom;
+
+    CSCHED_STAT_CRANK(dom_init);
+
+    if ( is_idle_domain(dom) )
+        return 0;
+
+    sdom = csched_alloc_domdata(ops, dom);
+    if ( sdom == NULL )
+        return -ENOMEM;
+
     dom->sched_priv = sdom;

     return 0;
 }

 static void
+csched_free_domdata(struct scheduler *ops, void *data)
+{
+    xfree(data);
+}
+
+static void
 csched_dom_destroy(struct scheduler *ops, struct domain *dom)
 {
     CSCHED_STAT_CRANK(dom_destroy);
-    xfree(CSCHED_DOM(dom));
+    csched_free_domdata(ops, CSCHED_DOM(dom));
 }

 /*
@@ -1147,9 +1166,10 @@ csched_load_balance(struct csched_privat
     int peer_cpu;

     BUG_ON( cpu != snext->vcpu->processor );
+    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));

     /* If this CPU is going offline we shouldn't steal work. */
-    if ( unlikely(!cpu_online(cpu)) )
+    if ( unlikely(!cpu_isset(cpu, *online)) )
         goto out;

     if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1163,7 +1183,6 @@ csched_load_balance(struct csched_privat
      * Peek at non-idling CPUs in the system, starting with our
      * immediate neighbour.
      */
-    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
     cpus_andnot(workers, *online, prv->idlers);
     cpu_clear(cpu, workers);
     peer_cpu = cpu;
@@ -1218,31 +1237,6 @@ csched_schedule(struct scheduler *ops, s
     CSCHED_STAT_CRANK(schedule);
     CSCHED_VCPU_CHECK(current);

-    if ( unlikely(!cpu_isset(cpu, *CSCHED_CPUONLINE(per_cpu(cpupool, cpu)))) )
-    {
-        /* race with switching cpu between pools: when cpu is leaving the
-           pool try to schedule idle vcpu */
-
-        struct list_head * iter;
-
-        snext = scurr;
-        if (is_idle_vcpu(current))
-            goto out;
-
-        if ( vcpu_runnable(current) )
-            __runq_insert(cpu, scurr);
-
-        list_for_each(iter, runq)
-        {
-            snext = __runq_elem(iter);
-            if ( snext->pri == CSCHED_PRI_IDLE )
-                break;
-        }
-        BUG_ON( snext->pri != CSCHED_PRI_IDLE );
-        __runq_remove(snext);
-        goto out;
-    }
-
     /* Update credits */
     if ( !is_idle_vcpu(scurr->vcpu) )
     {
@@ -1273,7 +1267,6 @@ csched_schedule(struct scheduler *ops, s
     else
         snext = csched_load_balance(prv, cpu, snext);

-out:
     /*
      * Update idlers mask if necessary. When we're idling, other CPUs
      * will tickle us when they get extra work.
@@ -1553,6 +1546,8 @@ struct scheduler sched_credit_def = {
     .free_vdata = csched_free_vdata,
     .alloc_pdata = csched_alloc_pdata,
     .free_pdata = csched_free_pdata,
+    .alloc_domdata = csched_alloc_domdata,
+    .free_domdata = csched_free_domdata,

     .tick_suspend = csched_tick_suspend,
     .tick_resume = csched_tick_resume,
Index: xen-4.0.0-testing/xen/common/sched_sedf.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/sched_sedf.c
+++ xen-4.0.0-testing/xen/common/sched_sedf.c
@@ -332,7 +332,7 @@ static inline void __add_to_runqueue_sor
 }


-static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v)
+static void *sedf_alloc_vdata(struct scheduler *ops, struct vcpu *v, void *dd)
 {
     struct sedf_vcpu_info *inf;

@@ -415,20 +415,37 @@ static void sedf_destroy_vcpu(struct sch
     sedf_free_vdata(ops, v->sched_priv);
 }

+static void *
+sedf_alloc_domdata(struct scheduler *ops, struct domain *d)
+{
+    void *mem;
+
+    mem = xmalloc(struct sedf_dom_info);
+    if ( mem == NULL )
+        return NULL;
+
+    memset(mem, 0, sizeof(struct sedf_dom_info));
+
+    return mem;
+}
+
 static int sedf_init_domain(struct scheduler *ops, struct domain *d)
 {
-    d->sched_priv = xmalloc(struct sedf_dom_info);
+    d->sched_priv = sedf_alloc_domdata(ops, d);
     if ( d->sched_priv == NULL )
         return -ENOMEM;

-    memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
-
     return 0;
 }

+static void sedf_free_domdata(struct scheduler *ops, void *data)
+{
+    xfree(data);
+}
+
 static void sedf_destroy_domain(struct scheduler *ops, struct domain *d)
 {
-    xfree(d->sched_priv);
+    sedf_free_domdata(ops, d->sched_priv);
 }

 static int sedf_pick_cpu(struct scheduler *ops, struct vcpu *v)
@@ -1498,6 +1515,8 @@ struct scheduler sched_sedf_def = {
     .free_vdata = sedf_free_vdata,
     .alloc_pdata = sedf_alloc_pdata,
     .free_pdata = sedf_free_pdata,
+    .alloc_domdata = sedf_alloc_domdata,
+    .free_domdata = sedf_free_domdata,

     .do_schedule = sedf_do_schedule,
     .pick_cpu = sedf_pick_cpu,
Index: xen-4.0.0-testing/xen/common/schedule.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/schedule.c
+++ xen-4.0.0-testing/xen/common/schedule.c
@@ -222,7 +222,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
         return 1;
     }

-    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v);
+    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
     if ( v->sched_priv == NULL )
         return 1;

@@ -237,14 +237,23 @@ int sched_move_domain(struct domain *d,
     struct vcpu *v;
     unsigned int new_p;
     void **vcpu_priv;
+    void *domdata;
+
+    domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+    if ( domdata == NULL )
+        return -ENOMEM;

     vcpu_priv = xmalloc_array(void *, d->max_vcpus);
     if ( vcpu_priv == NULL )
+    {
+        SCHED_OP(&(c->sched), free_domdata, domdata);
         return -ENOMEM;
+    }
+
     memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
     for_each_vcpu ( d, v )
     {
-        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v);
+        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
         if ( vcpu_priv[v->vcpu_id] == NULL )
         {
             for_each_vcpu ( d, v )
@@ -253,6 +262,7 @@ int sched_move_domain(struct domain *d,
                 xfree(vcpu_priv[v->vcpu_id]);
             }
             xfree(vcpu_priv);
+            SCHED_OP(&(c->sched), free_domdata, domdata);
             return -ENOMEM;
         }
     }
@@ -276,6 +286,8 @@ int sched_move_domain(struct domain *d,
     }

     d->cpupool = c;
+    SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
+    d->sched_priv = domdata;

     domain_unpause(d);

@@ -1079,7 +1091,7 @@ void schedule_cpu_switch(unsigned int cp
     v = per_cpu(schedule_data, cpu).idle;
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
     if ( c != NULL )
-        vpriv = SCHED_OP(new_ops, alloc_vdata, v);
+        vpriv = SCHED_OP(new_ops, alloc_vdata, v, v->domain->sched_priv);

     spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
Index: xen-4.0.0-testing/xen/include/xen/sched-if.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/xen/sched-if.h
+++ xen-4.0.0-testing/xen/include/xen/sched-if.h
@@ -78,9 +78,12 @@ struct scheduler {
     void         (*deinit)         (struct scheduler *);

     void         (*free_vdata)     (struct scheduler *, void *);
-    void *       (*alloc_vdata)    (struct scheduler *, struct vcpu *);
+    void *       (*alloc_vdata)    (struct scheduler *, struct vcpu *,
+                                    void *);
     void         (*free_pdata)     (struct scheduler *, void *, int);
     void *       (*alloc_pdata)    (struct scheduler *, int);
+    void         (*free_domdata)   (struct scheduler *, void *);
+    void *       (*alloc_domdata)  (struct scheduler *, struct domain *);

     int          (*init_domain)    (struct scheduler *, struct domain *);
     void         (*destroy_domain) (struct scheduler *, struct domain *);
@@ -109,7 +112,6 @@ struct cpupool
     cpumask_t        cpu_valid;      /* all cpus assigned to pool */
     struct cpupool   *next;
     unsigned int     n_dom;
-    int              cpu_in_transit; /* used for adding/removing cpus */
     struct scheduler sched;
 };
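
Note: the scheduler-interface part of this patch replaces per-domain allocation inside dom_init with dedicated alloc_domdata/free_domdata hooks, so that sched_move_domain() can allocate the target pool's per-domain data up front and free the old data only once the move can no longer fail. The standalone sketch below illustrates just that hook pattern; all names (toy_sched, toy_dom_data, move_domain) are placeholders for illustration, not the actual Xen types or functions.

/* Standalone sketch of the alloc_domdata/free_domdata hook pattern
 * used by the patch above (toy names, not the real Xen code). */
#include <stdio.h>
#include <stdlib.h>

struct domain;

struct scheduler {
    void *(*alloc_domdata)(struct domain *d);
    void  (*free_domdata)(void *data);
};

struct domain {
    struct scheduler *sched;   /* scheduler of the pool the domain is in */
    void *sched_priv;          /* per-domain scheduler data */
};

struct toy_dom_data { int weight; };

static void *toy_alloc_domdata(struct domain *d)
{
    struct toy_dom_data *dd = calloc(1, sizeof(*dd));

    (void)d;                   /* unused in this toy scheduler */
    if ( dd != NULL )
        dd->weight = 256;      /* arbitrary default */
    return dd;
}

static void toy_free_domdata(void *data)
{
    free(data);
}

static struct scheduler toy_sched = {
    .alloc_domdata = toy_alloc_domdata,
    .free_domdata  = toy_free_domdata,
};

/* Mirrors the ordering in sched_move_domain() above: allocate the new
 * pool's domain data first; free the old data only after nothing can
 * fail any more. */
static int move_domain(struct domain *d, struct scheduler *new_sched)
{
    void *domdata = new_sched->alloc_domdata(d);

    if ( domdata == NULL )
        return -1;             /* stands in for -ENOMEM */

    d->sched->free_domdata(d->sched_priv);
    d->sched_priv = domdata;
    d->sched = new_sched;
    return 0;
}

int main(void)
{
    struct domain dom = { &toy_sched, toy_alloc_domdata(NULL) };

    if ( move_domain(&dom, &toy_sched) == 0 )
        printf("moved, weight=%d\n",
               ((struct toy_dom_data *)dom.sched_priv)->weight);
    toy_free_domdata(dom.sched_priv);
    return 0;
}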