b244ce9e91
sles10. Patch pygrub to get the kernel and initrd from the image.
  pygrub-boot-legacy-sles.patch
- bnc#842515 - VUL-0: CVE-2013-4375: XSA-71: xen: qemu disk backend (qdisk)
  resource leak
  CVE-2013-4375-xsa71.patch
- Upstream patches from Jan
  52496bea-x86-properly-handle-hvm_copy_from_guest_-phys-virt-errors.patch
  (Replaces CVE-2013-4355-xsa63.patch)
  52496c11-x86-mm-shadow-Fix-initialization-of-PV-shadow-L4-tables.patch
  (Replaces CVE-2013-4356-xsa64.patch)
  52496c32-x86-properly-set-up-fbld-emulation-operand-address.patch
  (Replaces CVE-2013-4361-xsa66.patch)
  52497c6c-x86-don-t-blindly-create-L3-tables-for-the-direct-map.patch
  524e971b-x86-idle-Fix-get_cpu_idle_time-s-interaction-with-offline-pcpus.patch
  524e9762-x86-percpu-Force-INVALID_PERCPU_AREA-to-non-canonical.patch
  524e983e-Nested-VMX-check-VMX-capability-before-read-VMX-related-MSRs.patch
  524e98b1-Nested-VMX-fix-IA32_VMX_CR4_FIXED1-msr-emulation.patch
  524e9dc0-xsm-forbid-PV-guest-console-reads.patch
  5256a979-x86-check-segment-descriptor-read-result-in-64-bit-OUTS-emulation.patch
  5256be57-libxl-fix-vif-rate-parsing.patch
  5256be84-tools-ocaml-fix-erroneous-free-of-cpumap-in-stub_xc_vcpu_getaffinity.patch
  5256be92-libxl-fix-out-of-memory-error-handling-in-libxl_list_cpupool.patch
  5257a89a-x86-correct-LDT-checks.patch
  5257a8e7-x86-add-address-validity-check-to-guest_map_l1e.patch
  5257a944-x86-check-for-canonical-address-before-doing-page-walks.patch
  525b95f4-scheduler-adjust-internal-locking-interface.patch
  525b9617-sched-fix-race-between-sched_move_domain-and-vcpu_wake.patch
  525e69e8-credit-unpause-parked-vcpu-before-destroying-it.patch
  525faf5e-x86-print-relevant-tail-part-of-filename-for-warnings-and-crashes.patch
- bnc#840196 - L3: MTU size on Dom0 gets reset when booting DomU

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=276
# Commit eedd60391610629b4e8a2e8278b857ff884f750d
# Date 2013-10-14 08:57:56 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
scheduler: adjust internal locking interface

Make the locking functions return the lock pointers, so they can be
passed to the unlocking functions (which in turn can check that the
lock is still actually providing the intended protection, i.e. the
parameters determining which lock is the right one didn't change).

Further use proper spin lock primitives rather than open coded
local_irq_...() constructs, so that interrupts can be re-enabled as
appropriate while spinning.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
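
Note on the resulting calling convention (an illustrative sketch only, not part of
the patch: the wrapper function below and its name are invented for the example;
the lock/unlock helpers are the ones this patch defines in
xen/include/xen/sched-if.h):

    /* Hypothetical caller showing the new convention: the lock routine
     * returns the spinlock it actually acquired, and the unlock routine
     * takes that pointer back together with the original parameters so
     * it can ASSERT() that the lock/cpu mapping did not change. */
    static void example_update_vcpu_state(struct vcpu *v)
    {
        unsigned long flags;
        spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);

        /* ... manipulate per-vcpu scheduler state under the lock ... */

        vcpu_schedule_unlock_irqrestore(lock, flags, v);
    }

Previously a caller invoked vcpu_schedule_lock_irqsave(v, flags) and
vcpu_schedule_unlock_irqrestore(v, flags) without ever seeing the lock pointer.
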
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1170,6 +1170,7 @@ csched_runq_sort(struct csched_private *
struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
struct list_head *runq, *elem, *next, *last_under;
struct csched_vcpu *svc_elem;
+ spinlock_t *lock;
unsigned long flags;
int sort_epoch;

@@ -1179,7 +1180,7 @@ csched_runq_sort(struct csched_private *

spc->runq_sort_last = sort_epoch;

- pcpu_schedule_lock_irqsave(cpu, flags);
+ lock = pcpu_schedule_lock_irqsave(cpu, &flags);

runq = &spc->runq;
elem = runq->next;
@@ -1204,7 +1205,7 @@ csched_runq_sort(struct csched_private *
elem = next;
}

- pcpu_schedule_unlock_irqrestore(cpu, flags);
+ pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
}

static void
@@ -1568,7 +1569,9 @@ csched_load_balance(struct csched_privat
* could cause a deadlock if the peer CPU is also load
* balancing and trying to lock this CPU.
*/
- if ( !pcpu_schedule_trylock(peer_cpu) )
+ spinlock_t *lock = pcpu_schedule_trylock(peer_cpu);
+
+ if ( !lock )
{
SCHED_STAT_CRANK(steal_trylock_failed);
peer_cpu = cpumask_cycle(peer_cpu, &workers);
@@ -1578,7 +1581,7 @@ csched_load_balance(struct csched_privat
/* Any work over there to steal? */
speer = cpumask_test_cpu(peer_cpu, online) ?
csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) : NULL;
- pcpu_schedule_unlock(peer_cpu);
+ pcpu_schedule_unlock(lock, peer_cpu);

/* As soon as one vcpu is found, balancing ends */
if ( speer != NULL )
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -881,15 +881,17 @@ csched_vcpu_insert(const struct schedule
*/
if ( ! is_idle_vcpu(vc) )
{
+ spinlock_t *lock;
+
/* FIXME: Do we need the private lock here? */
list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);

/* Add vcpu to runqueue of initial processor */
- vcpu_schedule_lock_irq(vc);
+ lock = vcpu_schedule_lock_irq(vc);

runq_assign(ops, vc);

- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);

sdom->nr_vcpus++;
}
@@ -916,14 +918,16 @@ csched_vcpu_remove(const struct schedule

if ( ! is_idle_vcpu(vc) )
{
+ spinlock_t *lock;
+
SCHED_STAT_CRANK(vcpu_destroy);

/* Remove from runqueue */
- vcpu_schedule_lock_irq(vc);
+ lock = vcpu_schedule_lock_irq(vc);

runq_deassign(ops, vc);

- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);

/* Remove from sdom list. Don't need a lock for this, as it's called
* syncronously when nothing else can happen. */
@@ -1010,8 +1014,7 @@ csched_context_saved(const struct schedu
{
struct csched_vcpu * const svc = CSCHED_VCPU(vc);
s_time_t now = NOW();
-
- vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = vcpu_schedule_lock_irq(vc);

BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));

@@ -1037,7 +1040,7 @@ csched_context_saved(const struct schedu
else if ( !is_idle_vcpu(vc) )
update_load(ops, svc->rqd, svc, -1, now);

- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
}

#define MAX_LOAD (1ULL<<60);
@@ -1454,14 +1457,14 @@ csched_dom_cntl(
* must never lock csched_priv.lock if we're holding a runqueue lock.
* Also, calling vcpu_schedule_lock() is enough, since IRQs have already
* been disabled. */
- vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);

BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));

svc->weight = sdom->weight;
update_max_weight(svc->rqd, svc->weight, old_weight);

- vcpu_schedule_unlock(svc->vcpu);
+ vcpu_schedule_unlock(lock, svc->vcpu);
}
}
}
@@ -1991,6 +1994,7 @@ static void init_pcpu(const struct sched
cpumask_set_cpu(cpu, &rqd->idle);
cpumask_set_cpu(cpu, &rqd->active);

+ /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
spin_unlock(old_lock);

cpumask_set_cpu(cpu, &prv->initialized);
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1350,14 +1350,16 @@ static int sedf_adjust_weights(struct cp
if ( EDOM_INFO(p)->weight )
{
/* Interrupts already off */
- vcpu_schedule_lock(p);
+ spinlock_t *lock = vcpu_schedule_lock(p);
+
EDOM_INFO(p)->period_orig =
EDOM_INFO(p)->period = WEIGHT_PERIOD;
EDOM_INFO(p)->slice_orig =
EDOM_INFO(p)->slice =
(EDOM_INFO(p)->weight *
(WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
- vcpu_schedule_unlock(p);
+
+ vcpu_schedule_unlock(lock, p);
}
}
}
@@ -1418,21 +1420,24 @@ static int sedf_adjust(const struct sche
{
/* (Here and everywhere in the following) IRQs are already off,
* hence vcpu_spin_lock() is the one. */
- vcpu_schedule_lock(v);
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->extraweight = op->u.sedf.weight;
EDOM_INFO(v)->weight = 0;
EDOM_INFO(v)->slice = 0;
EDOM_INFO(v)->period = WEIGHT_PERIOD;
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
else
{
/* Weight-driven domains with real-time execution */
- for_each_vcpu ( p, v ) {
- vcpu_schedule_lock(v);
+ for_each_vcpu ( p, v )
+ {
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->weight = op->u.sedf.weight;
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
}
@@ -1454,14 +1459,15 @@ static int sedf_adjust(const struct sche
/* Time-driven domains */
for_each_vcpu ( p, v )
{
- vcpu_schedule_lock(v);
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->weight = 0;
EDOM_INFO(v)->extraweight = 0;
EDOM_INFO(v)->period_orig =
EDOM_INFO(v)->period = op->u.sedf.period;
EDOM_INFO(v)->slice_orig =
EDOM_INFO(v)->slice = op->u.sedf.slice;
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}

@@ -1471,13 +1477,14 @@ static int sedf_adjust(const struct sche

for_each_vcpu ( p, v )
{
- vcpu_schedule_lock(v);
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->status =
(EDOM_INFO(v)->status &
~EXTRA_AWARE) | (op->u.sedf.extratime & EXTRA_AWARE);
EDOM_INFO(v)->latency = op->u.sedf.latency;
extraq_check(v);
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -160,18 +160,16 @@ static inline void vcpu_runstate_change(

void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
+ spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
s_time_t delta;

- if ( unlikely(v != current) )
- vcpu_schedule_lock_irq(v);
-
memcpy(runstate, &v->runstate, sizeof(*runstate));
delta = NOW() - runstate->state_entry_time;
if ( delta > 0 )
runstate->time[runstate->state] += delta;

- if ( unlikely(v != current) )
- vcpu_schedule_unlock_irq(v);
+ if ( unlikely(lock != NULL) )
+ vcpu_schedule_unlock_irq(lock, v);
}

uint64_t get_cpu_idle_time(unsigned int cpu)
@@ -333,8 +331,7 @@ void sched_destroy_domain(struct domain
void vcpu_sleep_nosync(struct vcpu *v)
{
unsigned long flags;
-
- vcpu_schedule_lock_irqsave(v, flags);
+ spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);

if ( likely(!vcpu_runnable(v)) )
{
@@ -344,7 +341,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
SCHED_OP(VCPU2OP(v), sleep, v);
}

- vcpu_schedule_unlock_irqrestore(v, flags);
+ vcpu_schedule_unlock_irqrestore(lock, flags, v);

TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}
@@ -362,8 +359,7 @@ void vcpu_sleep_sync(struct vcpu *v)
void vcpu_wake(struct vcpu *v)
{
unsigned long flags;
-
- vcpu_schedule_lock_irqsave(v, flags);
+ spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);

if ( likely(vcpu_runnable(v)) )
{
@@ -377,7 +373,7 @@ void vcpu_wake(struct vcpu *v)
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
}

- vcpu_schedule_unlock_irqrestore(v, flags);
+ vcpu_schedule_unlock_irqrestore(lock, flags, v);

TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}
@@ -528,10 +524,11 @@ static void vcpu_migrate(struct vcpu *v)
*/
void vcpu_force_reschedule(struct vcpu *v)
{
- vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);
+
if ( v->is_running )
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);

if ( test_bit(_VPF_migrating, &v->pause_flags) )
{
@@ -546,7 +543,7 @@ void restore_vcpu_affinity(struct domain

for_each_vcpu ( d, v )
{
- vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);

if ( v->affinity_broken )
{
@@ -559,13 +556,13 @@ void restore_vcpu_affinity(struct domain
if ( v->processor == smp_processor_id() )
{
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
vcpu_sleep_nosync(v);
vcpu_migrate(v);
}
else
{
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
}
}

@@ -592,7 +589,7 @@ int cpu_disable_scheduler(unsigned int c
{
for_each_vcpu ( d, v )
{
- vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);

cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
if ( cpumask_empty(&online_affinity) &&
@@ -613,13 +610,13 @@ int cpu_disable_scheduler(unsigned int c
if ( v->processor == cpu )
{
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
vcpu_sleep_nosync(v);
vcpu_migrate(v);
}
else
{
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
}

/*
@@ -646,6 +643,7 @@ int vcpu_set_affinity(struct vcpu *v, co
{
cpumask_t online_affinity;
cpumask_t *online;
+ spinlock_t *lock;

if ( v->domain->is_pinned )
return -EINVAL;
@@ -654,7 +652,7 @@ int vcpu_set_affinity(struct vcpu *v, co
if ( cpumask_empty(&online_affinity) )
return -EINVAL;

- vcpu_schedule_lock_irq(v);
+ lock = vcpu_schedule_lock_irq(v);

cpumask_copy(v->cpu_affinity, affinity);

@@ -662,7 +660,7 @@ int vcpu_set_affinity(struct vcpu *v, co
* when changing the affinity */
set_bit(_VPF_migrating, &v->pause_flags);

- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);

domain_update_node_affinity(v->domain);

@@ -776,10 +774,10 @@ static long do_poll(struct sched_poll *s
static long do_yield(void)
{
struct vcpu * v=current;
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);

- vcpu_schedule_lock_irq(v);
SCHED_OP(VCPU2OP(v), yield, v);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);

TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
@@ -1140,6 +1138,7 @@ static void schedule(void)
unsigned long *tasklet_work = &this_cpu(tasklet_work_to_do);
bool_t tasklet_work_scheduled = 0;
struct schedule_data *sd;
+ spinlock_t *lock;
struct task_slice next_slice;
int cpu = smp_processor_id();

@@ -1166,7 +1165,7 @@ static void schedule(void)
BUG();
}

- pcpu_schedule_lock_irq(cpu);
+ lock = pcpu_schedule_lock_irq(cpu);

stop_timer(&sd->s_timer);

@@ -1183,7 +1182,7 @@ static void schedule(void)

if ( unlikely(prev == next) )
{
- pcpu_schedule_unlock_irq(cpu);
+ pcpu_schedule_unlock_irq(lock, cpu);
trace_continue_running(next);
return continue_running(prev);
}
@@ -1221,7 +1220,7 @@ static void schedule(void)
ASSERT(!next->is_running);
next->is_running = 1;

- pcpu_schedule_unlock_irq(cpu);
+ pcpu_schedule_unlock_irq(lock, cpu);

SCHED_STAT_CRANK(sched_ctx);

@@ -1408,6 +1407,7 @@ int schedule_cpu_switch(unsigned int cpu
{
unsigned long flags;
struct vcpu *idle;
+ spinlock_t *lock;
void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
struct scheduler *old_ops = per_cpu(scheduler, cpu);
struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
@@ -1426,7 +1426,7 @@ int schedule_cpu_switch(unsigned int cpu
return -ENOMEM;
}

- pcpu_schedule_lock_irqsave(cpu, flags);
+ lock = pcpu_schedule_lock_irqsave(cpu, &flags);

SCHED_OP(old_ops, tick_suspend, cpu);
vpriv_old = idle->sched_priv;
@@ -1437,7 +1437,7 @@ int schedule_cpu_switch(unsigned int cpu
SCHED_OP(new_ops, tick_resume, cpu);
SCHED_OP(new_ops, insert_vcpu, idle);

- pcpu_schedule_unlock_irqrestore(cpu, flags);
+ pcpu_schedule_unlock_irqrestore(lock, flags, cpu);

SCHED_OP(old_ops, free_vdata, vpriv_old);
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
@@ -1495,10 +1495,11 @@ void schedule_dump(struct cpupool *c)

for_each_cpu (i, cpus)
{
- pcpu_schedule_lock(i);
+ spinlock_t *lock = pcpu_schedule_lock(i);
+
printk("CPU[%02d] ", i);
SCHED_OP(sched, dump_cpu_state, i);
- pcpu_schedule_unlock(i);
+ pcpu_schedule_unlock(lock, i);
}
}

--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -47,96 +47,70 @@ DECLARE_PER_CPU(struct schedule_data, sc
DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);

-static inline spinlock_t * pcpu_schedule_lock(int cpu)
-{
- spinlock_t * lock=NULL;
-
- for ( ; ; )
- {
- /* The per_cpu(v->processor) may also change, if changing
- * cpu pool also changes the scheduler lock. Retry
- * until they match.
- */
- lock=per_cpu(schedule_data, cpu).schedule_lock;
-
- spin_lock(lock);
- if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
- break;
- spin_unlock(lock);
- }
- return lock;
+#define sched_lock(kind, param, cpu, irq, arg...) \
+static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+{ \
+ for ( ; ; ) \
+ { \
+ spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+ /* \
+ * v->processor may change when grabbing the lock; but \
+ * per_cpu(v->processor) may also change, if changing cpu pool \
+ * also changes the scheduler lock. Retry until they match. \
+ * \
+ * It may also be the case that v->processor may change but the \
+ * lock may be the same; this will succeed in that case. \
+ */ \
+ spin_lock##irq(lock, ## arg); \
+ if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+ return lock; \
+ spin_unlock##irq(lock, ## arg); \
+ } \
}

-static inline int pcpu_schedule_trylock(int cpu)
-{
- spinlock_t * lock=NULL;
-
- lock=per_cpu(schedule_data, cpu).schedule_lock;
- if ( ! spin_trylock(lock) )
- return 0;
- if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
- return 1;
- else
- {
- spin_unlock(lock);
- return 0;
- }
+#define sched_unlock(kind, param, cpu, irq, arg...) \
+static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
+ EXTRA_TYPE(arg), param) \
+{ \
+ ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+ spin_unlock##irq(lock, ## arg); \
}

-#define pcpu_schedule_lock_irq(p) \
- do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
-#define pcpu_schedule_lock_irqsave(p, flags) \
- do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
+#define EXTRA_TYPE(arg)
+sched_lock(pcpu, unsigned int cpu, cpu, )
+sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(pcpu, unsigned int cpu, cpu, _irq)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(pcpu, unsigned int cpu, cpu, )
+sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+#undef EXTRA_TYPE
+
+#define EXTRA_TYPE(arg) , unsigned long arg
+#define spin_unlock_irqsave spin_unlock_irqrestore
+sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+#undef spin_unlock_irqsave
+sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+#undef EXTRA_TYPE
+
+#undef sched_unlock
+#undef sched_lock

-static inline void pcpu_schedule_unlock(int cpu)
+static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
{
- spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
-}
+ spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;

-#define pcpu_schedule_unlock_irq(p) \
- do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
-#define pcpu_schedule_unlock_irqrestore(p, flags) \
- do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
-
-static inline void vcpu_schedule_lock(struct vcpu *v)
-{
- spinlock_t * lock;
-
- for ( ; ; )
- {
- /* v->processor may change when grabbing the lock; but
- * per_cpu(v->processor) may also change, if changing
- * cpu pool also changes the scheduler lock. Retry
- * until they match.
- *
- * It may also be the case that v->processor may change
- * but the lock may be the same; this will succeed
- * in that case.
- */
- lock=per_cpu(schedule_data, v->processor).schedule_lock;
-
- spin_lock(lock);
- if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
- break;
- spin_unlock(lock);
- }
-}
-
-#define vcpu_schedule_lock_irq(v) \
- do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
-#define vcpu_schedule_lock_irqsave(v, flags) \
- do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
-
-static inline void vcpu_schedule_unlock(struct vcpu *v)
-{
- spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
+ if ( !spin_trylock(lock) )
+ return NULL;
+ if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+ return lock;
+ spin_unlock(lock);
+ return NULL;
}

-#define vcpu_schedule_unlock_irq(v) \
- do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
-#define vcpu_schedule_unlock_irqrestore(v, flags) \
- do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
-
struct task_slice {
struct vcpu *task;
s_time_t time;