64 lines
2.2 KiB
Diff
# Commit ef55257bc81204e34691f1c2aa9e01f2d0768bdd
# Date 2013-10-14 08:58:31 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
sched: fix race between sched_move_domain() and vcpu_wake()

From: David Vrabel <david.vrabel@citrix.com>

sched_move_domain() changes v->processor for all the domain's VCPUs.
If another domain, softirq etc. triggers a simultaneous call to
vcpu_wake() (e.g., by setting an event channel as pending), then
vcpu_wake() may lock one schedule lock and try to unlock another.

vcpu_schedule_lock() attempts to handle this but only does so for the
window between reading the schedule_lock from the per-CPU data and the
spin_lock() call.  This does not help with sched_move_domain()
changing v->processor between the calls to vcpu_schedule_lock() and
vcpu_schedule_unlock().

Fix the race by taking the schedule_lock for v->processor in
sched_move_domain().

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>

Use vcpu_schedule_lock_irq() (which now returns the lock) to properly
retry the locking should the to-be-used lock have changed in the course
of acquiring it (issue pointed out by George Dunlap).

Add a comment explaining the state after the v->processor adjustment.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>

--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -276,6 +276,8 @@ int sched_move_domain(struct domain *d,
     new_p = cpumask_first(c->cpu_valid);
     for_each_vcpu ( d, v )
     {
+        spinlock_t *lock;
+
         vcpudata = v->sched_priv;
 
         migrate_timer(&v->periodic_timer, new_p);
@@ -283,7 +285,16 @@ int sched_move_domain(struct domain *d,
         migrate_timer(&v->poll_timer, new_p);
 
         cpumask_setall(v->cpu_affinity);
+
+        lock = vcpu_schedule_lock_irq(v);
         v->processor = new_p;
+        /*
+         * With v->processor modified we must not
+         * - make any further changes assuming we hold the scheduler lock,
+         * - use vcpu_schedule_unlock_irq().
+         */
+        spin_unlock_irq(lock);
+
         v->sched_priv = vcpu_priv[v->vcpu_id];
         evtchn_move_pirqs(v);
 