# Commit 41a0cc9e26160a89245c9ba3233e3f70bf9cd4b4
# Date 2013-10-29 09:57:14 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
fix locking in cpu_disable_scheduler()

So commit eedd6039 ("scheduler: adjust internal locking interface")
uncovered - by now using proper spin lock constructs - a bug after all:
When bringing down a CPU, cpu_disable_scheduler() gets called with
interrupts disabled, and hence the use of vcpu_schedule_lock_irq() was
never really correct (i.e. the caller ended up with interrupts enabled
despite having disabled them explicitly).

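To illustrate, here is a minimal user-space model of the difference -
not Xen code; the IRQ flag and the lock are plain booleans standing in
for the real primitives - showing why the _irq variants cannot be used
by a caller that already has interrupts disabled: the unlock path
re-enables interrupts unconditionally, while the _irqsave/_irqrestore
pair restores whatever state the caller had.

/* Illustrative user-space model only (not Xen's spin lock code). */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;   /* stands in for the CPU's IRQ flag */
static bool locked;                /* stands in for the spin lock itself */

static void local_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void)  { irqs_enabled = true; }

/* _irq variants: disable IRQs on lock, unconditionally re-enable on unlock */
static void lock_irq(void)   { local_irq_disable(); locked = true; }
static void unlock_irq(void) { locked = false; local_irq_enable(); }

/* _irqsave variants: remember the previous IRQ state and restore it */
static void lock_irqsave(bool *flags)
{
    *flags = irqs_enabled;
    local_irq_disable();
    locked = true;
}

static void unlock_irqrestore(bool flags)
{
    locked = false;
    irqs_enabled = flags;
}

int main(void)
{
    bool flags;

    /* the caller has already disabled interrupts, as is the case when
     * cpu_disable_scheduler() runs during CPU teardown */
    local_irq_disable();
    lock_irq();
    unlock_irq();
    printf("_irq pair:     IRQs back %s (bug: caller had them off)\n",
           irqs_enabled ? "on" : "off");

    local_irq_disable();
    lock_irqsave(&flags);
    unlock_irqrestore(flags);
    printf("_irqsave pair: IRQs back %s (prior state preserved)\n",
           irqs_enabled ? "on" : "off");
    return 0;
}

This is exactly the shape of the schedule.c change below: switch from
the unconditional _irq pair to the state-preserving _irqsave pair.
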
Fixing this however surfaced another problem: the call path
vcpu_migrate() -> evtchn_move_pirqs() wants to acquire the event lock,
which is a non-IRQ-safe one, and hence check_lock() doesn't like this
lock to be acquired when interrupts are already off. As we're in
stop-machine context here, getting things wrong wrt interrupt state
management during lock acquire/release is out of the question, so the
simple solution appears to be to just suppress spin lock debugging for
the period of time while the stop machine callback gets run.

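For context, a rough user-space sketch of the kind of invariant
check_lock() enforces - hypothetical and heavily simplified, not the
actual Xen implementation: each lock is classified by whether it is
taken with interrupts enabled, a mismatch is treated as an error, and a
global gate (the spin_debug_disable()/spin_debug_enable() pair added
below) suspends the check.

/* Simplified model only; Xen's real check_lock() differs in detail. */
#include <assert.h>
#include <stdbool.h>

static bool spin_debug = true;              /* global debug gate */
static void spin_debug_disable(void) { spin_debug = false; }
static void spin_debug_enable(void)  { spin_debug = true; }

struct lock_debug {
    int irq_safe;   /* -1: not yet known, 0: taken with IRQs on, 1: off */
};

static void check_lock(struct lock_debug *debug, bool irqs_enabled)
{
    int irq_safe = !irqs_enabled;

    if ( !spin_debug )
        return;                 /* checking suppressed, e.g. stop-machine */

    if ( debug->irq_safe < 0 )
        debug->irq_safe = irq_safe;   /* first acquire classifies the lock */

    /* taking a lock sometimes with IRQs on and sometimes off risks
     * deadlock against an interrupt handler, so flag the mismatch */
    assert(debug->irq_safe == irq_safe);
}

int main(void)
{
    struct lock_debug event_lock = { .irq_safe = -1 };

    check_lock(&event_lock, true);    /* event lock normally IRQs-on */

    spin_debug_disable();             /* as stop_machine_run() now does */
    check_lock(&event_lock, false);   /* tolerated during the callback */
    spin_debug_enable();

    return 0;
}
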
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>

--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -600,7 +600,8 @@ int cpu_disable_scheduler(unsigned int c
     {
         for_each_vcpu ( d, v )
         {
-            spinlock_t *lock = vcpu_schedule_lock_irq(v);
+            unsigned long flags;
+            spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
 
             cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
@@ -621,14 +622,12 @@ int cpu_disable_scheduler(unsigned int c
             if ( v->processor == cpu )
             {
                 set_bit(_VPF_migrating, &v->pause_flags);
-                vcpu_schedule_unlock_irq(lock, v);
+                vcpu_schedule_unlock_irqrestore(lock, flags, v);
                 vcpu_sleep_nosync(v);
                 vcpu_migrate(v);
             }
             else
-            {
-                vcpu_schedule_unlock_irq(lock, v);
-            }
+                vcpu_schedule_unlock_irqrestore(lock, flags, v);
 
             /*
              * A vcpu active in the hypervisor will not be migratable.
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -110,6 +110,7 @@ int stop_machine_run(int (*fn)(void *),
     local_irq_disable();
     stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
     stopmachine_wait_state();
+    spin_debug_disable();
 
     stopmachine_set_state(STOPMACHINE_INVOKE);
     if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) )
@@ -117,6 +118,7 @@ int stop_machine_run(int (*fn)(void *),
     stopmachine_wait_state();
     ret = stopmachine_data.fn_result;
 
+    spin_debug_enable();
     stopmachine_set_state(STOPMACHINE_EXIT);
     stopmachine_wait_state();
     local_irq_enable();