xen/18509-continue-hypercall-on-cpu.patch

# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1221657190 -3600
# Node ID 366c78ff361bafb2271c551c4976e4caedea72b2
# Parent beb28a3975bd39c93c7934dd5e7ec80c69a86c4a
x86: Allow continue_hypercall_on_cpu() to be called from within an
existing continuation handler. This fix is needed for the new method
of microcode re-programming.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
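
For illustration only (not part of the patch): a minimal sketch of how a continuation handler can now hop from CPU to CPU by re-invoking continue_hypercall_on_cpu() from inside itself, which is the call pattern the new microcode re-programming code relies on. The handler name and the apply_microcode_on_this_cpu() helper below are hypothetical; only the re-entry into continue_hypercall_on_cpu() reflects what this changeset enables.

/* Hypothetical example -- not part of this changeset. */
static long microcode_update_next(void *data)
{
    unsigned int cpu = (unsigned long)data;
    long ret;

    /* Per-CPU work runs while the vcpu is pinned to 'cpu'. */
    ret = apply_microcode_on_this_cpu();   /* hypothetical helper */
    if ( ret != 0 )
        return ret;

    /* Move on to the next online CPU, if any remain. */
    cpu = next_cpu(cpu, cpu_online_map);
    if ( cpu >= NR_CPUS )
        return 0;

    /*
     * Re-enter continue_hypercall_on_cpu() from within the continuation
     * handler -- the case this patch makes legal.  The nest counter keeps
     * the saved affinity and schedule_tail from being torn down until the
     * final hop has completed.
     */
    return continue_hypercall_on_cpu(cpu, microcode_update_next,
                                     (void *)(unsigned long)cpu);
}

Note that the BUG_ON(info->nest != 0) below allows only one such re-entry to be pending at a time: a nested call reuses the already-allocated migrate_info and bumps nest, and continue_hypercall_on_cpu_helper skips the teardown of the saved affinity and schedule_tail until the nesting has unwound.
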
Index: xen-3.3.1-testing/xen/arch/x86/domain.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/domain.c
+++ xen-3.3.1-testing/xen/arch/x86/domain.c
@@ -1356,6 +1356,7 @@ struct migrate_info {
     void *data;
     void (*saved_schedule_tail)(struct vcpu *);
     cpumask_t saved_affinity;
+    unsigned int nest;
 };
 
 static void continue_hypercall_on_cpu_helper(struct vcpu *v)
@@ -1363,48 +1364,64 @@ static void continue_hypercall_on_cpu_he
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct migrate_info *info = v->arch.continue_info;
     cpumask_t mask = info->saved_affinity;
+    void (*saved_schedule_tail)(struct vcpu *) = info->saved_schedule_tail;
 
     regs->eax = info->func(info->data);
 
-    v->arch.schedule_tail = info->saved_schedule_tail;
-    v->arch.continue_info = NULL;
-
-    xfree(info);
+    if ( info->nest-- == 0 )
+    {
+        xfree(info);
+        v->arch.schedule_tail = saved_schedule_tail;
+        v->arch.continue_info = NULL;
+        vcpu_unlock_affinity(v, &mask);
+    }
 
-    vcpu_unlock_affinity(v, &mask);
-    schedule_tail(v);
+    (*saved_schedule_tail)(v);
 }
 
 int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
 {
     struct vcpu *v = current;
     struct migrate_info *info;
+    cpumask_t mask = cpumask_of_cpu(cpu);
     int rc;
 
     if ( cpu == smp_processor_id() )
         return func(data);
 
-    info = xmalloc(struct migrate_info);
+    info = v->arch.continue_info;
     if ( info == NULL )
-        return -ENOMEM;
+    {
+        info = xmalloc(struct migrate_info);
+        if ( info == NULL )
+            return -ENOMEM;
 
-    info->func = func;
-    info->data = data;
-    info->saved_schedule_tail = v->arch.schedule_tail;
-    info->saved_affinity = cpumask_of_cpu(cpu);
+        rc = vcpu_lock_affinity(v, &mask);
+        if ( rc )
+        {
+            xfree(info);
+            return rc;
+        }
 
-    v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
-    v->arch.continue_info = info;
+        info->saved_schedule_tail = v->arch.schedule_tail;
+        info->saved_affinity = mask;
+        info->nest = 0;
 
-    rc = vcpu_lock_affinity(v, &info->saved_affinity);
-    if ( rc )
+        v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
+        v->arch.continue_info = info;
+    }
+    else
     {
-        v->arch.schedule_tail = info->saved_schedule_tail;
-        v->arch.continue_info = NULL;
-        xfree(info);
-        return rc;
+        BUG_ON(info->nest != 0);
+        rc = vcpu_locked_change_affinity(v, &mask);
+        if ( rc )
+            return rc;
+        info->nest++;
     }
 
+    info->func = func;
+    info->data = data;
+
     /* Dummy return value will be overwritten by new schedule_tail. */
     BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
     return 0;
Index: xen-3.3.1-testing/xen/common/schedule.c
===================================================================
--- xen-3.3.1-testing.orig/xen/common/schedule.c
+++ xen-3.3.1-testing/xen/common/schedule.c
@@ -360,6 +360,11 @@ int vcpu_lock_affinity(struct vcpu *v, c
     return __vcpu_set_affinity(v, affinity, 0, 1);
 }
 
+int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    return __vcpu_set_affinity(v, affinity, 1, 1);
+}
+
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity;
Index: xen-3.3.1-testing/xen/include/xen/sched.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/xen/sched.h
+++ xen-3.3.1-testing/xen/include/xen/sched.h
@@ -534,6 +534,7 @@ void vcpu_force_reschedule(struct vcpu *
 void cpu_disable_scheduler(void);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
 int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);