References: bnc#846849

# Commit 58929248461ecadce13e92eb5a5d9ef718a7c88e
# Date 2013-11-12 11:52:19 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
VMX: don't crash processing 'd' debug key

There's a window during scheduling where "current" and the active VMCS
may disagree: The former gets set much earlier than the latter. Since
both vmx_vmcs_enter() and vmx_vmcs_exit() immediately return when the
subject vCPU is "current", accessing VMCS fields would, depending on
whether there is any currently active VMCS, either read wrong data, or
cause a crash.
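
The fix below therefore splits VMCS entry into a probing variant,
vmx_vmcs_try_enter(). A minimal sketch of the resulting calling pattern,
using a hypothetical caller for illustration only (the real user added by
this patch is vmx_get_segment_register()):

    /* Hypothetical debug-path reader; not part of this patch. */
    static void show_vmcs_state(struct vcpu *v)
    {
        /* Probe rather than assume: we may race with the scheduling tail. */
        if ( !vmx_vmcs_try_enter(v) )
            return;                       /* VMCS not accessible: bail. */

        /* ... VMREAD-based accesses to v's VMCS fields go here ... */

        vmx_vmcs_exit(v);                 /* Balances the successful enter. */
    }
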
Going forward we might want to consider reducing the window during
which vmx_vmcs_enter() might fail (e.g. doing a plain __vmptrld() when
v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) but arch_vmx->active_cpu
== -1), but that would add complexities (acquiring and - more
importantly - properly dropping v->arch.hvm_vmx.vmcs_lock) that don't
look worthwhile adding right now.
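
Purely for illustration, a sketch of that rejected alternative as it might
look inside vmx_vmcs_try_enter(), with the locking and active-list
bookkeeping (the very complexity cited above) deliberately elided; field and
helper names follow the existing vmcs.c code:

    if ( likely(v == current) )
    {
        if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) )
            return 1;

        /*
         * Not done here: if the VMCS is active on no pCPU, a plain VMPTRLD
         * would make it usable on this one. Doing so safely would require
         * acquiring and properly dropping v->arch.hvm_vmx.vmcs_lock.
         */
        if ( v->arch.hvm_vmx.active_cpu == -1 )
        {
            __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
            this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
            return 1;
        }

        return 0;
    }
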
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -591,16 +591,16 @@ struct foreign_vmcs {
 };
 static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
 
-void vmx_vmcs_enter(struct vcpu *v)
+bool_t vmx_vmcs_try_enter(struct vcpu *v)
 {
     struct foreign_vmcs *fv;
 
     /*
      * NB. We must *always* run an HVM VCPU on its own VMCS, except for
-     * vmx_vmcs_enter/exit critical regions.
+     * vmx_vmcs_enter/exit and scheduling tail critical regions.
      */
     if ( likely(v == current) )
-        return;
+        return v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs);
 
     fv = &this_cpu(foreign_vmcs);
 
@@ -623,6 +623,15 @@ void vmx_vmcs_enter(struct vcpu *v)
     }
 
     fv->count++;
+
+    return 1;
+}
+
+void vmx_vmcs_enter(struct vcpu *v)
+{
+    bool_t okay = vmx_vmcs_try_enter(v);
+
+    ASSERT(okay);
 }
 
 void vmx_vmcs_exit(struct vcpu *v)
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -669,7 +669,27 @@ void vmx_get_segment_register(struct vcp
 {
     uint32_t attr = 0;
 
-    vmx_vmcs_enter(v);
+    /*
+     * We may get here in the context of dump_execstate(), which may have
+     * interrupted context switching between setting "current" and
+     * vmx_do_resume() reaching the end of vmx_load_vmcs(). That would make
+     * all the VMREADs below fail if we don't bail right away.
+     */
+    if ( unlikely(!vmx_vmcs_try_enter(v)) )
+    {
+        static bool_t warned;
+
+        if ( !warned )
+        {
+            warned = 1;
+            printk(XENLOG_WARNING "Segment register inaccessible for d%dv%d\n"
+                   "(If you see this outside of debugging activity,"
+                   " please report to xen-devel@lists.xenproject.org)\n",
+                   v->domain->domain_id, v->vcpu_id);
+        }
+        memset(reg, 0, sizeof(*reg));
+        return;
+    }
 
     switch ( seg )
     {
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -144,6 +144,7 @@ struct arch_vmx_struct {
 int vmx_create_vmcs(struct vcpu *v);
 void vmx_destroy_vmcs(struct vcpu *v);
 void vmx_vmcs_enter(struct vcpu *v);
+bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v);
 void vmx_vmcs_exit(struct vcpu *v);
 
 #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004