Index: 2007-05-14/xen/arch/x86/domain.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/domain.c	2007-07-02 11:19:12.000000000 +0200
+++ 2007-05-14/xen/arch/x86/domain.c	2007-07-02 11:19:31.000000000 +0200
@@ -47,6 +47,7 @@
 #endif
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
+DEFINE_PER_CPU(__u64, efer);
 
 static void paravirt_ctxt_switch_from(struct vcpu *v);
 static void paravirt_ctxt_switch_to(struct vcpu *v);
@@ -1150,21 +1151,18 @@ void context_switch(struct vcpu *prev, s
         __context_switch();
 
 #ifdef CONFIG_COMPAT
-        if ( is_idle_vcpu(prev) ||
-             (is_pv_32on64_domain(prev->domain) !=
-              is_pv_32on64_domain(next->domain)) )
+        if ( !is_hvm_vcpu(next) &&
+             (is_idle_vcpu(prev) ||
+              is_hvm_vcpu(prev) ||
+              is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) )
         {
-            uint32_t efer_lo, efer_hi;
+            uint64_t efer = read_efer();
 
             local_flush_tlb_one(GDT_VIRT_START(next) +
                                 FIRST_RESERVED_GDT_BYTE);
 
-            rdmsr(MSR_EFER, efer_lo, efer_hi);
-            if ( !is_pv_32on64_domain(next->domain) == !(efer_lo & EFER_SCE) )
-            {
-                efer_lo ^= EFER_SCE;
-                wrmsr(MSR_EFER, efer_lo, efer_hi);
-            }
+            if ( !is_pv_32on64_vcpu(next) == !(efer & EFER_SCE) )
+                write_efer(efer ^ EFER_SCE);
         }
 #endif
 
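The rewritten condition in context_switch() relies on double negation to
normalize both sides to 0 or 1: EFER.SCE gets toggled exactly when the
incoming (non-HVM) vCPU's requirement disagrees with what the hardware
currently has. A minimal user-space sketch of the idiom, with cached_efer
and sync_sce as illustrative stand-ins for the per-CPU shadow this patch
introduces (see the msr.h hunk at the end):

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE (1ULL << 0)          /* SYSCALL enable, bit 0 of EFER */

    static uint64_t cached_efer;          /* stand-in for the per-CPU shadow */
    static uint64_t read_efer(void)      { return cached_efer; }
    static void write_efer(uint64_t val) { cached_efer = val; }

    /* SCE must be set exactly when the incoming vCPU is a 64-bit PV guest. */
    static void sync_sce(int next_is_32on64)
    {
        uint64_t efer = read_efer();

        /* "want SCE set" == "SCE currently clear" means they disagree */
        if ( !next_is_32on64 == !(efer & EFER_SCE) )
            write_efer(efer ^ EFER_SCE);
    }

    int main(void)
    {
        cached_efer = EFER_SCE;           /* a 64-bit PV guest was running */
        sync_sce(1);                      /* switch to a 32-on-64 guest... */
        printf("SCE: %s\n", (cached_efer & EFER_SCE) ? "set" : "clear");
        return 0;                         /* ...prints "SCE: clear" */
    }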
Index: 2007-05-14/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/svm.c	2007-05-14 13:47:25.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/svm.c	2007-05-14 14:28:19.000000000 +0200
@@ -96,11 +96,8 @@ static inline void svm_inject_exception(
 
 static void stop_svm(void)
 {
-    u32 eax, edx;
     /* We turn off the EFER_SVME bit. */
-    rdmsr(MSR_EFER, eax, edx);
-    eax &= ~EFER_SVME;
-    wrmsr(MSR_EFER, eax, edx);
+    write_efer(read_efer() & ~EFER_SVME);
 }
 
 static void svm_store_cpu_guest_regs(
@@ -145,7 +142,13 @@ static inline int long_mode_do_msr_write
     {
     case MSR_EFER:
         /* Offending reserved bit will cause #GP. */
-        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+#ifdef __x86_64__
+        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
+#else
+        if ( (msr_content & ~(EFER_NX | EFER_SCE)) ||
+#endif
+             (!cpu_has_nx && (msr_content & EFER_NX)) ||
+             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
         {
             gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                      "EFER: %"PRIx64"\n", msr_content);
@@ -502,7 +505,7 @@ int svm_vmcb_restore(struct vcpu *v, str
 }
 
 
-void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -518,7 +521,7 @@ void svm_save_cpu_state(struct vcpu *v,
 }
 
 
-void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -537,13 +540,13 @@ void svm_load_cpu_state(struct vcpu *v,
     hvm_set_guest_time(v, data->tsc);
 }
 
-void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     svm_save_cpu_state(v, ctxt);
     svm_vmcb_save(v, ctxt);
 }
 
-int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     svm_load_cpu_state(v, ctxt);
     if (svm_vmcb_restore(v, ctxt)) {
@@ -871,6 +874,7 @@ static struct hvm_function_table svm_fun
     .paging_enabled       = svm_paging_enabled,
     .long_mode_enabled    = svm_long_mode_enabled,
     .pae_enabled          = svm_pae_enabled,
+    .nx_enabled           = svm_nx_enabled,
     .interrupts_enabled   = svm_interrupts_enabled,
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_guest_ctrl_reg   = svm_get_ctrl_reg,
@@ -927,9 +931,7 @@ int start_svm(void)
          ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
         return 0;
 
-    rdmsr(MSR_EFER, eax, edx);
-    eax |= EFER_SVME;
-    wrmsr(MSR_EFER, eax, edx);
+    write_efer(read_efer() | EFER_SVME);
 
     svm_npt_detect();
 
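Note the strengthened check in long_mode_do_msr_write(): previously only
truly reserved bits faulted; now EFER.NX and EFER.SCE are also rejected
when the host CPU lacks the corresponding feature, so a guest can no
longer enable what the hardware cannot deliver. A self-contained sketch
of the predicate as I read it (the cpu_has_* flags are plain parameters
here, standing in for the real feature tests):

    #include <stdint.h>

    #define EFER_SCE (1ULL << 0)
    #define EFER_LME (1ULL << 8)
    #define EFER_LMA (1ULL << 10)
    #define EFER_NX  (1ULL << 11)

    /* Returns nonzero when the guest-requested EFER value must #GP. */
    static int efer_write_faults(uint64_t msr_content,
                                 int cpu_has_nx, int cpu_has_syscall)
    {
        const uint64_t settable = EFER_LME | EFER_LMA | EFER_NX | EFER_SCE;

        return (msr_content & ~settable) ||                 /* reserved bit */
               (!cpu_has_nx && (msr_content & EFER_NX)) ||  /* no NX on CPU */
               (!cpu_has_syscall && (msr_content & EFER_SCE)); /* no SYSCALL */
    }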
Index: 2007-05-14/xen/arch/x86/hvm/vmx/vmcs.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/vmx/vmcs.c	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/vmx/vmcs.c	2007-05-14 14:28:19.000000000 +0200
@@ -285,11 +285,6 @@ static void construct_vmcs(struct vcpu *
 
     vmx_vmcs_enter(v);
 
-    v->arch.hvm_vmx.cpu_cr2 = 0;
-    v->arch.hvm_vmx.cpu_cr3 = 0;
-    memset(&v->arch.hvm_vmx.msr_state, 0, sizeof(v->arch.hvm_vmx.msr_state));
-    v->arch.hvm_vmx.vmxassist_enabled = 0;
-
     /* VMCS controls. */
     __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
     __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
Index: 2007-05-14/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/vmx/vmx.c	2007-05-14 13:47:25.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/vmx/vmx.c	2007-05-14 14:28:19.000000000 +0200
@@ -89,7 +89,7 @@ static DEFINE_PER_CPU(struct vmx_msr_sta
 static u32 msr_index[VMX_MSR_COUNT] =
 {
     MSR_LSTAR, MSR_STAR, MSR_CSTAR,
-    MSR_SYSCALL_MASK, MSR_EFER,
+    MSR_SYSCALL_MASK
 };
 
 static void vmx_save_host_msrs(void)
@@ -117,8 +117,7 @@ static inline int long_mode_do_msr_read(
 
     switch ( (u32)regs->ecx ) {
     case MSR_EFER:
-        HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
-        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_EFER];
+        msr_content = v->arch.hvm_vmx.efer;
         break;
 
     case MSR_FS_BASE:
@@ -130,7 +129,7 @@ static inline int long_mode_do_msr_read(
         goto check_long_mode;
 
     case MSR_SHADOW_GS_BASE:
-        msr_content = guest_msr_state->shadow_gs;
+        msr_content = v->arch.hvm_vmx.shadow_gs;
     check_long_mode:
         if ( !(vmx_long_mode_enabled(v)) )
         {
@@ -182,7 +181,9 @@ static inline int long_mode_do_msr_write
     {
     case MSR_EFER:
         /* offending reserved bit will cause #GP */
-        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
+             (!cpu_has_nx && (msr_content & EFER_NX)) ||
+             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
         {
             gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                      "EFER: %"PRIx64"\n", msr_content);
@@ -190,7 +191,7 @@ static inline int long_mode_do_msr_write
         }
 
         if ( (msr_content & EFER_LME)
-             && !(guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
+             && !(v->arch.hvm_vmx.efer & EFER_LME) )
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
@@ -200,7 +201,7 @@ static inline int long_mode_do_msr_write
             }
         }
         else if ( !(msr_content & EFER_LME)
-                  && (guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
+                  && (v->arch.hvm_vmx.efer & EFER_LME) )
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
@@ -210,7 +211,11 @@ static inline int long_mode_do_msr_write
             }
         }
 
-        guest_msr_state->msrs[VMX_INDEX_MSR_EFER] = msr_content;
+        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
+            write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+                       (msr_content & (EFER_NX|EFER_SCE)));
+
+        v->arch.hvm_vmx.efer = msr_content;
         break;
 
     case MSR_FS_BASE:
@@ -228,7 +233,7 @@ static inline int long_mode_do_msr_write
             __vmwrite(GUEST_GS_BASE, msr_content);
         else
         {
-            v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
+            v->arch.hvm_vmx.shadow_gs = msr_content;
             wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
         }
 
@@ -280,12 +285,14 @@ static void vmx_restore_host_msrs(void)
         wrmsrl(msr_index[i], host_msr_state->msrs[i]);
         clear_bit(i, &host_msr_state->flags);
     }
+    if ( !(read_efer() & EFER_NX) )
+        write_efer(read_efer() | EFER_NX);
 }
 
 static void vmx_save_guest_msrs(struct vcpu *v)
 {
     /* MSR_SHADOW_GS_BASE may have been changed by swapgs instruction. */
-    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_state.shadow_gs);
+    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
@@ -297,11 +304,9 @@ static void vmx_restore_guest_msrs(struc
     guest_msr_state = &v->arch.hvm_vmx.msr_state;
     host_msr_state = &this_cpu(host_msr_state);
 
-    wrmsrl(MSR_SHADOW_GS_BASE, guest_msr_state->shadow_gs);
+    wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
 
     guest_flags = guest_msr_state->flags;
-    if ( !guest_flags )
-        return;
 
     while ( guest_flags ) {
         i = find_first_set_bit(guest_flags);
@@ -313,23 +318,90 @@ static void vmx_restore_guest_msrs(struc
         wrmsrl(msr_index[i], guest_msr_state->msrs[i]);
         clear_bit(i, &guest_flags);
     }
+
+    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX|EFER_SCE) )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_2,
+                    "restore guest's EFER with value %lx",
+                    v->arch.hvm_vmx.efer);
+        write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+                   (v->arch.hvm_vmx.efer & (EFER_NX|EFER_SCE)));
+    }
 }
 
 #else  /* __i386__ */
 
 #define vmx_save_host_msrs()        ((void)0)
-#define vmx_restore_host_msrs()     ((void)0)
+
+static void vmx_restore_host_msrs(void)
+{
+    if ( !(read_efer() & EFER_NX) )
+        write_efer(read_efer() | EFER_NX);
+}
+
 #define vmx_save_guest_msrs(v)      ((void)0)
-#define vmx_restore_guest_msrs(v)   ((void)0)
+
+static void vmx_restore_guest_msrs(struct vcpu *v)
+{
+    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_2,
+                    "restore guest's EFER with value %lx",
+                    v->arch.hvm_vmx.efer);
+        write_efer((read_efer() & ~EFER_NX) |
+                   (v->arch.hvm_vmx.efer & EFER_NX));
+    }
+}
 
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
-    return 0;
+    u64 msr_content = 0;
+    struct vcpu *v = current;
+
+    switch ( regs->ecx ) {
+    case MSR_EFER:
+        msr_content = v->arch.hvm_vmx.efer;
+        break;
+
+    default:
+        return 0;
+    }
+
+    regs->eax = msr_content >>  0;
+    regs->edx = msr_content >> 32;
+
+    return 1;
 }
 
 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
-    return 0;
+    u64 msr_content = regs->eax | ((u64)regs->edx << 32);
+    struct vcpu *v = current;
+
+    switch ( regs->ecx )
+    {
+    case MSR_EFER:
+        /* offending reserved bit will cause #GP */
+        if ( (msr_content & ~EFER_NX) ||
+             (!cpu_has_nx && (msr_content & EFER_NX)) )
+        {
+            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+                     "EFER: %"PRIx64"\n", msr_content);
+            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+            return 0;
+        }
+
+        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
+            write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
+
+        v->arch.hvm_vmx.efer = msr_content;
+        break;
+
+    default:
+        return 0;
+    }
+
+    return 1;
 }
 
 #endif /* __i386__ */
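On the VMX side, host and guest now share the hardware EFER, and only the
NX/SCE bits get swapped on the save/restore paths; LME, LMA and SVME stay
whatever the host needs. A sketch of the masked merge used above
(cached_efer and restore_guest_nx_sce are illustrative names, not the
real symbols):

    #include <stdint.h>

    #define EFER_SCE (1ULL << 0)
    #define EFER_NX  (1ULL << 11)

    static uint64_t cached_efer;          /* per-CPU shadow stand-in */
    static uint64_t read_efer(void)      { return cached_efer; }
    static void write_efer(uint64_t val) { cached_efer = val; }

    /* Adopt only the guest's NX/SCE settings; keep the host's other bits. */
    static void restore_guest_nx_sce(uint64_t guest_efer)
    {
        const uint64_t mask = EFER_NX | EFER_SCE;

        if ( (guest_efer ^ read_efer()) & mask )   /* skip the MSR write  */
            write_efer((read_efer() & ~mask) |     /* when nothing would  */
                       (guest_efer & mask));       /* actually change     */
    }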
@@ -637,7 +709,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
         return -EINVAL;
     }
 
-#ifdef HVM_DEBUG_SUSPEND
+#if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
 static void dump_msr_state(struct vmx_msr_state *m)
 {
     int i = 0;
@@ -648,17 +720,16 @@ static void dump_msr_state(struct vmx_ms
     printk("\n");
 }
 #else
-static void dump_msr_state(struct vmx_msr_state *m)
-{
-}
+#define dump_msr_state(m) ((void)0)
 #endif
 
-void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
+#ifdef __x86_64__
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
     unsigned long guest_flags = guest_state->flags;
 
-    data->shadow_gs = guest_state->shadow_gs;
+    data->shadow_gs = v->arch.hvm_vmx.shadow_gs;
 
     /* save msrs */
     data->msr_flags        = guest_flags;
@@ -666,15 +737,18 @@ void vmx_save_cpu_state(struct vcpu *v,
     data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
     data->msr_cstar        = guest_state->msrs[VMX_INDEX_MSR_CSTAR];
     data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
-    data->msr_efer         = guest_state->msrs[VMX_INDEX_MSR_EFER];
+#endif
+
+    data->msr_efer = v->arch.hvm_vmx.efer;
 
     data->tsc = hvm_get_guest_time(v);
 
     dump_msr_state(guest_state);
 }
 
-void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
+#ifdef __x86_64__
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
 
     /* restore msrs */
@@ -683,9 +757,11 @@ void vmx_load_cpu_state(struct vcpu *v,
     guest_state->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
     guest_state->msrs[VMX_INDEX_MSR_CSTAR]        = data->msr_cstar;
     guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK] = data->msr_syscall_mask;
-    guest_state->msrs[VMX_INDEX_MSR_EFER]         = data->msr_efer;
 
-    guest_state->shadow_gs = data->shadow_gs;
+    v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
+#endif
+
+    v->arch.hvm_vmx.efer = data->msr_efer;
 
     v->arch.hvm_vmx.vmxassist_enabled = !(data->cr0 & X86_CR0_PE);
 
@@ -695,7 +771,7 @@ void vmx_load_cpu_state(struct vcpu *v,
 }
 
 
-void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     vmx_save_cpu_state(v, ctxt);
     vmx_vmcs_enter(v);
@@ -703,7 +779,7 @@ void vmx_save_vmcs_ctxt(struct vcpu *v,
     vmx_vmcs_exit(v);
 }
 
-int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     vmx_load_cpu_state(v, ctxt);
     if (vmx_vmcs_restore(v, ctxt)) {
@@ -1017,6 +1093,11 @@ static int vmx_pae_enabled(struct vcpu *
     return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
 }
 
+static int vmx_nx_enabled(struct vcpu *v)
+{
+    return v->arch.hvm_vmx.efer & EFER_NX;
+}
+
 static int vmx_interrupts_enabled(struct vcpu *v)
 {
     unsigned long eflags = __vmread(GUEST_RFLAGS);
@@ -1097,6 +1178,7 @@ static struct hvm_function_table vmx_fun
     .paging_enabled       = vmx_paging_enabled,
     .long_mode_enabled    = vmx_long_mode_enabled,
     .pae_enabled          = vmx_pae_enabled,
+    .nx_enabled           = vmx_nx_enabled,
     .interrupts_enabled   = vmx_interrupts_enabled,
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_guest_ctrl_reg   = vmx_get_ctrl_reg,
@@ -1997,8 +2079,7 @@ static int vmx_set_cr0(unsigned long val
         else
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
-            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
-                |= EFER_LMA;
+            v->arch.hvm_vmx.efer |= EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value |= VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -2047,8 +2128,7 @@ static int vmx_set_cr0(unsigned long val
          */
         if ( vmx_long_mode_enabled(v) )
         {
-            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
-                &= ~EFER_LMA;
+            v->arch.hvm_vmx.efer &= ~EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -2080,7 +2160,7 @@ static int vmx_set_cr0(unsigned long val
     {
         if ( vmx_long_mode_enabled(v) )
         {
-            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER] &= ~EFER_LMA;
+            v->arch.hvm_vmx.efer &= ~EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
Index: 2007-05-14/xen/arch/x86/mm/shadow/multi.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/mm/shadow/multi.c	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/arch/x86/mm/shadow/multi.c	2007-05-14 14:28:19.000000000 +0200
@@ -181,11 +181,11 @@ guest_supports_superpages(struct vcpu *v
 static inline int
 guest_supports_nx(struct vcpu *v)
 {
+    if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
+        return 0;
     if ( !is_hvm_vcpu(v) )
-        return cpu_has_nx;
-
-    // XXX - fix this!
-    return 1;
+        return 1;
+    return hvm_nx_enabled(v);
 }
 
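The reworked guest_supports_nx() now resolves in three steps: no NX for
2-level (non-PAE) guests or NX-less hosts, unconditional NX for PV guests,
and the virtual EFER.NX for HVM guests via the new hvm_nx_enabled() hook,
replacing the old "XXX - fix this!" placeholder. Restated as a standalone
predicate with illustrative parameter names:

    /* 1:1 restatement of the reworked guest_supports_nx() decision chain */
    static int guest_supports_nx_sketch(int paging_levels, int host_has_nx,
                                        int is_hvm, int guest_efer_nx)
    {
        if ( paging_levels == 2 || !host_has_nx )
            return 0;          /* no NX bit in 2-level PTEs / NX-less host */
        if ( !is_hvm )
            return 1;          /* PV guests inherit the host's NX support */
        return guest_efer_nx;  /* HVM guests: honour the virtual EFER.NX */
    }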
Index: 2007-05-14/xen/include/asm-x86/cpufeature.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/cpufeature.h	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/cpufeature.h	2007-05-14 14:28:19.000000000 +0200
@@ -114,6 +114,7 @@
 #define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_syscall	boot_cpu_has(X86_FEATURE_SYSCALL)
 #define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_k6_mtrr	boot_cpu_has(X86_FEATURE_K6_MTRR)
@@ -136,6 +137,7 @@
 #define cpu_has_xmm2		1
 #define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_syscall	1
 #define cpu_has_mp		1
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_k6_mtrr	0
Index: 2007-05-14/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/hvm/hvm.h	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/hvm/hvm.h	2007-05-14 14:28:19.000000000 +0200
@@ -93,14 +93,17 @@ struct hvm_function_table {
      * 1) determine whether paging is enabled,
      * 2) determine whether long mode is enabled,
      * 3) determine whether PAE paging is enabled,
-     * 4) determine whether interrupts are enabled or not,
-     * 5) determine the mode the guest is running in,
-     * 6) return the current guest control-register value
-     * 7) return the current guest segment descriptor base
+     * 4) determine whether NX is enabled,
+     * 5) determine whether interrupts are enabled or not,
+     * 6) determine the mode the guest is running in,
+     * 7) return the current guest control-register value
+     * 8) return the current guest segment descriptor base
+     * 9) return the current guest segment descriptor
      */
     int (*paging_enabled)(struct vcpu *v);
     int (*long_mode_enabled)(struct vcpu *v);
     int (*pae_enabled)(struct vcpu *v);
+    int (*nx_enabled)(struct vcpu *v);
     int (*interrupts_enabled)(struct vcpu *v);
     int (*guest_x86_mode)(struct vcpu *v);
     unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
@@ -199,6 +202,12 @@ hvm_interrupts_enabled(struct vcpu *v)
 }
 
 static inline int
+hvm_nx_enabled(struct vcpu *v)
+{
+    return hvm_funcs.nx_enabled(v);
+}
+
+static inline int
 hvm_guest_x86_mode(struct vcpu *v)
 {
     return hvm_funcs.guest_x86_mode(v);
Index: 2007-05-14/xen/include/asm-x86/hvm/svm/svm.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/hvm/svm/svm.h	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/hvm/svm/svm.h	2007-05-14 14:28:19.000000000 +0200
@@ -62,6 +62,11 @@ static inline int svm_pae_enabled(struct
     return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
 }
 
+static inline int svm_nx_enabled(struct vcpu *v)
+{
+    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
+}
+
 static inline int svm_pgbit_test(struct vcpu *v)
 {
     return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
Index: 2007-05-14/xen/include/asm-x86/hvm/vmx/vmcs.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/hvm/vmx/vmcs.h	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/hvm/vmx/vmcs.h	2007-05-14 14:28:19.000000000 +0200
@@ -39,7 +39,6 @@ enum {
     VMX_INDEX_MSR_STAR,
     VMX_INDEX_MSR_CSTAR,
     VMX_INDEX_MSR_SYSCALL_MASK,
-    VMX_INDEX_MSR_EFER,
 
     VMX_MSR_COUNT
 };
@@ -47,7 +46,6 @@ enum {
 struct vmx_msr_state {
     unsigned long flags;
     unsigned long msrs[VMX_MSR_COUNT];
-    unsigned long shadow_gs;
 };
 
 struct arch_vmx_struct {
@@ -76,7 +74,11 @@ struct arch_vmx_struct {
     unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     unsigned long        cpu_cr2;        /* save CR2 */
     unsigned long        cpu_cr3;
+#ifdef __x86_64__
     struct vmx_msr_state msr_state;
+    unsigned long        shadow_gs;
+#endif
+    unsigned long        efer;
     unsigned long        vmxassist_enabled:1;
 };
 
Index: 2007-05-14/xen/include/asm-x86/hvm/vmx/vmx.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/hvm/vmx/vmx.h	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/hvm/vmx/vmx.h	2007-05-14 14:28:19.000000000 +0200
@@ -261,14 +261,12 @@ static inline int vmx_paging_enabled(str
 
 static inline int vmx_long_mode_enabled(struct vcpu *v)
 {
-    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
-    return efer & EFER_LMA;
+    return v->arch.hvm_vmx.efer & EFER_LMA;
 }
 
 static inline int vmx_lme_is_set(struct vcpu *v)
 {
-    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
-    return efer & EFER_LME;
+    return v->arch.hvm_vmx.efer & EFER_LME;
 }
 
 static inline int vmx_pgbit_test(struct vcpu *v)
Index: 2007-05-14/xen/include/asm-x86/msr.h
===================================================================
--- 2007-05-14.orig/xen/include/asm-x86/msr.h	2007-07-02 10:37:53.000000000 +0200
+++ 2007-05-14/xen/include/asm-x86/msr.h	2007-05-14 14:28:19.000000000 +0200
@@ -3,6 +3,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <xen/smp.h>
+#include <xen/percpu.h>
+
 #define rdmsr(msr,val1,val2) \
      __asm__ __volatile__("rdmsr" \
                           : "=a" (val1), "=d" (val2) \
@@ -142,6 +145,25 @@ static inline void wrmsrl(unsigned int m
 #define EFER_NX		(1<<_EFER_NX)
 #define EFER_SVME	(1<<_EFER_SVME)
 
+#ifndef __ASSEMBLY__
+
+DECLARE_PER_CPU(__u64, efer);
+
+static inline __u64 read_efer(void)
+{
+    if (!this_cpu(efer))
+        rdmsrl(MSR_EFER, this_cpu(efer));
+    return this_cpu(efer);
+}
+
+static inline void write_efer(__u64 val)
+{
+    this_cpu(efer) = val;
+    wrmsrl(MSR_EFER, val);
+}
+
+#endif
+
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PLATFORM_ID	0x17
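read_efer() seeds the per-CPU shadow lazily with a real RDMSR on first
use. That quietly treats an all-zero cache as "not yet read"; in practice
a relevant EFER is non-zero by the time it matters (LME/LMA on 64-bit,
SVME or NX once enabled), and if it ever were zero the only cost would be
a redundant RDMSR, not wrong behaviour. A user-space analogue of the
caching, with rdmsrl/wrmsrl mocked out (all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t hw_efer = 0x500;      /* pretend MSR: LME|LMA set */
    static unsigned int rdmsr_count;

    /* mocks for the real rdmsrl/wrmsrl MSR accessors */
    static void rdmsrl_mock(uint64_t *val) { *val = hw_efer; rdmsr_count++; }
    static void wrmsrl_mock(uint64_t val)  { hw_efer = val; }

    static uint64_t efer_cache;           /* the per-CPU variable's analogue */

    static uint64_t read_efer(void)
    {
        if (!efer_cache)                  /* lazy first-use initialization */
            rdmsrl_mock(&efer_cache);
        return efer_cache;
    }

    static void write_efer(uint64_t val)
    {
        efer_cache = val;                 /* keep shadow and MSR in sync */
        wrmsrl_mock(val);
    }

    int main(void)
    {
        read_efer();
        read_efer();                      /* served from the cache */
        printf("rdmsr executed %u time(s)\n", rdmsr_count);  /* -> 1 */
        write_efer(read_efer() | 1);      /* e.g. set SCE */
        return 0;
    }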