Index: xen-4.0.0-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hvm.c
@@ -629,11 +629,12 @@ static int hvm_load_cpu_ctxt(struct doma
         return -EINVAL;
     }
 
-    if ( (ctxt.msr_efer & ~(EFER_FFXSE | EFER_LME | EFER_LMA |
+    if ( (ctxt.msr_efer & ~(EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA |
                             EFER_NX | EFER_SCE)) ||
          ((sizeof(long) != 8) && (ctxt.msr_efer & EFER_LME)) ||
          (!cpu_has_nx && (ctxt.msr_efer & EFER_NX)) ||
          (!cpu_has_syscall && (ctxt.msr_efer & EFER_SCE)) ||
+         (!cpu_has_lmsl && (ctxt.msr_efer & EFER_LMSLE)) ||
          (!cpu_has_ffxsr && (ctxt.msr_efer & EFER_FFXSE)) ||
          ((ctxt.msr_efer & (EFER_LME|EFER_LMA)) == EFER_LMA) )
     {
@@ -984,10 +985,11 @@ int hvm_set_efer(uint64_t value)
 
     value &= ~EFER_LMA;
 
-    if ( (value & ~(EFER_FFXSE | EFER_LME | EFER_NX | EFER_SCE)) ||
+    if ( (value & ~(EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE)) ||
          ((sizeof(long) != 8) && (value & EFER_LME)) ||
          (!cpu_has_nx && (value & EFER_NX)) ||
          (!cpu_has_syscall && (value & EFER_SCE)) ||
+         (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
          (!cpu_has_ffxsr && (value & EFER_FFXSE)) )
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
Index: xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c
@@ -56,6 +56,11 @@
 
 u32 svm_feature_flags;
 
+#ifdef __x86_64__
+/* indicate whether guest may use EFER.LMSLE */
+unsigned char cpu_has_lmsl = 0;
+#endif
+
 #define set_segment_register(name, value) \
     asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
@@ -847,6 +852,29 @@ static int svm_cpu_up(struct cpuinfo_x86
 
     /* Initialize core's ASID handling. */
     svm_asid_init(c);
 
+#ifdef __x86_64__
+    /*
+     * Check whether EFER.LMSLE can be written.
+     * Unfortunately there's no feature bit defined for this.
+     */
+    eax = read_efer();
+    edx = read_efer() >> 32;
+    if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+        rdmsr(MSR_EFER, eax, edx);
+    if ( eax & EFER_LMSLE )
+    {
+        if ( c == &boot_cpu_data )
+            cpu_has_lmsl = 1;
+        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+    }
+    else
+    {
+        if ( cpu_has_lmsl )
+            printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
+        cpu_has_lmsl = 0;
+    }
+#endif
+
     return 1;
 }
Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/hvm/hvm.h
+++ xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h
@@ -132,6 +132,12 @@ struct hvm_function_table {
 extern struct hvm_function_table hvm_funcs;
 extern int hvm_enabled;
 
+#ifdef __i386__
+# define cpu_has_lmsl 0
+#else
+extern unsigned char cpu_has_lmsl;
+#endif
+
 int hvm_domain_initialise(struct domain *d);
 void hvm_domain_relinquish_resources(struct domain *d);
 void hvm_domain_destroy(struct domain *d);
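
Note: the svm_cpu_up() hunk above has to probe for LMSLE by attempting the
MSR write directly, since AMD defines no CPUID feature bit for it. The
snippet below is a minimal standalone sketch of that probe pattern, assuming
Xen's read_efer()/wrmsr_safe()/rdmsr() helpers as used in the patch; the
helper name efer_probe_lmsle is hypothetical and not part of the patch.

    /*
     * Sketch only (hypothetical helper, not part of this patch): probe
     * whether an EFER bit is writable when no CPUID feature flag exists.
     * Assumes Xen's read_efer(), wrmsr_safe() and rdmsr() helpers.
     */
    static int efer_probe_lmsle(void)
    {
        uint32_t eax = (uint32_t)read_efer();
        uint32_t edx = (uint32_t)(read_efer() >> 32);

        /* wrmsr_safe() returns 0 when the write does not fault. */
        if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) != 0 )
            return 0;

        rdmsr(MSR_EFER, eax, edx);
        if ( !(eax & EFER_LMSLE) )
            return 0;               /* write silently ignored */

        /* Clear the probed bit again to restore the original EFER. */
        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
        return 1;
    }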