xen/svm-lmsl.patch

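Allow HVM guests on SVM hardware to use EFER.LMSLE (Long Mode Segment Limit Enable).
Since the CPU exposes no feature flag for LMSLE, start_svm() probes for support by
attempting to set the bit in EFER and records the result in cpu_has_lmsl.
hvm_set_efer() and the HVM context restore path then accept EFER.LMSLE only when
cpu_has_lmsl is set; on 32-bit builds cpu_has_lmsl is hard-wired to 0.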

Index: xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-3.3.1-testing/xen/arch/x86/hvm/hvm.c
@@ -525,11 +525,12 @@ static int hvm_load_cpu_ctxt(struct doma
         return -EINVAL;
     }
 
-    if ( (ctxt.msr_efer & ~(EFER_FFXSE | EFER_LME | EFER_LMA |
+    if ( (ctxt.msr_efer & ~(EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA |
                             EFER_NX | EFER_SCE)) ||
          ((sizeof(long) != 8) && (ctxt.msr_efer & EFER_LME)) ||
          (!cpu_has_nx && (ctxt.msr_efer & EFER_NX)) ||
          (!cpu_has_syscall && (ctxt.msr_efer & EFER_SCE)) ||
+         (!cpu_has_lmsl && (ctxt.msr_efer & EFER_LMSLE)) ||
          (!cpu_has_ffxsr && (ctxt.msr_efer & EFER_FFXSE)) ||
          ((ctxt.msr_efer & (EFER_LME|EFER_LMA)) == EFER_LMA) )
     {
@@ -790,10 +791,11 @@ int hvm_set_efer(uint64_t value)
 
     value &= ~EFER_LMA;
 
-    if ( (value & ~(EFER_FFXSE | EFER_LME | EFER_NX | EFER_SCE)) ||
+    if ( (value & ~(EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE)) ||
          ((sizeof(long) != 8) && (value & EFER_LME)) ||
          (!cpu_has_nx && (value & EFER_NX)) ||
          (!cpu_has_syscall && (value & EFER_SCE)) ||
+         (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
          (!cpu_has_ffxsr && (value & EFER_FFXSE)) )
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
Index: xen-3.3.1-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-3.3.1-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-3.3.1-testing/xen/arch/x86/hvm/svm/svm.c
@@ -53,6 +53,11 @@
 
 u32 svm_feature_flags;
 
+#ifdef __x86_64__
+/* indicate whether guest may use EFER.LMSLE */
+unsigned char cpu_has_lmsl = 0;
+#endif
+
 #define set_segment_register(name, value) \
     asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
@@ -853,6 +858,22 @@ int start_svm(struct cpuinfo_x86 *c)
     /* Initialize core's ASID handling. */
     svm_asid_init(c);
 
+#ifdef __x86_64__
+    /*
+     * Check whether EFER.LMSLE can be written.
+     * Unfortunately there's no feature bit defined for this.
+     */
+    eax = read_efer();
+    edx = read_efer() >> 32;
+    if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+        rdmsr(MSR_EFER, eax, edx);
+    if ( eax & EFER_LMSLE )
+    {
+        cpu_has_lmsl = 1;
+        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+    }
+#endif
+
     if ( cpu != 0 )
         return 1;
 
Index: xen-3.3.1-testing/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- xen-3.3.1-testing.orig/xen/include/asm-x86/hvm/hvm.h
+++ xen-3.3.1-testing/xen/include/asm-x86/hvm/hvm.h
@@ -133,6 +133,12 @@ struct hvm_function_table {
 extern struct hvm_function_table hvm_funcs;
 extern int hvm_enabled;
 
+#ifdef __i386__
+# define cpu_has_lmsl 0
+#else
+extern unsigned char cpu_has_lmsl;
+#endif
+
 int hvm_domain_initialise(struct domain *d);
 void hvm_domain_relinquish_resources(struct domain *d);
 void hvm_domain_destroy(struct domain *d);