44 lines
1.5 KiB
Diff
# Commit 31ee951a3bee6e7cc21f94f900fe989e3701a79a
# Date 2014-04-28 12:47:24 +0200
# Author Feng Wu <feng.wu@intel.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: correct the SMEP logic for HVM_CR0_GUEST_RESERVED_BITS

When checking the SMEP feature for HVM guests, we should check the
VCPU instead of the host CPU.

Signed-off-by: Feng Wu <feng.wu@intel.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -347,6 +347,19 @@ static inline int hvm_event_pending(stru
     return hvm_funcs.event_pending(v);
 }
 
+static inline bool_t hvm_vcpu_has_smep(void)
+{
+    unsigned int eax, ebx;
+
+    hvm_cpuid(0, &eax, NULL, NULL, NULL);
+
+    if ( eax < 7 )
+        return 0;
+
+    hvm_cpuid(7, NULL, &ebx, NULL, NULL);
+    return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
+}
+
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
 #define HVM_CR0_GUEST_RESERVED_BITS             \
     (~((unsigned long)                          \
@@ -366,7 +379,7 @@ static inline int hvm_event_pending(stru
       X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE |  \
       X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
       X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |     \
-      (cpu_has_smep ? X86_CR4_SMEP : 0) |       \
+      (hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) | \
       (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) | \
       ((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
       ? X86_CR4_VMXE : 0) | \