1b78387def
- fate#309901: Add Xen support for SVM TSC scaling in AMD family 15h
- fate#311951: Ivy Bridge: XEN support for Supervisor Mode Execution
  Protection (SMEP)
  23437-amd-fam15-TSC-scaling.patch
  23462-libxc-cpu-feature.patch
  23481-x86-SMEP.patch
  23504-x86-SMEP-hvm.patch
  23505-x86-cpu-add-arg-check.patch
  23508-vmx-proc-based-ctls-probe.patch
  23510-hvm-cpuid-DRNG.patch
  23511-amd-fam15-no-flush-for-C3.patch
  23516-cpuid-ERMS.patch
  23538-hvm-pio-emul-no-host-crash.patch
  23539-hvm-cpuid-FSGSBASE.patch
  23543-x86_64-maddr_to_virt-assertion.patch
  23546-fucomip.patch
- Fix libxc reentrancy issues
  23383-libxc-rm-static-vars.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=128
References: FATE#311951

# HG changeset patch
# User Tim Deegan <Tim.Deegan@citrix.com>
# Date 1307364408 -3600
# Node ID c34604d5a29336d902837542b915d3b09b27a361
# Parent  664c419b55681feb233b33e0028d0f0af371bedd
x86/hvm: add SMEP support to HVM guest

Intel's new CPUs support SMEP (Supervisor Mode Execution Protection). SMEP
prevents software operating with CPL < 3 (supervisor mode) from fetching
instructions from any linear address with a valid translation for which the U/S
flag (bit 2) is 1 in every paging-structure entry controlling the translation
for the linear address.

This patch adds SMEP support to HVM guests.
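
[Editor's illustration, not part of the changeset: once the hypervisor
exposes the feature as this patch does, a guest checks CPUID.(EAX=7,ECX=0):EBX
bit 7 and, if set, its kernel turns the protection on by setting CR4.SMEP
(bit 20). The small user-space probe below shows only the CPUID side; it
assumes GCC/Clang on x86 and uses no Xen API.]

    /* Sketch: detect the SMEP feature bit exposed to the guest. */
    #include <stdint.h>
    #include <stdio.h>

    static void cpuid_count(uint32_t leaf, uint32_t subleaf, uint32_t *a,
                            uint32_t *b, uint32_t *c, uint32_t *d)
    {
        __asm__ volatile ("cpuid"
                          : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                          : "0" (leaf), "2" (subleaf));
    }

    int main(void)
    {
        uint32_t a, b, c, d;

        cpuid_count(0, 0, &a, &b, &c, &d);      /* max basic CPUID leaf */
        if ( a < 7 )
        {
            puts("CPUID leaf 7 not supported");
            return 0;
        }
        cpuid_count(7, 0, &a, &b, &c, &d);      /* structured feature flags */
        printf("SMEP %s\n", (b & (1u << 7)) ? "available" : "not available");
        return 0;
    }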

Signed-off-by: Yang Wei <wei.y.yang@intel.com>
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
Signed-off-by: Li Xin <xin.li@intel.com>
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>

--- a/tools/libxc/xc_cpufeature.h
+++ b/tools/libxc/xc_cpufeature.h
@@ -124,5 +124,6 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
 #define X86_FEATURE_FSGSBASE 0 /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP     7 /* Supervisor Mode Execution Protection */
 
 #endif /* __LIBXC_CPUFEATURE_H */
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -300,6 +300,14 @@ static void xc_cpuid_hvm_policy(
             clear_bit(X86_FEATURE_PAE, regs[3]);
         break;
 
+    case 0x00000007: /* Intel-defined CPU features */
+        if ( input[1] == 0 ) {
+            regs[1] &= bitmaskof(X86_FEATURE_SMEP);
+        } else
+            regs[1] = 0;
+        regs[0] = regs[2] = regs[3] = 0;
+        break;
+
     case 0x0000000d:
         xc_cpuid_config_xsave(xch, domid, xfeature_mask, input, regs);
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1553,8 +1553,9 @@ int hvm_set_cr4(unsigned long value)
     v->arch.hvm_vcpu.guest_cr[4] = value;
     hvm_update_guest_cr(v, 4);
 
-    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
-    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+    /* Modifying CR4.{PSE,PAE,PGE,SMEP} invalidates all TLB entries. */
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE |
+                             X86_CR4_PAE | X86_CR4_SMEP) )
         paging_update_paging_modes(v);
 
     return X86EMUL_OKAY;
@@ -2196,7 +2197,7 @@ enum hvm_copy_result hvm_copy_from_guest
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
@@ -2222,7 +2223,7 @@ enum hvm_copy_result hvm_copy_from_guest
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
@@ -2292,6 +2293,10 @@ void hvm_cpuid(unsigned int input, unsig
         *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
                 cpufeat_mask(X86_FEATURE_OSXSAVE) : 0;
         break;
+    case 0x7:
+        if ( (count == 0) && !cpu_has_smep )
+            *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+        break;
     case 0xb:
         /* Fix the x2APIC identifier. */
         *edx = v->vcpu_id * 2;
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -132,7 +132,7 @@ guest_walk_tables(struct vcpu *v, struct
     guest_l4e_t *l4p;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
-    int pse;
+    int pse, smep;
 
     perfc_incr(guest_walk);
     memset(gw, 0, sizeof(*gw));
@@ -145,6 +145,15 @@ guest_walk_tables(struct vcpu *v, struct
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
+    /* SMEP: kernel-mode instruction fetches from user-mode mappings
+     * should fault. Unlike NX or invalid bits, we're looking for _all_
+     * entries in the walk to have _PAGE_USER set, so we need to do the
+     * whole walk as if it were a user-mode one and then invert the answer. */
+    smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v)
+            && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
+    if ( smep )
+        mflags |= _PAGE_USER;
+
 #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
 
@@ -271,6 +280,10 @@ guest_walk_tables(struct vcpu *v, struct
         rc |= ((gflags & mflags) ^ mflags);
     }
 
+    /* Now re-invert the user-mode requirement for SMEP. */
+    if ( smep )
+        rc ^= _PAGE_USER;
+
     /* Go back and set accessed and dirty bits only if the walk was a
      * success. Although the PRMs say higher-level _PAGE_ACCESSED bits
      * get set whenever a lower-level PT is used, at least some hardware
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -181,6 +181,8 @@ int hvm_girq_dest_2_vcpu_id(struct domai
     (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
 #define hvm_pae_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_smep_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
@@ -294,6 +296,7 @@ static inline int hvm_do_pmu_interrupt(s
         X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
+        (cpu_has_smep ? X86_CR4_SMEP : 0) | \
         (xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
 
 /* These exceptions must always be intercepted. */
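
[Editor's note, not part of the changeset: the subtlest piece above is the
guest_walk.c change. A SMEP fault is wanted only when every level of the walk
has U/S = 1, so the walk temporarily demands _PAGE_USER as if it were a
user-mode access and then XORs that bit out of the result. A minimal,
standalone sketch of that inversion follows; the names smep_walk_rc and the
flat levels[] array are simplified stand-ins, not the real guest_walk_tables()
types.]

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_USER 0x4u

    /* Model the SMEP-enabled walk: levels[] holds one set of PTE flags per
     * paging level. Returns a bitmask whose _PAGE_USER bit is set only when
     * every level had U/S = 1, i.e. when a SMEP fault should be raised. */
    static uint32_t smep_walk_rc(const uint32_t *levels, int nlevels)
    {
        uint32_t mflags = _PAGE_USER;   /* pretend it is a user-mode walk */
        uint32_t rc = 0;

        for ( int i = 0; i < nlevels; i++ )
            rc |= ((levels[i] & mflags) ^ mflags); /* note any level lacking U/S */

        rc ^= _PAGE_USER;   /* invert: set only if no level lacked U/S */
        return rc;
    }

    int main(void)
    {
        uint32_t all_user[]  = { _PAGE_USER, _PAGE_USER, _PAGE_USER, _PAGE_USER };
        uint32_t one_super[] = { _PAGE_USER, 0, _PAGE_USER, _PAGE_USER };

        printf("all-user mapping:       %s\n",
               (smep_walk_rc(all_user, 4)  & _PAGE_USER) ? "SMEP fault" : "ok");
        printf("supervisor-only level:  %s\n",
               (smep_walk_rc(one_super, 4) & _PAGE_USER) ? "SMEP fault" : "ok");
        return 0;
    }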