3e51b51ba9
be booted from EFI"
  53dba447-x86-ACPI-allow-CMOS-RTC-use-even-when-ACPI-says-there-is-none.patch
- Upstream patches from Jan
  53d7b781-x86-cpu-undo-BIOS-CPUID-max_leaf-limit-earlier.patch
  53df71c7-lz4-check-for-underruns.patch
  53df727b-x86-HVM-extend-LAPIC-shortcuts-around-P2M-lookups.patch
  53e47d6b-x86_emulate-properly-do-IP-updates-and-other-side-effects.patch
- Update to Xen Version 4.4.1-rc2
  xen-4.4.1-testing-src.tar.bz2
- Dropped 60 upstream patches and xen-4.4.0-testing-src.tar.bz2
- bnc#820873 - The "long" option doesn't work with "xl list"
  53d124e7-fix-list_domain_details-check-config-data-length-0.patch
- bnc#888996 - Package 'xen-tool' contains 'SuSE' spelling in a filename
  and/or SPEC file
  Renamed README.SuSE -> README.SUSE
  Modified files: xen.spec, boot.local.xenU, init.pciback,
    xend-config.patch, xend-vif-route-ifup.patch
- bnc#882673 - Dom0 memory should enforce a minimum memory size
  (e.g. dom0_mem=min:512M)
  xen.spec (Mike Latimer)

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=324
# Commit dab11417da4e21f43625f4ebbb68158f07003d04
# Date 2014-06-25 14:40:34 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: consolidate and sanitize CR4 guest reserved bit determination

First of all, this is needed by just a single source file, so it gets
moved there instead of getting fed to the compiler for most other
source files too. With that it becomes sensible for this to no longer
be a macro, allowing elimination of the mostly redundant helpers
hvm_vcpu_has_{smep,smap}(). And finally, following the model SMEP and
SMAP already used, tie the determination of reserved bits to the
features the guest is shown rather than the host's.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Index: xen-4.4.1-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-4.4.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.4.1-testing/xen/arch/x86/hvm/hvm.c
@@ -827,6 +827,73 @@ static bool_t hvm_efer_valid(struct doma
             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
 }
 
+/* These reserved bits in lower 32 remain 0 after any load of CR0 */
+#define HVM_CR0_GUEST_RESERVED_BITS             \
+    (~((unsigned long)                          \
+       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
+        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
+        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
+        X86_CR0_CD | X86_CR0_PG)))
+
+/* These bits in CR4 cannot be set by the guest. */
+static unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v,
+                                                 bool_t restore)
+{
+    unsigned int leaf1_ecx = 0, leaf1_edx = 0;
+    unsigned int leaf7_0_ebx = 0, leaf7_0_ecx = 0;
+
+    if ( likely(!restore) )
+    {
+        unsigned int level;
+
+        ASSERT(v == current);
+        hvm_cpuid(0, &level, NULL, NULL, NULL);
+        if ( level >= 1 )
+            hvm_cpuid(1, NULL, NULL, &leaf1_ecx, &leaf1_edx);
+        if ( level >= 7 )
+            hvm_cpuid(7, NULL, &leaf7_0_ebx, &leaf7_0_ecx, NULL);
+    }
+    else
+    {
+        leaf1_edx = boot_cpu_data.x86_capability[X86_FEATURE_VME / 32];
+        leaf1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_PCID / 32];
+        leaf7_0_ebx = boot_cpu_data.x86_capability[X86_FEATURE_FSGSBASE / 32];
+    }
+
+    return ~(unsigned long)
+            ((leaf1_edx & cpufeat_mask(X86_FEATURE_VME) ?
+              X86_CR4_VME | X86_CR4_PVI : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_TSC) ?
+              X86_CR4_TSD : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_DE) ?
+              X86_CR4_DE : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_PSE) ?
+              X86_CR4_PSE : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_PAE) ?
+              X86_CR4_PAE : 0) |
+             (leaf1_edx & (cpufeat_mask(X86_FEATURE_MCE) |
+                           cpufeat_mask(X86_FEATURE_MCA)) ?
+              X86_CR4_MCE : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_PGE) ?
+              X86_CR4_PGE : 0) |
+             X86_CR4_PCE |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_FXSR) ?
+              X86_CR4_OSFXSR : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_XMM) ?
+              X86_CR4_OSXMMEXCPT : 0) |
+             ((restore || nestedhvm_enabled(v->domain)) &&
+              (leaf1_ecx & cpufeat_mask(X86_FEATURE_VMXE)) ?
+              X86_CR4_VMXE : 0) |
+             (leaf7_0_ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) ?
+              X86_CR4_FSGSBASE : 0) |
+             (leaf1_ecx & cpufeat_mask(X86_FEATURE_PCID) ?
+              X86_CR4_PCIDE : 0) |
+             (leaf1_ecx & cpufeat_mask(X86_FEATURE_XSAVE) ?
+              X86_CR4_OSXSAVE : 0) |
+             (leaf7_0_ebx & cpufeat_mask(X86_FEATURE_SMEP) ?
+              X86_CR4_SMEP : 0));
+}
+
 static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     int vcpuid;
@@ -857,7 +924,7 @@ static int hvm_load_cpu_ctxt(struct doma
         return -EINVAL;
     }
 
-    if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS(v, 1) )
+    if ( ctxt.cr4 & hvm_cr4_guest_reserved_bits(v, 1) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
                d->domain_id, ctxt.cr4);
@@ -1981,7 +2048,7 @@ int hvm_set_cr4(unsigned long value)
     struct vcpu *v = current;
     unsigned long old_cr;
 
-    if ( value & HVM_CR4_GUEST_RESERVED_BITS(v, 0) )
+    if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
    {
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Guest attempts to set reserved bit in CR4: %lx",
Index: xen-4.4.1-testing/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- xen-4.4.1-testing.orig/xen/include/asm-x86/hvm/hvm.h
+++ xen-4.4.1-testing/xen/include/asm-x86/hvm/hvm.h
@@ -347,51 +347,10 @@ static inline int hvm_event_pending(stru
     return hvm_funcs.event_pending(v);
 }
 
-static inline bool_t hvm_vcpu_has_smep(void)
-{
-    unsigned int eax, ebx;
-
-    hvm_cpuid(0, &eax, NULL, NULL, NULL);
-
-    if ( eax < 7 )
-        return 0;
-
-    hvm_cpuid(7, NULL, &ebx, NULL, NULL);
-    return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
-}
-
-/* These reserved bits in lower 32 remain 0 after any load of CR0 */
-#define HVM_CR0_GUEST_RESERVED_BITS             \
-    (~((unsigned long)                          \
-       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
-        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
-        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
-        X86_CR0_CD | X86_CR0_PG)))
-
 /* These bits in CR4 are owned by the host. */
 #define HVM_CR4_HOST_MASK (mmu_cr4_features & \
     (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
 
-/* These bits in CR4 cannot be set by the guest. */
-#define HVM_CR4_GUEST_RESERVED_BITS(v, restore) ({      \
-    const struct vcpu *_v = (v);                        \
-    bool_t _restore = !!(restore);                      \
-    ASSERT((_restore) || _v == current);                \
-    (~((unsigned long)                                  \
-       (X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD |       \
-        X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE |        \
-        X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
-        X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
-        (((_restore) ? cpu_has_smep :                   \
-          hvm_vcpu_has_smep()) ?                        \
-         X86_CR4_SMEP : 0) |                            \
-        (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
-        ((nestedhvm_enabled(_v->domain) && cpu_has_vmx) \
-         ? X86_CR4_VMXE : 0) |                          \
-        (cpu_has_pcid ? X86_CR4_PCIDE : 0) |            \
-        (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))));       \
-})
-
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
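
For orientation, here is a minimal standalone sketch of the idea the patch description spells out: the CR4 bits a guest may set are derived from the features that guest is shown, and everything outside that set is treated as reserved when a CR4 load is validated. This is not Xen code; struct guest_features, cr4_guest_reserved_bits() and the CR4_* macros are defined here purely for illustration (only the CR4 bit positions themselves are the architectural ones).

/*
 * Standalone sketch (NOT Xen source): build a CR4 reserved-bit mask from a
 * guest-visible feature set, then validate an attempted CR4 value.
 */
#include <stdbool.h>
#include <stdio.h>

#define CR4_VME        (1UL << 0)
#define CR4_PVI        (1UL << 1)
#define CR4_TSD        (1UL << 2)
#define CR4_DE         (1UL << 3)
#define CR4_PSE        (1UL << 4)
#define CR4_PAE        (1UL << 5)
#define CR4_PGE        (1UL << 7)
#define CR4_PCE        (1UL << 8)
#define CR4_OSFXSR     (1UL << 9)
#define CR4_OSXMMEXCPT (1UL << 10)
#define CR4_FSGSBASE   (1UL << 16)
#define CR4_PCIDE      (1UL << 17)
#define CR4_OSXSAVE    (1UL << 18)
#define CR4_SMEP       (1UL << 20)

/* Hypothetical stand-in for the CPUID feature bits shown to the guest. */
struct guest_features {
    bool vme, tsc, de, pse, pae, pge, fxsr, sse;  /* leaf 1 EDX style   */
    bool pcid, xsave;                             /* leaf 1 ECX style   */
    bool fsgsbase, smep;                          /* leaf 7/0 EBX style */
};

/* Reserved = complement of the bits the guest's visible features allow. */
static unsigned long cr4_guest_reserved_bits(const struct guest_features *f)
{
    unsigned long allowed = CR4_PCE;     /* always permitted in this sketch */

    if ( f->vme )      allowed |= CR4_VME | CR4_PVI;
    if ( f->tsc )      allowed |= CR4_TSD;
    if ( f->de )       allowed |= CR4_DE;
    if ( f->pse )      allowed |= CR4_PSE;
    if ( f->pae )      allowed |= CR4_PAE;
    if ( f->pge )      allowed |= CR4_PGE;
    if ( f->fxsr )     allowed |= CR4_OSFXSR;
    if ( f->sse )      allowed |= CR4_OSXMMEXCPT;
    if ( f->pcid )     allowed |= CR4_PCIDE;
    if ( f->xsave )    allowed |= CR4_OSXSAVE;
    if ( f->fsgsbase ) allowed |= CR4_FSGSBASE;
    if ( f->smep )     allowed |= CR4_SMEP;

    return ~allowed;
}

int main(void)
{
    /* A guest shown PAE, FXSR and SSE, but neither SMEP nor PCID. */
    struct guest_features f = { .pae = true, .fxsr = true, .sse = true };
    unsigned long reserved = cr4_guest_reserved_bits(&f);
    unsigned long cr4 = CR4_PAE | CR4_SMEP;   /* attempted CR4 value */

    if ( cr4 & reserved )
        printf("reject: CR4 %#lx sets reserved bits %#lx\n",
               cr4, cr4 & reserved);
    else
        printf("accept: CR4 %#lx\n", cr4);
    return 0;
}

Deriving the mask from guest-visible features rather than host capabilities, as the commit message describes, is what keeps a guest from enabling a CR4 control for a feature it was never offered, even when the underlying host happens to support it.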