0e9e131edf
- Upstream patches from Jan
  25168-x86-memset-size.patch
  25191-x86-tdt-delta-calculation.patch
  25195-x86-cpuidle-C2-no-flush-or-bm-check.patch
  25196-x86-HAP-PAT-sr.patch
  25200-x86_64-trap-bounce-flags.patch
  25242-x86_64-hotplug-compat-m2p.patch
  25247-SVM-no-rdtsc-intercept.patch
  25267-x86-text-unlikely.patch
  25269-x86-vMCE-addr-misc-write.patch
  25271-x86_64-IST-index.patch
  25327-pvdrv-no-asm-system-h.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=189
# HG changeset patch
# User Gianluca Guida <gianluca.guida@citrix.com>
# Date 1334647766 -3600
# Node ID 375fa55c7a6c88273bdd1f7f1105e5154da4eeba
# Parent a06e6cdeafe34964721df3ddb32774d4634fd7ad
Fix save/restore of guest PAT table in HAP paging mode.

HAP paging mode guests use direct MSR read/write into the VMCS/VMCB for
the guest PAT table, while the current save/restore code accesses only
the pat_cr field in hvm_vcpu, which is used when intercepting the MSR,
mostly in shadow mode (the Intel scenario is a bit more complicated).
This patch fixes the issue by introducing a new pair of hvm_funcs,
get/set_guest_pat, which access the correct PAT table based on the
paging mode and guest configuration.

Signed-off-by: Gianluca Guida <gianluca.guida@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Keir Fraser <keir@xen.org>

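[Editorial illustration, not part of the upstream patch: a minimal, standalone
sketch of the dispatch pattern described above. The generic accessor tries the
vendor hook first and falls back to the software-tracked pat_cr copy only when
the hook declines, i.e. when the PAT value is not held directly in the
VMCS/VMCB. All names and values below are invented for the example.]

    /* Illustrative sketch only -- simplified names, not the Xen sources. */
    #include <stdint.h>
    #include <stdio.h>

    struct toy_vcpu {
        uint64_t pat_cr;                  /* software copy used on the intercept path */
        int (*get_guest_pat)(struct toy_vcpu *v, uint64_t *pat); /* vendor hook; 0 = "not handled" */
    };

    static int hap_active = 1;                        /* pretend HAP is enabled */
    static uint64_t hw_pat = 0x0007040600070406ULL;   /* value held in VMCS/VMCB */

    static int toy_vendor_get_pat(struct toy_vcpu *v, uint64_t *pat)
    {
        if ( !hap_active )
            return 0;                                 /* decline: caller must use pat_cr */
        *pat = hw_pat;                                /* HAP: read the hardware-held value */
        return 1;
    }

    static void toy_get_guest_pat(struct toy_vcpu *v, uint64_t *pat)
    {
        if ( !v->get_guest_pat(v, pat) )
            *pat = v->pat_cr;                         /* fallback: intercepted MSR copy */
    }

    int main(void)
    {
        struct toy_vcpu v = { .pat_cr = 0x0007010600070106ULL,
                              .get_guest_pat = toy_vendor_get_pat };
        uint64_t pat;

        toy_get_guest_pat(&v, &pat);
        printf("guest PAT = %#llx\n", (unsigned long long)pat);
        return 0;
    }
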
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -209,6 +209,31 @@ void hvm_set_rdtsc_exiting(struct domain
         hvm_funcs.set_rdtsc_exiting(v, enable);
 }
 
+void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
+{
+    if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
+        *guest_pat = v->arch.hvm_vcpu.pat_cr;
+}
+
+int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
+{
+    int i;
+    uint8_t *value = (uint8_t *)&guest_pat;
+
+    for ( i = 0; i < 8; i++ )
+        if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
+                        value[i] == 4 || value[i] == 5 ||
+                        value[i] == 6 || value[i] == 7)) ) {
+            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid guest PAT: %"PRIx64"\n",
+                        guest_pat);
+            return 0;
+        }
+
+    if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
+        v->arch.hvm_vcpu.pat_cr = guest_pat;
+    return 1;
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
@@ -2483,7 +2508,7 @@ int hvm_msr_read_intercept(unsigned int
         break;
 
     case MSR_IA32_CR_PAT:
-        *msr_content = v->arch.hvm_vcpu.pat_cr;
+        hvm_get_guest_pat(v, msr_content);
         break;
 
     case MSR_MTRRcap:
@@ -2599,7 +2624,7 @@ int hvm_msr_write_intercept(unsigned int
         break;
 
     case MSR_IA32_CR_PAT:
-        if ( !pat_msr_set(&v->arch.hvm_vcpu.pat_cr, msr_content) )
+        if ( !hvm_set_guest_pat(v, msr_content) )
             goto gp_fault;
         break;
 
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -406,26 +406,6 @@ uint32_t get_pat_flags(struct vcpu *v,
     return pat_type_2_pte_flags(pat_entry_value);
 }
 
-/* Helper funtions for seting mtrr/pat */
-bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
-{
-    uint8_t *value = (uint8_t*)&msr_content;
-    int32_t i;
-
-    if ( *pat != msr_content )
-    {
-        for ( i = 0; i < 8; i++ )
-            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
-                            value[i] == 4 || value[i] == 5 ||
-                            value[i] == 6 || value[i] == 7)) )
-                return 0;
-
-        *pat = msr_content;
-    }
-
-    return 1;
-}
-
 bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
 {
     uint8_t def_type = msr_content & 0xff;
@@ -636,7 +616,7 @@ static int hvm_save_mtrr_msr(struct doma
     {
         mtrr_state = &v->arch.hvm_vcpu.mtrr;
 
-        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;
+        hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
 
         hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                     | (mtrr_state->enabled << 10);
@@ -681,7 +661,7 @@ static int hvm_load_mtrr_msr(struct doma
 
     mtrr_state = &v->arch.hvm_vcpu.mtrr;
 
-    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);
+    hvm_set_guest_pat(v, hw_mtrr.msr_pat_cr);
 
     mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;
 
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -585,6 +585,28 @@ static void svm_set_segment_register(str
         svm_vmload(vmcb);
 }
 
+static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( !paging_mode_hap(v->domain) )
+        return 0;
+
+    vmcb_set_g_pat(vmcb, gpat);
+    return 1;
+}
+
+static int svm_get_guest_pat(struct vcpu *v, u64 *gpat)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( !paging_mode_hap(v->domain) )
+        return 0;
+
+    *gpat = vmcb_get_g_pat(vmcb);
+    return 1;
+}
+
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1507,6 +1529,8 @@ static struct hvm_function_table __read_
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
+    .set_guest_pat        = svm_set_guest_pat,
+    .get_guest_pat        = svm_get_guest_pat,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -921,6 +921,34 @@ static void vmx_set_segment_register(str
     vmx_vmcs_exit(v);
 }
 
+static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
+{
+    if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
+        return 0;
+
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_PAT, gpat);
+#ifdef __i386__
+    __vmwrite(GUEST_PAT_HIGH, gpat >> 32);
+#endif
+    vmx_vmcs_exit(v);
+    return 1;
+}
+
+static int vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
+{
+    if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
+        return 0;
+
+    vmx_vmcs_enter(v);
+    *gpat = __vmread(GUEST_PAT);
+#ifdef __i386__
+    *gpat |= (u64)__vmread(GUEST_PAT_HIGH) << 32;
+#endif
+    vmx_vmcs_exit(v);
+    return 1;
+}
+
 static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     vmx_vmcs_enter(v);
@@ -1384,6 +1412,8 @@ static struct hvm_function_table __read_
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
    .update_guest_efer    = vmx_update_guest_efer,
+    .set_guest_pat        = vmx_set_guest_pat,
+    .get_guest_pat        = vmx_get_guest_pat,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -116,6 +116,9 @@ struct hvm_function_table {
     void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
     void (*update_guest_efer)(struct vcpu *v);
 
+    int (*get_guest_pat)(struct vcpu *v, u64 *);
+    int (*set_guest_pat)(struct vcpu *v, u64);
+
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);
 
     void (*inject_exception)(unsigned int trapnr, int errcode,
@@ -166,6 +169,9 @@ void hvm_vcpu_reset_state(struct vcpu *v
 
 bool_t hvm_send_assist_req(struct vcpu *v);
 
+void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
+int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
 u64 hvm_get_guest_tsc(struct vcpu *v);