edf6bf0381
23955-x86-pv-cpuid-xsave.patch
23957-cpufreq-error-paths.patch
- Upstream patches from Jan
  23933-pt-bus2bridge-update.patch
  23726-x86-intel-flexmigration-v2.patch
  23925-x86-AMD-ARAT-Fam12.patch
  23246-x86-xsave-enable.patch
  23897-x86-mce-offline-again.patch
- Update to Xen 4.1.2_rc3 c/s 23171
- bnc#720054 - Changed /etc/udev/rules.d/40-xen.rules to not run Xen's
  vif-bridge script when not running Xen. This is not a solution to the
  bug, but an improvement to the rules regardless.
  Updated udev-rules.patch
- Upstream patches from Jan
  23868-vtd-RMRR-validation.patch
  23871-x86-microcode-amd-silent.patch
  23898-cc-option-grep.patch
- Add pciback init script and sysconfig file, giving users a simple
  mechanism to configure pciback.
  init.pciback
  sysconfig.pciback
- Update scripts to use xl -f, or xm if xend is running:
  xen-updown.sh, init.xendomains, xmclone.sh

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=146
References: FATE#309900

# HG changeset patch
# User Keir Fraser <keir@xen.org>
# Date 1303116432 -3600
# Node ID 1276926e3795b11ef6ac2f59df900d8e0ba9f54b
# Parent  07d832ad23021445bc56fafaeb2843c94d868005

vmx/hvm: move mov-cr handling functions to generic HVM code

Currently the handling of CR access intercepts is done very
differently in SVM and VMX. For future use, move the VMX part
into the generic HVM path and use the exported functions.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Keir Fraser <keir@xen.org>
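
As an illustration of the interface this patch exports (an editor's
sketch, not part of the patch): a vendor-specific intercept handler
only has to decode the CR number and GPR operand from its exit
information, then defer to the common helpers, which return an
X86EMUL_* status and crash the offending domain themselves on invalid
input. A hypothetical SVM-side caller might look like:

    /* Hypothetical sketch; the function name and 'write' flag are
     * assumed for illustration and are not actual Xen code. */
    static int svm_cr_intercept_sketch(unsigned int cr, unsigned int gpr,
                                       int write)
    {
        /* Both helpers return X86EMUL_OKAY on success; on failure they
         * have already called domain_crash(), so the caller must simply
         * not advance the guest instruction pointer. */
        return write ? hvm_mov_to_cr(cr, gpr) : hvm_mov_from_cr(cr, gpr);
    }
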
Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c
@@ -1298,6 +1298,86 @@ static void hvm_set_uc_mode(struct vcpu
     return hvm_funcs.set_uc_mode(v);
 }
 
+int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
+{
+    struct vcpu *curr = current;
+    unsigned long val, *reg;
+
+    if ( (reg = get_x86_gpr(guest_cpu_user_regs(), gpr)) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
+        goto exit_and_crash;
+    }
+
+    val = *reg;
+    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val));
+    HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val);
+
+    switch ( cr )
+    {
+    case 0:
+        return hvm_set_cr0(val);
+
+    case 3:
+        return hvm_set_cr3(val);
+
+    case 4:
+        return hvm_set_cr4(val);
+
+    case 8:
+        vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
+        break;
+
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+        goto exit_and_crash;
+    }
+
+    return X86EMUL_OKAY;
+
+ exit_and_crash:
+    domain_crash(curr->domain);
+    return X86EMUL_UNHANDLEABLE;
+}
+
+int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
+{
+    struct vcpu *curr = current;
+    unsigned long val = 0, *reg;
+
+    if ( (reg = get_x86_gpr(guest_cpu_user_regs(), gpr)) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
+        goto exit_and_crash;
+    }
+
+    switch ( cr )
+    {
+    case 0:
+    case 2:
+    case 3:
+    case 4:
+        val = curr->arch.hvm_vcpu.guest_cr[cr];
+        break;
+    case 8:
+        val = (vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI) & 0xf0) >> 4;
+        break;
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %u\n", cr);
+        goto exit_and_crash;
+    }
+
+    *reg = val;
+    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(val));
+    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%u, value = %lx", cr, val);
+
+    return X86EMUL_OKAY;
+
+ exit_and_crash:
+    domain_crash(curr->domain);
+    return X86EMUL_UNHANDLEABLE;
+}
+
 int hvm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
Index: xen-4.1.2-testing/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/vmx/vmx.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/vmx/vmx.c
@@ -1545,182 +1545,42 @@ static void vmx_invlpg_intercept(unsigne
     vpid_sync_vcpu_gva(curr, vaddr);
 }
 
-#define CASE_SET_REG(REG, reg)      \
-    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
-#define CASE_GET_REG(REG, reg)      \
-    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
-
-#define CASE_EXTEND_SET_REG         \
-    CASE_EXTEND_REG(S)
-#define CASE_EXTEND_GET_REG         \
-    CASE_EXTEND_REG(G)
-
-#ifdef __i386__
-#define CASE_EXTEND_REG(T)
-#else
-#define CASE_EXTEND_REG(T)          \
-    CASE_ ## T ## ET_REG(R8, r8);   \
-    CASE_ ## T ## ET_REG(R9, r9);   \
-    CASE_ ## T ## ET_REG(R10, r10); \
-    CASE_ ## T ## ET_REG(R11, r11); \
-    CASE_ ## T ## ET_REG(R12, r12); \
-    CASE_ ## T ## ET_REG(R13, r13); \
-    CASE_ ## T ## ET_REG(R14, r14); \
-    CASE_ ## T ## ET_REG(R15, r15)
-#endif
-
-static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
-{
-    unsigned long value;
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    int rc = 0;
-    unsigned long old;
-
-    switch ( gp )
-    {
-    CASE_GET_REG(EAX, eax);
-    CASE_GET_REG(ECX, ecx);
-    CASE_GET_REG(EDX, edx);
-    CASE_GET_REG(EBX, ebx);
-    CASE_GET_REG(EBP, ebp);
-    CASE_GET_REG(ESI, esi);
-    CASE_GET_REG(EDI, edi);
-    CASE_GET_REG(ESP, esp);
-    CASE_EXTEND_GET_REG;
-    default:
-        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
-        goto exit_and_crash;
-    }
-
-    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
-
-    switch ( cr )
-    {
-    case 0:
-        old = v->arch.hvm_vcpu.guest_cr[0];
-        rc = !hvm_set_cr0(value);
-        if (rc)
-            hvm_memory_event_cr0(value, old);
-        return rc;
-
-    case 3:
-        old = v->arch.hvm_vcpu.guest_cr[3];
-        rc = !hvm_set_cr3(value);
-        if (rc)
-            hvm_memory_event_cr3(value, old);
-        return rc;
-
-    case 4:
-        old = v->arch.hvm_vcpu.guest_cr[4];
-        rc = !hvm_set_cr4(value);
-        if (rc)
-            hvm_memory_event_cr4(value, old);
-        return rc;
-
-    case 8:
-        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
-        break;
-
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        goto exit_and_crash;
-    }
-
-    return 1;
-
- exit_and_crash:
-    domain_crash(v->domain);
-    return 0;
-}
-
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
-static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
+static int vmx_cr_access(unsigned long exit_qualification)
 {
-    unsigned long value = 0;
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-
-    switch ( cr )
-    {
-    case 3:
-        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
-        break;
-    case 8:
-        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
-        value = (value & 0xF0) >> 4;
-        break;
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        domain_crash(v->domain);
-        break;
-    }
-
-    switch ( gp ) {
-    CASE_SET_REG(EAX, eax);
-    CASE_SET_REG(ECX, ecx);
-    CASE_SET_REG(EDX, edx);
-    CASE_SET_REG(EBX, ebx);
-    CASE_SET_REG(EBP, ebp);
-    CASE_SET_REG(ESI, esi);
-    CASE_SET_REG(EDI, edi);
-    CASE_SET_REG(ESP, esp);
-    CASE_EXTEND_SET_REG;
-    default:
-        printk("invalid gp: %d\n", gp);
-        domain_crash(v->domain);
-        break;
-    }
-
-    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
-
-    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
-}
-
-static int vmx_cr_access(unsigned long exit_qualification,
-                         struct cpu_user_regs *regs)
-{
-    unsigned int gp, cr;
-    unsigned long value;
-    struct vcpu *v = current;
+    struct vcpu *curr = current;
 
-    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
+    switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
     {
-    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
-        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
-        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
-        return mov_to_cr(gp, cr, regs);
-    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
-        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
-        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
-        mov_from_cr(cr, gp, regs);
-        break;
-    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
-    {
-        unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
-        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
-        vmx_update_guest_cr(v, 0);
-
-        hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
-
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR: {
+        unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+        unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+        return hvm_mov_to_cr(cr, gp);
+    }
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR: {
+        unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+        unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+        return hvm_mov_from_cr(cr, gp);
+    }
+    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
+        unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
+        curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+        vmx_update_guest_cr(curr, 0);
+        hvm_memory_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
         HVMTRACE_0D(CLTS);
         break;
     }
-    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
-        value = v->arch.hvm_vcpu.guest_cr[0];
+    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: {
+        unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
         value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
         HVMTRACE_LONG_1D(LMSW, value);
-        return !hvm_set_cr0(value);
+        return hvm_set_cr0(value);
+    }
     default:
         BUG();
     }
 
-    return 1;
+    return X86EMUL_OKAY;
 }
 
 static const struct lbr_info {
@@ -2525,7 +2385,7 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_CR_ACCESS:
     {
         exit_qualification = __vmread(EXIT_QUALIFICATION);
-        if ( vmx_cr_access(exit_qualification, regs) )
+        if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
         break;
     }
Index: xen-4.1.2-testing/xen/arch/x86/traps.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/traps.c
+++ xen-4.1.2-testing/xen/arch/x86/traps.c
@@ -368,6 +368,36 @@ void vcpu_show_execution_state(struct vc
     vcpu_unpause(v);
 }
 
+unsigned long *get_x86_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg)
+{
+    void *p;
+
+    switch ( modrm_reg )
+    {
+    case  0: p = &regs->eax; break;
+    case  1: p = &regs->ecx; break;
+    case  2: p = &regs->edx; break;
+    case  3: p = &regs->ebx; break;
+    case  4: p = &regs->esp; break;
+    case  5: p = &regs->ebp; break;
+    case  6: p = &regs->esi; break;
+    case  7: p = &regs->edi; break;
+#if defined(__x86_64__)
+    case  8: p = &regs->r8;  break;
+    case  9: p = &regs->r9;  break;
+    case 10: p = &regs->r10; break;
+    case 11: p = &regs->r11; break;
+    case 12: p = &regs->r12; break;
+    case 13: p = &regs->r13; break;
+    case 14: p = &regs->r14; break;
+    case 15: p = &regs->r15; break;
+#endif
+    default: p = NULL; break;
+    }
+
+    return p;
+}
+
 static char *trapstr(int trapnr)
 {
     static char *strings[] = {
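
The helper added above returns a pointer into struct cpu_user_regs for
a ModRM-style register number, or NULL for an out-of-range index. A
minimal usage sketch (illustrative values, not code from this patch):

    /* Illustrative only: register number 2 selects edx/rdx, per the
     * mapping in get_x86_gpr() above. */
    unsigned long *reg = get_x86_gpr(guest_cpu_user_regs(), 2);
    if ( reg != NULL )
        *reg = 0x1234;    /* becomes the guest's edx/rdx value */
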
Index: xen-4.1.2-testing/xen/include/asm-x86/hvm/support.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/hvm/support.h
+++ xen-4.1.2-testing/xen/include/asm-x86/hvm/support.h
@@ -137,5 +137,7 @@ int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
 int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
+int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
Index: xen-4.1.2-testing/xen/include/asm-x86/hvm/vmx/vmx.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/hvm/vmx/vmx.h
+++ xen-4.1.2-testing/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -144,31 +144,15 @@ void vmx_update_cpu_exec_control(struct
  * Exit Qualifications for MOV for Control Register Access
  */
 /* 3:0 - control register number (CRn) */
-#define VMX_CONTROL_REG_ACCESS_NUM      0xf
+#define VMX_CONTROL_REG_ACCESS_NUM(eq)  ((eq) & 0xf)
 /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
-#define VMX_CONTROL_REG_ACCESS_TYPE     0x30
+#define VMX_CONTROL_REG_ACCESS_TYPE(eq) (((eq) >> 4) & 0x3)
+# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   0
+# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1
+# define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        2
+# define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        3
 /* 10:8 - general purpose register operand */
-#define VMX_CONTROL_REG_ACCESS_GPR      0xf00
-#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   (0 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        (2 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        (3 << 4)
-#define VMX_CONTROL_REG_ACCESS_GPR_EAX  (0 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ECX  (1 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDX  (2 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBX  (3 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESP  (4 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBP  (5 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESI  (6 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDI  (7 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R8   (8 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R9   (9 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R10  (10 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R11  (11 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R12  (12 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R13  (13 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R14  (14 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R15  (15 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR(eq)  (((eq) >> 8) & 0xf)
 
 /*
  * Access Rights
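
To make the new function-style macros concrete, a worked decode (an
editor's illustration, not part of the patch): for a guest
"mov %ebx, %cr4", hardware reports an exit qualification with CR
number 4 in bits 3:0, access type 0 (MOV to CR) in bits 5:4, and GPR
operand 3 (ebx) in bits 10:8.

    /* Illustrative only: the exit qualification for "mov %ebx, %cr4". */
    unsigned long eq = (3UL << 8) | (0UL << 4) | 4;
    ASSERT(VMX_CONTROL_REG_ACCESS_TYPE(eq) == VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR);
    ASSERT(VMX_CONTROL_REG_ACCESS_NUM(eq) == 4);
    ASSERT(VMX_CONTROL_REG_ACCESS_GPR(eq) == 3);
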
Index: xen-4.1.2-testing/xen/include/asm-x86/processor.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/processor.h
+++ xen-4.1.2-testing/xen/include/asm-x86/processor.h
@@ -592,6 +592,8 @@ int wrmsr_hypervisor_regs(uint32_t idx,
 int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
 
+unsigned long *get_x86_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_X86_PROCESSOR_H */