51e517e6-AMD-IOMMU-allocate-IRTEs.patch
51e5183f-AMD-IOMMU-untie-remap-and-vector-maps.patch
51e63df6-VMX-fix-interaction-of-APIC-V-and-Viridian-emulation.patch
52146070-ACPI-fix-acpi_os_map_memory.patch
5214d26a-VT-d-warn-about-CFI-being-enabled-by-firmware.patch
5215d094-Nested-VMX-Check-whether-interrupt-is-blocked-by-TPR.patch
5215d0c5-Nested-VMX-Force-check-ISR-when-L2-is-running.patch
5215d135-Nested-VMX-Clear-APIC-v-control-bit-in-vmcs02.patch
5215d2d5-Nested-VMX-Update-APIC-v-RVI-SVI-when-vmexit-to-L1.patch
5215d8b0-Correct-X2-APIC-HVM-emulation.patch
- Dropped 520d417d-xen-Add-stdbool.h-workaround-for-BSD.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=267
# Commit 84e6af58707520baf59c1c86c29237419e439afb
# Date 2013-08-22 10:59:01 +0200
# Author Yang Zhang <yang.z.zhang@Intel.com>
# Committer Jan Beulich <jbeulich@suse.com>
Nested VMX: Update APIC-v(RVI/SVI) when vmexit to L1

If APIC-v is enabled, all interrupts to L1 are delivered through APIC-v.
But while L2 is running, an external interrupt causes an L1 vmexit with
exit reason "external interrupt", and L1 then picks up the interrupt
through vmcs12. When L1 acks the interrupt, the APIC-v hardware (still
enabled while L1 runs) performs its vEOI update. The problem is that the
interrupt was not delivered through the APIC-v hardware, so SVI/RVI/vPPR
were never set, yet the hardware relies on them when doing the vEOI
update. The solution is that, when L1 picks up the interrupt from
vmcs12, the hypervisor updates SVI/RVI/vPPR so that the subsequent vEOI
and vPPR updates work correctly.

Also, since the interrupt is delivered through vmcs12, the APIC-v
hardware will not clear vIRR, so the hypervisor needs to clear it before
L1 resumes.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Acked-by: "Dong, Eddie" <eddie.dong@intel.com>
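For context on the GUEST_INTR_STATUS write done by nvmx_update_apicv() in
the vvmx.c hunk below: per the VMX specification, this 16-bit VMCS field
packs RVI (Requesting Virtual Interrupt, the highest-priority vector still
pending in the vIRR) into bits 7:0 and SVI (Servicing Virtual Interrupt,
the highest in-service vector) into bits 15:8. The following standalone
sketch is not part of the patch; encode_guest_intr_status() is an
illustrative name used only to model that encoding:

/* Standalone illustration (not Xen code) of how the patch builds the
 * value written to GUEST_INTR_STATUS: RVI in bits 7:0, SVI in bits 15:8. */
#include <stdint.h>
#include <stdio.h>

static uint16_t encode_guest_intr_status(uint8_t svi, int rvi /* -1: none */)
{
    /* SVI: the vector the hypervisor force-acked into the vISR on
     * behalf of L1 (vlapic_ack_pending_irq(v, vector, 1) in the patch). */
    uint16_t status = (uint16_t)svi << 8;

    /* RVI: highest-priority vector still pending in the vIRR, if any
     * (mirrors the vlapic_has_pending_irq() result in the patch). */
    if ( rvi != -1 )
        status |= (uint8_t)rvi;

    return status;
}

int main(void)
{
    /* Example: vector 0xd1 in service, vector 0x71 still pending -> 0xd171. */
    printf("GUEST_INTR_STATUS = %#06x\n", encode_guest_intr_status(0xd1, 0x71));
    return 0;
}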
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -437,7 +437,7 @@ struct hvm_intack hvm_vcpu_ack_pending_i
         intack.vector = (uint8_t)vector;
         break;
     case hvm_intsrc_lapic:
-        if ( !vlapic_ack_pending_irq(v, intack.vector) )
+        if ( !vlapic_ack_pending_irq(v, intack.vector, 0) )
             intack = hvm_intack_none;
         break;
     case hvm_intsrc_vector:
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -168,6 +168,14 @@ static uint32_t vlapic_get_ppr(struct vl
     return ppr;
 }
 
+uint32_t vlapic_set_ppr(struct vlapic *vlapic)
+{
+    uint32_t ppr = vlapic_get_ppr(vlapic);
+
+    vlapic_set_reg(vlapic, APIC_PROCPRI, ppr);
+    return ppr;
+}
+
 static int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
 {
     int result = 0;
@@ -1050,15 +1058,15 @@ int vlapic_has_pending_irq(struct vcpu *
     return irr;
 }
 
-int vlapic_ack_pending_irq(struct vcpu *v, int vector)
+int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
 
-    if ( vlapic_virtual_intr_delivery_enabled() )
-        return 1;
-
-    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
-    vlapic_clear_irr(vector, vlapic);
+    if ( force_ack || !vlapic_virtual_intr_delivery_enabled() )
+    {
+        vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+        vlapic_clear_irr(vector, vlapic);
+    }
 
     return 1;
 }
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -185,7 +185,7 @@ static int nvmx_intr_intercept(struct vc
         if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
             return 0;
 
-        vmx_inject_extint(intack.vector);
+        vmx_inject_extint(intack.vector, intack.source);
 
         ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS);
         if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
@@ -314,7 +314,7 @@ void vmx_intr_assist(void)
     else
     {
         HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
-        vmx_inject_extint(intack.vector);
+        vmx_inject_extint(intack.vector, intack.source);
         pt_intr_post(v, intack);
     }
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1205,7 +1205,7 @@ static void vmx_update_guest_efer(struct
 }
 
 void nvmx_enqueue_n2_exceptions(struct vcpu *v,
-            unsigned long intr_fields, int error_code)
+            unsigned long intr_fields, int error_code, uint8_t source)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
@@ -1213,6 +1213,7 @@ void nvmx_enqueue_n2_exceptions(struct v
         /* enqueue the exception till the VMCS switch back to L1 */
         nvmx->intr.intr_info = intr_fields;
         nvmx->intr.error_code = error_code;
+        nvmx->intr.source = source;
         vcpu_nestedhvm(v).nv_vmexit_pending = 1;
         return;
     }
@@ -1224,7 +1225,8 @@ void nvmx_enqueue_n2_exceptions(struct v
 
 static int nvmx_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
 {
-    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code);
+    nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code,
+                               hvm_intsrc_none);
     return NESTEDHVM_VMEXIT_DONE;
 }
 
@@ -1255,7 +1257,7 @@ static void __vmx_inject_exception(int t
         curr->arch.hvm_vmx.vmx_emulate = 1;
 }
 
-void vmx_inject_extint(int trap)
+void vmx_inject_extint(int trap, uint8_t source)
 {
     struct vcpu *v = current;
     u32 pin_based_cntrl;
@@ -1266,7 +1268,7 @@ void vmx_inject_extint(int trap)
         if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) {
             nvmx_enqueue_n2_exceptions (v,
                INTR_INFO_VALID_MASK | (X86_EVENTTYPE_EXT_INTR<<8) | trap,
-               HVM_DELIVER_NO_ERROR_CODE);
+               HVM_DELIVER_NO_ERROR_CODE, source);
             return;
         }
     }
@@ -1285,7 +1287,7 @@ void vmx_inject_nmi(void)
         if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) {
             nvmx_enqueue_n2_exceptions (v,
                INTR_INFO_VALID_MASK | (X86_EVENTTYPE_NMI<<8) | TRAP_nmi,
-               HVM_DELIVER_NO_ERROR_CODE);
+               HVM_DELIVER_NO_ERROR_CODE, hvm_intsrc_nmi);
             return;
         }
     }
@@ -1353,7 +1355,7 @@ static void vmx_inject_trap(struct hvm_t
     {
         nvmx_enqueue_n2_exceptions (curr,
             INTR_INFO_VALID_MASK | (_trap.type<<8) | _trap.vector,
-            _trap.error_code);
+            _trap.error_code, hvm_intsrc_none);
         return;
     }
     else
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1295,6 +1295,36 @@ static void sync_exception_state(struct
     }
 }
 
+static void nvmx_update_apicv(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long reason = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_REASON);
+    uint32_t intr_info = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_INFO);
+
+    if ( reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+         nvmx->intr.source == hvm_intsrc_lapic &&
+         (intr_info & INTR_INFO_VALID_MASK) )
+    {
+        uint16_t status;
+        uint32_t rvi, ppr;
+        uint32_t vector = intr_info & 0xff;
+        struct vlapic *vlapic = vcpu_vlapic(v);
+
+        vlapic_ack_pending_irq(v, vector, 1);
+
+        ppr = vlapic_set_ppr(vlapic);
+        WARN_ON((ppr & 0xf0) != (vector & 0xf0));
+
+        status = vector << 8;
+        rvi = vlapic_has_pending_irq(v);
+        if ( rvi != -1 )
+            status |= rvi & 0xff;
+
+        __vmwrite(GUEST_INTR_STATUS, status);
+    }
+}
+
 static void virtual_vmexit(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -1340,6 +1370,9 @@ static void virtual_vmexit(struct cpu_us
     /* updating host cr0 to sync TS bit */
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
 
+    if ( cpu_has_vmx_virtual_intr_delivery )
+        nvmx_update_apicv(v);
+
     vmreturn(regs, VMSUCCEED);
 }
 
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -98,7 +98,7 @@ bool_t is_vlapic_lvtpc_enabled(struct vl
 void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
 
 int vlapic_has_pending_irq(struct vcpu *v);
-int vlapic_ack_pending_irq(struct vcpu *v, int vector);
+int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack);
 
 int vlapic_init(struct vcpu *v);
 void vlapic_destroy(struct vcpu *v);
@@ -110,6 +110,7 @@ void vlapic_tdt_msr_set(struct vlapic *v
 uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic);
 
 int vlapic_accept_pic_intr(struct vcpu *v);
+uint32_t vlapic_set_ppr(struct vlapic *vlapic);
 
 void vlapic_adjust_i8259_target(struct domain *d);
 
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -448,7 +448,7 @@ static inline int __vmxon(u64 addr)
 
 void vmx_get_segment_register(struct vcpu *, enum x86_segment,
                               struct segment_register *);
-void vmx_inject_extint(int trap);
+void vmx_inject_extint(int trap, uint8_t source);
 void vmx_inject_nmi(void);
 
 int ept_p2m_init(struct p2m_domain *p2m);
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -36,6 +36,7 @@ struct nestedvmx {
     struct {
         unsigned long intr_info;
         u32 error_code;
+        u8 source;
     } intr;
     struct {
         bool_t enabled;