xen/26576-x86-APICV-migration.patch
- add xorg-x11-util-devel to BuildRequires to get lndir(1)
- remove xen.migrate.tools_notify_restore_to_hangup_during_migration_--abort_if_busy.patch
  It changed the migration protocol, and upstream wants a different solution

- bnc#802221 - fix xenpaging
  re-add xenpaging.qemu.flush-cache.patch

- Upstream patches from Jan
  26891-x86-S3-Fix-cpu-pool-scheduling-after-suspend-resume.patch
  26930-x86-EFI-fix-runtime-call-status-for-compat-mode-Dom0.patch
- Additional fix for bnc#816159
  CVE-2013-1918-xsa45-followup.patch

- bnc#817068 - Xen guest with >1 SR-IOV VF won't start
  xen-managed-pci-device.patch

- Update to Xen 4.2.2 c/s 26064
  The following recent security patches are included in the tarball:
  CVE-2013-0151-xsa34.patch (bnc#797285)
  CVE-2012-6075-xsa41.patch (bnc#797523)
  CVE-2013-1917-xsa44.patch (bnc#813673)
  CVE-2013-1919-xsa46.patch (bnc#813675)

- Upstream patch from Jan
  26902-x86-EFI-pass-boot-services-variable-info-to-runtime-code.patch 

- bnc#816159 - VUL-0: xen: CVE-2013-1918: XSA-45: Several long
  latency operations are not preemptible
  CVE-2013-1918-xsa45-1-vcpu-destroy-pagetables-preemptible.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=237

References: FATE#313605
# HG changeset patch
# User Jiongxi Li <jiongxi.li@intel.com>
# Date 1361176078 -3600
# Node ID 4c3355d776e115f979fd2abc135bb77ba710f0d4
# Parent 217a4fc4cd46e8de06f2f43eed727838891e9398
x86/VMX: fix live migration while enabling APICV

SVI should be restored in case the guest is processing a virtual
interrupt while a domain's state is being saved; otherwise SVI would be
missed when virtual interrupt delivery is enabled.

Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
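
For reference (not part of the patch itself): the VMCS GUEST_INTR_STATUS
field is 16 bits wide, with RVI (the highest-priority requesting virtual
interrupt) in bits 7:0 and SVI (the highest-priority in-service virtual
interrupt) in bits 15:8. A minimal C sketch of that layout, using the
macros this patch introduces (the helper names are illustrative
assumptions, not Xen code):

    /* Sketch only: decompose the 16-bit guest interrupt status. */
    #define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
    #define VMX_GUEST_INTR_STATUS_SVI_OFFSET       8

    static unsigned int intr_status_rvi(unsigned long status)
    {
        return status & VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;  /* bits 7:0 */
    }

    static unsigned int intr_status_svi(unsigned long status)
    {
        return (status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET) &
               VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;           /* bits 15:8 */
    }

Restoring a domain rebuilds the VMCS; unless SVI is re-derived from the
vLAPIC's in-service register (as the vlapic.c hunk below does), the
in-service vector is dropped once virtual interrupt delivery is enabled.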
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1194,6 +1194,9 @@ static int lapic_load_regs(struct domain
     if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
         return -EINVAL;
 
+    if ( hvm_funcs.process_isr )
+        hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
+
     vlapic_adjust_i8259_target(d);
     lapic_rearm(s);
     return 0;
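
A note on the call added above (inferred from the code rather than stated
in the commit message): vlapic_find_highest_isr() returns the highest
vector set in the vLAPIC's in-service register, or a negative value when
nothing is in service; vmx_process_isr() below therefore clamps a negative
isr to 0, since an SVI of 0 encodes "no vector in service".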
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -290,8 +290,8 @@ void vmx_intr_assist(void)
             vmx_set_eoi_exit_bitmap(v, pt_vector);
 
         /* we need update the RVI field */
-        status &= ~(unsigned long)0x0FF;
-        status |= (unsigned long)0x0FF &
+        status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+        status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK &
                     intack.vector;
         __vmwrite(GUEST_INTR_STATUS, status);
         if (v->arch.hvm_vmx.eoi_exitmap_changed) {
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1523,6 +1523,29 @@ static int vmx_virtual_intr_delivery_ena
     return cpu_has_vmx_virtual_intr_delivery;
 }
 
+static void vmx_process_isr(int isr, struct vcpu *v)
+{
+    unsigned long status;
+    u8 old;
+
+    if ( !cpu_has_vmx_virtual_intr_delivery )
+        return;
+
+    if ( isr < 0 )
+        isr = 0;
+
+    vmx_vmcs_enter(v);
+    status = __vmread(GUEST_INTR_STATUS);
+    old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+    if ( isr != old )
+    {
+        status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+        status |= isr << VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+        __vmwrite(GUEST_INTR_STATUS, status);
+    }
+    vmx_vmcs_exit(v);
+}
+
 static struct hvm_function_table __read_mostly vmx_function_table = {
     .name = "VMX",
     .cpu_up_prepare = vmx_cpu_up_prepare,
@@ -1571,7 +1594,8 @@ static struct hvm_function_table __read_
     .nhvm_intr_blocked = nvmx_intr_blocked,
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
-    .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled
+    .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
+    .process_isr = vmx_process_isr,
 };
 
 struct hvm_function_table * __init start_vmx(void)
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -184,6 +184,7 @@ struct hvm_function_table {
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
+    void (*process_isr)(int isr, struct vcpu *v);
 };
 
 extern struct hvm_function_table hvm_funcs;
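
The new hook is optional: only VMX installs .process_isr in its
hvm_function_table, which is why the lapic_load_regs() hunk above guards
the call with a NULL check; SVM simply skips the SVI fix-up, and
vmx_process_isr() itself bails out early when virtual interrupt delivery
is unavailable.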
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -246,6 +246,10 @@ extern bool_t cpu_has_vmx_ins_outs_instr
 #define VMX_INTR_SHADOW_SMI 0x00000004
 #define VMX_INTR_SHADOW_NMI 0x00000008
 
+/* Guest interrupt status */
+#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
+#define VMX_GUEST_INTR_STATUS_SVI_OFFSET 8
+
 /* VMCS field encodings. */
 enum vmcs_field {
     VIRTUAL_PROCESSOR_ID = 0x00000000,
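
As a closing illustration (again not part of the patch): the RVI update in
vmx_intr_assist() and the SVI update in vmx_process_isr() are mirror-image
read-modify-write operations on the same field. A self-contained C sketch,
with function names that are assumptions for illustration only:

    /* Sketch only: set_rvi() clears bits 7:0 and preserves SVI;
     * set_svi() preserves bits 7:0 and replaces bits 15:8. */
    unsigned long set_rvi(unsigned long status, unsigned int vector)
    {
        status &= ~(unsigned long)VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
        status |= vector & VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
        return status;
    }

    unsigned long set_svi(unsigned long status, unsigned int isr)
    {
        status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
        status |= (isr & VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK)
                  << VMX_GUEST_INTR_STATUS_SVI_OFFSET;
        return status;
    }

For example, with status == 0x2f30 (SVI 0x2f, RVI 0x30), set_svi(status,
0x41) yields 0x4130: RVI is preserved while SVI becomes the restored
in-service vector.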