a11c33863f
5281fad4-numa-sched-leave-node-affinity-alone-if-not-in-auto-mode.patch 52820823-nested-SVM-adjust-guest-handling-of-structure-mappings.patch 52820863-VMX-don-t-crash-processing-d-debug-key.patch 5282492f-x86-eliminate-has_arch_mmios.patch 52864df2-credit-Update-other-parameters-when-setting-tslice_ms.patch 52864f30-fix-leaking-of-v-cpu_affinity_saved-on-domain-destruction.patch 5289d225-nested-VMX-don-t-ignore-mapping-errors.patch 528a0eb0-x86-consider-modules-when-cutting-off-memory.patch 528f606c-x86-hvm-reset-TSC-to-0-after-domain-resume-from-S3.patch 528f609c-x86-crash-disable-the-watchdog-NMIs-on-the-crashing-cpu.patch 52932418-x86-xsave-fix-nonlazy-state-handling.patch - Add missing requires to pciutils package for xend-tools - bnc#851749 - Xen service file does not call xend properly xend.service - bnc#851386 - VUL-0: xen: XSA-78: Insufficient TLB flushing in VT-d (iommu) code 528a0e5b-TLB-flushing-in-dma_pte_clear_one.patch - bnc#849667 - VUL-0: xen: XSA-74: Lock order reversal between page_alloc_lock and mm_rwlock CVE-2013-4553-xsa74.patch - bnc#849665 - VUL-0: CVE-2013-4551: xen: XSA-75: Host crash due to guest VMX instruction execution 52809208-nested-VMX-VMLANUCH-VMRESUME-emulation-must-check-permission-1st.patch - bnc#849668 - VUL-0: xen: XSA-76: Hypercalls exposed to privilege rings 1 and 2 of HVM guests OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=279
116 lines
4.0 KiB
Diff
# Commit e02b14e531a95399fc9d8647ec3cc6f310a7d455
# Date 2013-11-18 09:39:01 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
nested VMX: don't ignore mapping errors

Rather than ignoring failures to map the virtual VMCS as well as MSR or
I/O port bitmaps, convert those into failures of the respective
instructions (avoiding to dereference NULL pointers). Ultimately such
failures should be handled transparently (by using transient mappings
when they actually need to be accessed, just like nested SVM does).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -746,7 +746,7 @@ static void __clear_current_vvmcs(struct
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
 }
 
-static void __map_msr_bitmap(struct vcpu *v)
+static bool_t __must_check _map_msr_bitmap(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -755,9 +755,11 @@ static void __map_msr_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
     nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->msrbitmap != NULL;
 }
 
-static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     unsigned long gpa;
@@ -768,12 +770,14 @@ static void __map_io_bitmap(struct vcpu
         hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
+
+    return nvmx->iobitmap[index] != NULL;
 }
 
-static inline void map_io_bitmap_all(struct vcpu *v)
+static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 {
-    __map_io_bitmap (v, IO_BITMAP_A);
-    __map_io_bitmap (v, IO_BITMAP_B);
+    return _map_io_bitmap(v, IO_BITMAP_A) &&
+           _map_io_bitmap(v, IO_BITMAP_B);
 }
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
@@ -1609,9 +1613,15 @@ int nvmx_handle_vmptrld(struct cpu_user_
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
-        nvcpu->nv_vvmcxaddr = gpa;
-        map_io_bitmap_all (v);
-        __map_msr_bitmap(v);
+        if ( nvcpu->nv_vvmcx )
+            nvcpu->nv_vvmcxaddr = gpa;
+        if ( !nvcpu->nv_vvmcx ||
+             !map_io_bitmap_all(v) ||
+             !_map_msr_bitmap(v) )
+        {
+            vmreturn(regs, VMFAIL_VALID);
+            goto out;
+        }
     }
 
     if ( cpu_has_vmx_vmcs_shadowing )
@@ -1723,6 +1733,7 @@ int nvmx_handle_vmwrite(struct cpu_user_
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long operand;
     u64 vmcs_encoding;
+    bool_t okay = 1;
 
     if ( decode_vmx_inst(regs, &decode, &operand, 0)
              != X86EMUL_OKAY )
@@ -1731,16 +1742,21 @@ int nvmx_handle_vmwrite(struct cpu_user_
     vmcs_encoding = reg_read(regs, decode.reg2);
     __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
 
-    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_A);
-    else if ( vmcs_encoding == IO_BITMAP_B ||
-              vmcs_encoding == IO_BITMAP_B_HIGH )
-        __map_io_bitmap (v, IO_BITMAP_B);
+    switch ( vmcs_encoding )
+    {
+    case IO_BITMAP_A: case IO_BITMAP_A_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        break;
+    case IO_BITMAP_B: case IO_BITMAP_B_HIGH:
+        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        break;
+    case MSR_BITMAP: case MSR_BITMAP_HIGH:
+        okay = _map_msr_bitmap(v);
+        break;
+    }
 
-    if ( vmcs_encoding == MSR_BITMAP || vmcs_encoding == MSR_BITMAP_HIGH )
-        __map_msr_bitmap(v);
+    vmreturn(regs, okay ? VMSUCCEED : VMFAIL_VALID);
 
-    vmreturn(regs, VMSUCCEED);
     return X86EMUL_OKAY;
 }
 