b244ce9e91
- sles10. Patch pygrub to get the kernel and initrd from the image.
  pygrub-boot-legacy-sles.patch
- bnc#842515 - VUL-0: CVE-2013-4375: XSA-71: xen: qemu disk backend (qdisk)
  resource leak
  CVE-2013-4375-xsa71.patch
- Upstream patches from Jan
  52496bea-x86-properly-handle-hvm_copy_from_guest_-phys-virt-errors.patch (Replaces CVE-2013-4355-xsa63.patch)
  52496c11-x86-mm-shadow-Fix-initialization-of-PV-shadow-L4-tables.patch (Replaces CVE-2013-4356-xsa64.patch)
  52496c32-x86-properly-set-up-fbld-emulation-operand-address.patch (Replaces CVE-2013-4361-xsa66.patch)
  52497c6c-x86-don-t-blindly-create-L3-tables-for-the-direct-map.patch
  524e971b-x86-idle-Fix-get_cpu_idle_time-s-interaction-with-offline-pcpus.patch
  524e9762-x86-percpu-Force-INVALID_PERCPU_AREA-to-non-canonical.patch
  524e983e-Nested-VMX-check-VMX-capability-before-read-VMX-related-MSRs.patch
  524e98b1-Nested-VMX-fix-IA32_VMX_CR4_FIXED1-msr-emulation.patch
  524e9dc0-xsm-forbid-PV-guest-console-reads.patch
  5256a979-x86-check-segment-descriptor-read-result-in-64-bit-OUTS-emulation.patch
  5256be57-libxl-fix-vif-rate-parsing.patch
  5256be84-tools-ocaml-fix-erroneous-free-of-cpumap-in-stub_xc_vcpu_getaffinity.patch
  5256be92-libxl-fix-out-of-memory-error-handling-in-libxl_list_cpupool.patch
  5257a89a-x86-correct-LDT-checks.patch
  5257a8e7-x86-add-address-validity-check-to-guest_map_l1e.patch
  5257a944-x86-check-for-canonical-address-before-doing-page-walks.patch
  525b95f4-scheduler-adjust-internal-locking-interface.patch
  525b9617-sched-fix-race-between-sched_move_domain-and-vcpu_wake.patch
  525e69e8-credit-unpause-parked-vcpu-before-destroying-it.patch
  525faf5e-x86-print-relevant-tail-part-of-filename-for-warnings-and-crashes.patch
- bnc#840196 - L3: MTU size on Dom0 gets reset when booting DomU

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=276

References: bnc#840592 CVE-2013-4355 XSA-63

# Commit 6bb838e7375f5b031e9ac346b353775c90de45dc
# Date 2013-09-30 14:17:46 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: properly handle hvm_copy_from_guest_{phys,virt}() errors

Ignoring them generally implies using uninitialized data and, in all
but two of the cases dealt with here, potentially leaking hypervisor
stack contents to guests.

This is CVE-2013-4355 / XSA-63.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
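
The hunks below all follow a single pattern: every hvm_copy_from_guest_phys()
result is checked explicitly, a paged-out or shared page makes the emulation
retry, an unmapped frame reads as all ones, and anything else is refused
rather than silently consuming an uninitialized buffer. A minimal sketch of
that pattern, assuming Xen-internal context; the helper emulate_phys_read()
and its arguments are illustrative only, not part of this patch:

    static int emulate_phys_read(paddr_t addr, unsigned int size,
                                 unsigned long *val)
    {
        unsigned long data = 0;

        switch ( hvm_copy_from_guest_phys(&data, addr, size) )
        {
        case HVMCOPY_okay:
            break;
        case HVMCOPY_gfn_paged_out:
        case HVMCOPY_gfn_shared:
            /* Page not usable right now - have the caller retry. */
            return X86EMUL_RETRY;
        case HVMCOPY_bad_gfn_to_mfn:
            /* Unmapped memory reads as all ones, never as stack contents. */
            data = ~0;
            break;
        case HVMCOPY_bad_gva_to_gfn:
            ASSERT(0);  /* a physical-address copy cannot fail VA translation */
            /* fall through */
        default:
            return X86EMUL_UNHANDLEABLE;
        }

        *val = data;
        return X86EMUL_OKAY;
    }
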
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2316,11 +2316,7 @@ void hvm_task_switch(

     rc = hvm_copy_from_guest_virt(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    if ( rc == HVMCOPY_gfn_shared )
+    if ( rc != HVMCOPY_okay )
         goto out;

     eflags = regs->eflags;
@@ -2365,13 +2361,11 @@ void hvm_task_switch(

     rc = hvm_copy_from_guest_virt(
         &tss, tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    /* Note: this could be optimised, if the callee functions knew we want RO
-     * access */
-    if ( rc == HVMCOPY_gfn_shared )
+    /*
+     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
+     * functions knew we want RO access.
+     */
+    if ( rc != HVMCOPY_okay )
         goto out;

--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *
     {
         for ( i = 0; i < p->count; i++ )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) ||
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
             {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 rc = X86EMUL_RETRY;
                 break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
             }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
                                data);
             if ( rc != X86EMUL_OKAY )
@@ -165,8 +176,28 @@ static int process_portio_intercept(port
         for ( i = 0; i < p->count; i++ )
         {
             data = 0;
-            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
-                                           p->size);
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
+                rc = X86EMUL_RETRY;
+                break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
+            }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = action(IOREQ_WRITE, p->addr, p->size, &data);
             if ( rc != X86EMUL_OKAY )
                 break;
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mp
         data = p->data;
         if ( p->data_is_ptr )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) &&
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 return X86EMUL_RETRY;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                return X86EMUL_UNHANDLEABLE;
+            }
         }

         switch ( p->size )
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -39,7 +39,9 @@ static void realmode_deliver_exception(

  again:
     last_byte = (vector * 4) + 3;
-    if ( idtr->limit < last_byte )
+    if ( idtr->limit < last_byte ||
+         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
+         HVMCOPY_okay )
     {
         /* Software interrupt? */
         if ( insn_len != 0 )
@@ -64,8 +66,6 @@ static void realmode_deliver_exception(
         }
     }

-    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
-
     frame[0] = regs->eip + insn_len;
     frame[1] = csr->sel;
     frame[2] = regs->eflags & ~X86_EFLAGS_RF;