da53445dea
recursive pagetable for 32-bit PV guests (XSA-185) 57d1563d-x86-32on64-don-t-allow-recursive-page-tables-from-L3.patch - bsc#995789 - VUL-0: CVE-2016-7093: xen: x86: Mishandling of instruction pointer truncation during emulation (XSA-186) 57d15679-x86-emulate-Correct-boundary-interactions-of-emulated-insns.patch 57d18642-hvm-fep-Allow-test-insns-crossing-1-0-boundary.patch - bsc#995792 - VUL-0: CVE-2016-7094: xen: x86 HVM: Overflow of sh_ctxt->seg_reg[] (XSA-187) 57d1569a-x86-shadow-Avoid-overflowing-sh_ctxt-seg_reg.patch 57d18642-x86-segment-Bounds-check-accesses-to-emulation-ctxt-seg_reg.patch - bsc#991934 - xen hypervisor crash in csched_acct 57c96df3-credit1-fix-a-race-when-picking-initial-pCPU.patch - Upstream patches from Jan 57c4412b-x86-HVM-add-guarding-logic-for-VMX-specific-code.patch 57c57f73-libxc-correct-max_pfn-calculation-for-saving-domain.patch 57c805bf-x86-levelling-restrict-non-architectural-OSXSAVE-handling.patch 57c805c1-x86-levelling-pass-vcpu-to-ctxt_switch_levelling.patch 57c805c3-x86-levelling-provide-architectural-OSXSAVE-handling.patch 57c82be2-x86-32on64-adjust-call-gate-emulation.patch 57c96e2c-x86-correct-PT_NOTE-file-position.patch 57cfed43-VMX-correct-feature-checks-for-MPX-and-XSAVES.patch - bsc#989679 - [pvusb feature] USB device not found when 'virsh detach-device guest usb.xml' 57c93e52-fix-error-in-libxl_device_usbdev_list.patch OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=450
204 lines
7.2 KiB
Diff
204 lines
7.2 KiB
Diff
References: bsc#995792
|
|
|
|
# Commit 4fa0105d95be6e7145a1f6fd1036ccd43976228c
|
|
# Date 2016-09-08 16:39:46 +0100
|
|
# Author Andrew Cooper <andrew.cooper3@citrix.com>
|
|
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
|
|
x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
|
|
|
|
HVM HAP codepaths have space for all segment registers in the seg_reg[]
|
|
cache (with x86_seg_none still risking an array overrun), while the shadow
|
|
codepaths only have space for the user segments.
|
|
|
|
Range check the input segment of *_get_seg_reg() against the size of the array
|
|
used to cache the results, to avoid overruns in the case that the callers
|
|
don't filter their input suitably.
|
|
|
|
Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
|
|
an incomplete attempt at range checking, and are now superseded.  Make
|
|
hvm_get_seg_reg() static, as it is not used outside of shadow/common.c
|
|
|
|
No functional change, but far easier to reason that no overflow is possible.
|
|
|
|
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
Acked-by: Tim Deegan <tim@xen.org>
|
|
Acked-by: Jan Beulich <jbeulich@suse.com>
|
|
|
|
# Commit 4c47c47938ea24c73d9459f9f0b6923513772b5d
|
|
# Date 2016-09-09 15:31:01 +0100
|
|
# Author Andrew Cooper <andrew.cooper3@citrix.com>
|
|
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
|
|
xen/x86: Fix build with clang following c/s 4fa0105
|
|
|
|
https://travis-ci.org/xen-project/xen/jobs/158494027#L2344
|
|
|
|
Clang complains:
|
|
|
|
emulate.c:2016:14: error: comparison of unsigned enum expression < 0
|
|
is always false [-Werror,-Wtautological-compare]
|
|
if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
|
|
~~~ ^ ~
|
|
|
|
Clang is wrong to raise a warning like this. The signed-ness of an enum is
|
|
implementation defined in C, and robust code must not assume the choices made
|
|
by the compiler.
|
|
|
|
In this case, dropping the < 0 check creates a latent bug which would result
|
|
in an array underflow when compiled with a compiler which chooses a signed
|
|
enum.
|
|
|
|
Work around the bug by explicitly pulling seg into an unsigned integer, and
|
|
only perform the upper bounds check.
|
|
|
|
No functional change.
|
|
|
|
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
Reviewed-by: Jan Beulich <jbeulich@suse.com>
|
|
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
|
|
|
|
--- a/xen/arch/x86/hvm/emulate.c
|
|
+++ b/xen/arch/x86/hvm/emulate.c
|
|
@@ -534,6 +534,8 @@ static int hvmemul_virtual_to_linear(
|
|
*reps = min_t(unsigned long, *reps, max_reps);
|
|
|
|
reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
|
|
+ if ( IS_ERR(reg) )
|
|
+ return -PTR_ERR(reg);
|
|
|
|
if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
|
|
{
|
|
@@ -1369,6 +1371,10 @@ static int hvmemul_read_segment(
|
|
struct hvm_emulate_ctxt *hvmemul_ctxt =
|
|
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
|
|
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
|
|
+
|
|
+ if ( IS_ERR(sreg) )
|
|
+ return -PTR_ERR(sreg);
|
|
+
|
|
memcpy(reg, sreg, sizeof(struct segment_register));
|
|
return X86EMUL_OKAY;
|
|
}
|
|
@@ -1382,6 +1388,9 @@ static int hvmemul_write_segment(
|
|
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
|
|
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
|
|
|
|
+ if ( IS_ERR(sreg) )
|
|
+ return -PTR_ERR(sreg);
|
|
+
|
|
memcpy(sreg, reg, sizeof(struct segment_register));
|
|
__set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
|
|
|
|
@@ -1934,13 +1943,22 @@ void hvm_emulate_writeback(
|
|
}
|
|
}
|
|
|
|
+/*
|
|
+ * Callers which pass a known in-range x86_segment can rely on the return
|
|
+ * pointer being valid. Other callers must explicitly check for errors.
|
|
+ */
|
|
struct segment_register *hvmemul_get_seg_reg(
|
|
enum x86_segment seg,
|
|
struct hvm_emulate_ctxt *hvmemul_ctxt)
|
|
{
|
|
- if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
|
|
- hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
|
|
- return &hvmemul_ctxt->seg_reg[seg];
|
|
+ unsigned int idx = seg;
|
|
+
|
|
+ if ( idx >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
|
|
+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
|
|
+
|
|
+ if ( !__test_and_set_bit(idx, &hvmemul_ctxt->seg_reg_accessed) )
|
|
+ hvm_get_segment_register(current, idx, &hvmemul_ctxt->seg_reg[idx]);
|
|
+ return &hvmemul_ctxt->seg_reg[idx];
|
|
}
|
|
|
|
static const char *guest_x86_mode_to_str(int mode)
|
|
--- a/xen/arch/x86/mm/shadow/common.c
|
|
+++ b/xen/arch/x86/mm/shadow/common.c
|
|
@@ -123,12 +123,22 @@ __initcall(shadow_audit_key_init);
|
|
/* x86 emulator support for the shadow code
|
|
*/
|
|
|
|
-struct segment_register *hvm_get_seg_reg(
|
|
+/*
|
|
+ * Callers which pass a known in-range x86_segment can rely on the return
|
|
+ * pointer being valid. Other callers must explicitly check for errors.
|
|
+ */
|
|
+static struct segment_register *hvm_get_seg_reg(
|
|
enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
|
|
{
|
|
- struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
|
|
- if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
|
|
- hvm_get_segment_register(current, seg, seg_reg);
|
|
+ unsigned int idx = seg;
|
|
+ struct segment_register *seg_reg;
|
|
+
|
|
+ if ( idx >= ARRAY_SIZE(sh_ctxt->seg_reg) )
|
|
+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
|
|
+
|
|
+ seg_reg = &sh_ctxt->seg_reg[idx];
|
|
+ if ( !__test_and_set_bit(idx, &sh_ctxt->valid_seg_regs) )
|
|
+ hvm_get_segment_register(current, idx, seg_reg);
|
|
return seg_reg;
|
|
}
|
|
|
|
@@ -143,14 +153,9 @@ static int hvm_translate_linear_addr(
|
|
struct segment_register *reg;
|
|
int okay;
|
|
|
|
- /*
|
|
- * Can arrive here with non-user segments. However, no such cirucmstance
|
|
- * is part of a legitimate pagetable update, so fail the emulation.
|
|
- */
|
|
- if ( !is_x86_user_segment(seg) )
|
|
- return X86EMUL_UNHANDLEABLE;
|
|
-
|
|
reg = hvm_get_seg_reg(seg, sh_ctxt);
|
|
+ if ( IS_ERR(reg) )
|
|
+ return -PTR_ERR(reg);
|
|
|
|
okay = hvm_virtual_to_linear_addr(
|
|
seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
|
|
@@ -253,9 +258,6 @@ hvm_emulate_write(enum x86_segment seg,
|
|
unsigned long addr;
|
|
int rc;
|
|
|
|
- if ( !is_x86_user_segment(seg) )
|
|
- return X86EMUL_UNHANDLEABLE;
|
|
-
|
|
/* How many emulations could we save if we unshadowed on stack writes? */
|
|
if ( seg == x86_seg_ss )
|
|
perfc_incr(shadow_fault_emulate_stack);
|
|
@@ -283,7 +285,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg
|
|
unsigned long addr, old, new;
|
|
int rc;
|
|
|
|
- if ( !is_x86_user_segment(seg) || bytes > sizeof(long) )
|
|
+ if ( bytes > sizeof(long) )
|
|
return X86EMUL_UNHANDLEABLE;
|
|
|
|
rc = hvm_translate_linear_addr(
|
|
--- a/xen/arch/x86/mm/shadow/private.h
|
|
+++ b/xen/arch/x86/mm/shadow/private.h
|
|
@@ -740,8 +740,6 @@ const struct x86_emulate_ops *shadow_ini
|
|
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
|
|
void shadow_continue_emulation(
|
|
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
|
|
-struct segment_register *hvm_get_seg_reg(
|
|
- enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
|
|
|
|
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
|
|
/**************************************************************************/
|
|
--- a/xen/include/asm-x86/hvm/emulate.h
|
|
+++ b/xen/include/asm-x86/hvm/emulate.h
|
|
@@ -13,6 +13,7 @@
|
|
#define __ASM_X86_HVM_EMULATE_H__
|
|
|
|
#include <xen/config.h>
|
|
+#include <xen/err.h>
|
|
#include <asm/hvm/hvm.h>
|
|
#include <asm/x86_emulate.h>
|
|
|