diff --git a/CVE-2016-2391-qemuu-usb-null-pointer-dereference-in-ohci-module.patch b/CVE-2016-2391-qemuu-usb-null-pointer-dereference-in-ohci-module.patch
new file mode 100644
index 0000000..621c6e0
--- /dev/null
+++ b/CVE-2016-2391-qemuu-usb-null-pointer-dereference-in-ohci-module.patch
@@ -0,0 +1,80 @@
+References: bsc#967101 CVE-2016-2391
+
+From d1b07becc481e09225cfe905ec357807ae07f095 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann
+Date: Tue, 16 Feb 2016 15:15:04 +0100
+Subject: [PATCH] ohci timer fix
+
+Signed-off-by: Gerd Hoffmann
+---
+ hw/usb/hcd-ohci.c | 31 +++++--------------------------
+ 1 file changed, 5 insertions(+), 26 deletions(-)
+
+Index: xen-4.6.1-testing/tools/qemu-xen-dir-remote/hw/usb/hcd-ohci.c
+===================================================================
+--- xen-4.6.1-testing.orig/tools/qemu-xen-dir-remote/hw/usb/hcd-ohci.c
++++ xen-4.6.1-testing/tools/qemu-xen-dir-remote/hw/usb/hcd-ohci.c
+@@ -1331,16 +1331,6 @@ static void ohci_frame_boundary(void *op
+ */
+ static int ohci_bus_start(OHCIState *ohci)
+ {
+- ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+- ohci_frame_boundary,
+- ohci);
+-
+- if (ohci->eof_timer == NULL) {
+- trace_usb_ohci_bus_eof_timer_failed(ohci->name);
+- ohci_die(ohci);
+- return 0;
+- }
+-
+ trace_usb_ohci_start(ohci->name);
+
+ ohci_sof(ohci);
+@@ -1352,11 +1342,7 @@ static int ohci_bus_start(OHCIState *ohc
+ static void ohci_bus_stop(OHCIState *ohci)
+ {
+ trace_usb_ohci_stop(ohci->name);
+- if (ohci->eof_timer) {
+- timer_del(ohci->eof_timer);
+- timer_free(ohci->eof_timer);
+- }
+- ohci->eof_timer = NULL;
++ timer_del(ohci->eof_timer);
+ }
+
+ /* Sets a flag in a port status register but only set it if the port is
+@@ -1881,6 +1867,8 @@ static int usb_ohci_init(OHCIState *ohci
+ ohci->async_td = 0;
+ qemu_register_reset(ohci_reset, ohci);
+
++ ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
++ ohci_frame_boundary, ohci);
+ return 0;
+ }
+
+@@ -1997,23 +1985,13 @@ static bool ohci_eof_timer_needed(void *
+ {
+ OHCIState *ohci = opaque;
+
+- return ohci->eof_timer != NULL;
+-}
+-
+-static int ohci_eof_timer_pre_load(void *opaque)
+-{
+- OHCIState *ohci = opaque;
+-
+- ohci_bus_start(ohci);
+-
+- return 0;
++ return timer_pending(ohci->eof_timer);
+ }
+
+ static const VMStateDescription vmstate_ohci_eof_timer = {
+ .name = "ohci-core/eof-timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+- .pre_load = ohci_eof_timer_pre_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER(eof_timer, OHCIState),
+ VMSTATE_END_OF_LIST()
diff --git a/CVE-2016-2392-qemuu-usb-null-pointer-dereference-in-NDIS-message-handling.patch b/CVE-2016-2392-qemuu-usb-null-pointer-dereference-in-NDIS-message-handling.patch
new file mode 100644
index 0000000..04c8ed2
--- /dev/null
+++ b/CVE-2016-2392-qemuu-usb-null-pointer-dereference-in-NDIS-message-handling.patch
@@ -0,0 +1,27 @@
+References: bsc#967090 CVE-2016-2392
+
+When processing remote NDIS control message packets, the USB Net
+device emulator checks to see if the USB configuration descriptor
+object is of RNDIS type (2), but it does not check whether it is null,
+which leads to a null pointer dereference. Add a check to avoid it.
+
+Reported-by: Qinghao Tang
+Signed-off-by: Prasad J Pandit
+---
+ hw/usb/dev-network.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: xen-4.6.1-testing/tools/qemu-xen-dir-remote/hw/usb/dev-network.c
+===================================================================
+--- xen-4.6.1-testing.orig/tools/qemu-xen-dir-remote/hw/usb/dev-network.c
++++ xen-4.6.1-testing/tools/qemu-xen-dir-remote/hw/usb/dev-network.c
+@@ -650,7 +650,8 @@ typedef struct USBNetState {
+
+ static int is_rndis(USBNetState *s)
+ {
+- return s->dev.config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE;
++ return s->dev.config ?
++ s->dev.config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE : 0;
+ }
+
+ static int ndis_query(USBNetState *s, uint32_t oid,
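The guard above is the entire fix: is_rndis() used to dereference s->dev.config unconditionally, and a guest that has not yet selected a USB configuration leaves that pointer NULL. A self-contained model of the patched behaviour (plain C; the struct names are invented stand-ins, only DEV_RNDIS_CONFIG_VALUE and bConfigurationValue come from the patch):

    #include <stdio.h>
    #include <stddef.h>

    #define DEV_RNDIS_CONFIG_VALUE 2

    /* Illustrative stand-ins for the QEMU descriptor structures. */
    struct usb_config { int bConfigurationValue; };
    struct usb_dev    { struct usb_config *config; };

    /* Patched logic: "no configuration selected" simply means "not RNDIS",
     * instead of dereferencing a NULL pointer. */
    static int is_rndis(struct usb_dev *d)
    {
        return d->config ?
               d->config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE : 0;
    }

    int main(void)
    {
        struct usb_config cfg = { .bConfigurationValue = DEV_RNDIS_CONFIG_VALUE };
        struct usb_dev configured   = { .config = &cfg };
        struct usb_dev unconfigured = { .config = NULL };

        printf("configured: %d, unconfigured: %d\n",
               is_rndis(&configured), is_rndis(&unconfigured));
        return 0;
    }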
diff --git a/CVE-2016-2538-qemuu-usb-integer-overflow-in-remote-NDIS-message-handling.patch b/CVE-2016-2538-qemuu-usb-integer-overflow-in-remote-NDIS-message-handling.patch
new file mode 100644
index 0000000..2e512fb
--- /dev/null
+++ b/CVE-2016-2538-qemuu-usb-integer-overflow-in-remote-NDIS-message-handling.patch
@@ -0,0 +1,55 @@
+References: bsc#968004 CVE-2016-2538
+
+Subject: usb: check RNDIS buffer offsets & length
+From: Prasad J Pandit pjp@fedoraproject.org Wed Feb 17 00:23:41 2016 +0530
+Date: Tue Feb 23 10:38:01 2016 +0100:
+Git: fe3c546c5ff2a6210f9a4d8561cc64051ca8603e
+
+When processing remote NDIS control message packets,
+the USB Net device emulator uses a fixed-length (4096-byte) data buffer.
+The incoming InformationBufferOffset & InformationBufferLength
+combination could overflow and run past that range. Check the control
+message buffer offsets and lengths to avoid it.
+
+Reported-by: Qinghao Tang
+Signed-off-by: Prasad J Pandit
+Message-id: 1455648821-17340-3-git-send-email-ppandit@redhat.com
+Signed-off-by: Gerd Hoffmann
+
+Index: xen-4.6.1-testing/tools/qemu-xen-dir-remote/hw/usb/dev-network.c
+===================================================================
+--- xen-4.6.1-testing.orig/tools/qemu-xen-dir-remote/hw/usb/dev-network.c
++++ xen-4.6.1-testing/tools/qemu-xen-dir-remote/hw/usb/dev-network.c
+@@ -912,8 +912,9 @@ static int rndis_query_response(USBNetSt
+
+ bufoffs = le32_to_cpu(buf->InformationBufferOffset) + 8;
+ buflen = le32_to_cpu(buf->InformationBufferLength);
+- if (bufoffs + buflen > length)
++ if (buflen > length || bufoffs >= length || bufoffs + buflen > length) {
+ return USB_RET_STALL;
++ }
+
+ infobuflen = ndis_query(s, le32_to_cpu(buf->OID),
+ bufoffs + (uint8_t *) buf, buflen, infobuf,
+@@ -958,8 +959,9 @@ static int rndis_set_response(USBNetStat
+
+ bufoffs = le32_to_cpu(buf->InformationBufferOffset) + 8;
+ buflen = le32_to_cpu(buf->InformationBufferLength);
+- if (bufoffs + buflen > length)
++ if (buflen > length || bufoffs >= length || bufoffs + buflen > length) {
+ return USB_RET_STALL;
++ }
+
+ ret = ndis_set(s, le32_to_cpu(buf->OID),
+ bufoffs + (uint8_t *) buf, buflen);
+@@ -1209,8 +1211,9 @@ static void usb_net_handle_dataout(USBNe
+ if (le32_to_cpu(msg->MessageType) == RNDIS_PACKET_MSG) {
+ uint32_t offs = 8 + le32_to_cpu(msg->DataOffset);
+ uint32_t size = le32_to_cpu(msg->DataLength);
+- if (offs + size <= len)
++ if (offs < len && size < len && offs + size <= len) {
+ qemu_send_packet(qemu_get_queue(s->nic), s->out_buf + offs, size);
++ }
+ }
+ s->out_ptr -= len;
+ memmove(s->out_buf, &s->out_buf[len], s->out_ptr);
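A note on why the single comparison is replaced rather than merely parenthesised: bufoffs and buflen are 32-bit values taken straight from the guest, so bufoffs + buflen can wrap around and slip under "bufoffs + buflen > length". A self-contained sketch of the difference (plain C model, not the QEMU code; 4096 is the fixed buffer size mentioned in the description):

    #include <stdint.h>
    #include <stdio.h>

    /* Old check: a single comparison that unsigned overflow can defeat. */
    static int old_check_rejects(uint32_t bufoffs, uint32_t buflen, uint32_t length)
    {
        return bufoffs + buflen > length;
    }

    /* New check: each operand is bounded before the sum is trusted. */
    static int new_check_rejects(uint32_t bufoffs, uint32_t buflen, uint32_t length)
    {
        return buflen > length || bufoffs >= length || bufoffs + buflen > length;
    }

    int main(void)
    {
        uint32_t length  = 4096;            /* size of the RNDIS control buffer */
        uint32_t bufoffs = 0xfffff000u;     /* guest-controlled offset */
        uint32_t buflen  = 0x00002000u;     /* 0xfffff000 + 0x2000 wraps to 0x1000 */

        printf("old check rejects: %d\n", old_check_rejects(bufoffs, buflen, length)); /* 0 */
        printf("new check rejects: %d\n", new_check_rejects(bufoffs, buflen, length)); /* 1 */
        return 0;
    }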
diff --git a/block-dmmd b/block-dmmd
index ca0d2dc..e681a43 100644
--- a/block-dmmd
+++ b/block-dmmd
@@ -2,13 +2,22 @@
# Usage: block-dmmd [add args | remove args]
#
-# the xm config file should have something like:
+# the dmmd device syntax (in xm commands/configs) is something like:
# dmmd:md;/dev/md0;md;/dev/md1;lvm;/dev/vg1/lv1
# or
# dmmd:lvm;/dev/vg1/lv1;lvm;/dev/vg1/lv2;md;/dev/md0
-# note the last device will be used for VM
-
+# device pairs (type;dev) are processed in order, with the last device
+# assigned to the VM
+#
+# md devices can optionally:
+# specify a config file through:
+# md;/dev/md100(/var/xen/config/mdadm.conf)
+# use an array name (mdadm -N option):
+# dmmd:md;My-MD-name;lvm;/dev/vg1/lv1
+#
# History:
+# 2013-07-03, loic.devulder@mpsa.com:
+# Partial rewrite of the script for supporting MD activation by name
# 2009-06-09, mh@novell.com:
# Emit debugging messages into a temporary file; if no longer needed,
# just comment the exec I/O redirection below
@@ -39,7 +48,7 @@ function run_mdadm()
local msg
local rc
- msg="`/sbin/mdadm $mdadm_cmd 2>&1`"
+ msg="$(/sbin/mdadm $mdadm_cmd 2>&1)"
rc=$?
case "$msg" in
*"has been started"* | *"already active"* )
@@ -59,11 +68,12 @@ function run_mdadm()
function activate_md()
{
+ # Make it explicitly local
local par=$1
- local already_active=0 cfg dev rc t
+ local cfg dev dev_path rc t mdadm_opts
if [ ${par} = ${par%%(*} ]; then
- # No configuration file specified:
+ # No configuration file specified
dev=$par
cfg=
else
@@ -71,23 +81,51 @@ function activate_md()
t=${par#*(}
cfg="-c ${t%%)*}"
fi
- if /sbin/mdadm -Q -D $dev; then
- already_active=1
+
+    # Look for a device name or alias
+ if [ ${dev:0:1} = / ]; then
+ dev_path=${dev%/*}
+ mdadm_opts=
+ else
+ dev_path=/dev/md
+ mdadm_opts="-s -N"
fi
- run_mdadm "-A $dev $cfg"
+
+    # Is the md device already active?
+    # We need to use the full path name; an alias is not possible...
+ if [ -e $dev_path/${dev##*/} ]; then
+        /sbin/mdadm -Q -D $dev_path/${dev##*/} 2>/dev/null | grep -iq 'state.*:.*inactive' || return 0
+ fi
+
+ run_mdadm "-A $mdadm_opts $dev $cfg"
rc=$?
- if [ $already_active -eq 1 ] && [ $rc -eq 2 ]; then
- return 0
- fi
+ [ $rc -eq 2 ] && return 0
+
return $rc
}
function deactivate_md()
{
- local par=$1 # Make it explicitly local
+ local par=$1
+ local dev
+
+ if [ ${par} = ${par%%(*} ]; then
+ # No configuration file specified
+ dev=${par}
+ else
+ dev=${par%%(*}
+ fi
+
+    # Look for a device name or alias
+ if [ ${dev:0:1} = / ]; then
+ dev_path=${dev%/*}
+ else
+ dev_path=/dev/md
+ fi
+
+ # We need the device name only while deactivating
+ /sbin/mdadm -S ${dev_path}/${dev##*/} > /dev/null 2>&1
- ## We need the device name only while deactivating
- /sbin/mdadm -S ${par%%(*}
return $?
}
@@ -96,9 +134,15 @@ function activate_lvm()
local run_timeout=90
local end_time
+    # First scan for PVs and VGs
+    # We need this when using an md device as a PV
+ /sbin/pvscan > /dev/null 2>&1
+# /sbin/vgscan --mknodes > /dev/null 2>&1
+
end_time=$(($(date +%s)+${run_timeout}))
while true; do
- /sbin/lvchange -aey $1
+ /sbin/lvchange -aey $1 > /dev/null 2>&1
+
if [ $? -eq 0 -a -e $1 ]; then
return 0
fi
@@ -114,7 +158,8 @@ function activate_lvm()
function deactivate_lvm()
{
- /sbin/lvchange -aen $1
+ /sbin/lvchange -aen $1 > /dev/null 2>&1
+
if [ $? -eq 0 ]; then
# We may have to deactivate the VG now, but can ignore errors:
# /sbin/vgchange -an ${1%/*} || :
@@ -219,7 +264,6 @@ function parse_par()
fi
fi
push "$t $s"
-
done
}
@@ -238,11 +282,11 @@ case "$command" in
fi
lastparam=${dmmd##*;}
usedevice=${lastparam%(*}
- xenstore-write $XENBUS_PATH/node "$usedevice"
- write_dev "$usedevice"
- release_lock "dmmd"
- exit 0
- ;;
+ xenstore-write $XENBUS_PATH/node "$usedevice"
+ write_dev "$usedevice"
+ release_lock "dmmd"
+ exit 0
+ ;;
remove)
p=`xenstore-read $XENBUS_PATH/params` || true
diff --git a/xen.changes b/xen.changes
index 046fc55..3d62ebb 100644
--- a/xen.changes
+++ b/xen.changes
@@ -1,3 +1,29 @@
+-------------------------------------------------------------------
+Wed Feb 24 08:05:02 MST 2016 - carnold@suse.com
+
+- bsc#968004 - VUL-0: CVE-2016-2538: xen: usb: integer overflow in
+ remote NDIS control message handling
+ CVE-2016-2538-qemuu-usb-integer-overflow-in-remote-NDIS-message-handling.patch
+
+-------------------------------------------------------------------
+Thu Feb 18 10:39:00 MST 2016 - carnold@suse.com
+
+- bsc#954872 - L3: script block-dmmd not working as expected -
+ libxl: error: libxl_dm.c
+ block-dmmd
+- Update libxl to recognize dmmd and npiv prefix in disk spec
+ xen.libxl.dmmd.patch
+
+-------------------------------------------------------------------
+Wed Feb 17 08:30:35 MST 2016 - carnold@suse.com
+
+- bsc#967101 - VUL-0: CVE-2016-2391: xen: usb: multiple eof_timers
+ in ohci module leads to null pointer dereference
+ CVE-2016-2391-qemuu-usb-null-pointer-dereference-in-ohci-module.patch
+- bsc#967090 - VUL-0: CVE-2016-2392: xen: usb: null pointer
+ dereference in remote NDIS control message handling
+ CVE-2016-2392-qemuu-usb-null-pointer-dereference-in-NDIS-message-handling.patch
+
-------------------------------------------------------------------
Thu Feb 11 09:29:01 MST 2016 - carnold@suse.com
@@ -35,6 +61,17 @@ Thu Feb 11 09:29:01 MST 2016 - carnold@suse.com
xsa167.patch
xsa168.patch
+-------------------------------------------------------------------
+Fri Feb 5 13:07:53 MST 2016 - carnold@suse.com
+
+- bsc#965315 - VUL-0: CVE-2016-2270: xen: x86: inconsistent
+ cachability flags on guest mappings (XSA-154)
+ xsa154.patch
+ xsa154-fix.patch
+- bsc#965317 - VUL-0: CVE-2016-2271: xen: VMX: guest user mode may
+ crash guest with non-canonical RIP (XSA-170)
+ xsa170.patch
+
-------------------------------------------------------------------
Fri Feb 5 08:51:16 MST 2016 - carnold@suse.com
diff --git a/xen.libxl.dmmd.patch b/xen.libxl.dmmd.patch
new file mode 100644
index 0000000..3d06601
--- /dev/null
+++ b/xen.libxl.dmmd.patch
@@ -0,0 +1,118 @@
+References: bsc#954872
+
+---
+ tools/libxl/libxl.c | 4 ++++
+ tools/libxl/libxl_device.c | 3 ++-
+ tools/libxl/libxl_dm.c | 34 +++++++++++++++++++++++++++++-----
+ tools/libxl/libxlu_disk_l.l | 2 ++
+ 4 files changed, 37 insertions(+), 6 deletions(-)
+
+Index: xen-4.6.1-testing/tools/libxl/libxl.c
+===================================================================
+--- xen-4.6.1-testing.orig/tools/libxl/libxl.c
++++ xen-4.6.1-testing/tools/libxl/libxl.c
+@@ -2791,6 +2791,10 @@ static void device_disk_add(libxl__egc *
+ /* now create a phy device to export the device to the guest */
+ goto do_backend_phy;
+ case LIBXL_DISK_BACKEND_QDISK:
++ if (disk->script) {
++ script = libxl__abs_path(gc, disk->script, libxl__xen_script_dir_path());
++ flexarray_append_pair(back, "script", script);
++ }
+ flexarray_append(back, "params");
+ flexarray_append(back, libxl__sprintf(gc, "%s:%s",
+ libxl__device_disk_string_of_format(disk->format), disk->pdev_path));
+Index: xen-4.6.1-testing/tools/libxl/libxl_device.c
+===================================================================
+--- xen-4.6.1-testing.orig/tools/libxl/libxl_device.c
++++ xen-4.6.1-testing/tools/libxl/libxl_device.c
+@@ -235,7 +235,8 @@ static int disk_try_backend(disk_try_bac
+ return backend;
+
+ case LIBXL_DISK_BACKEND_QDISK:
+- if (a->disk->script) goto bad_script;
++ LOG(DEBUG, "Disk vdev=%s, uses script=%s on %s backend",
++ a->disk->vdev, a->disk->script, libxl_disk_backend_to_string(backend));
+ return backend;
+
+ default:
+Index: xen-4.6.1-testing/tools/libxl/libxl_dm.c
+===================================================================
+--- xen-4.6.1-testing.orig/tools/libxl/libxl_dm.c
++++ xen-4.6.1-testing/tools/libxl/libxl_dm.c
+@@ -700,6 +700,30 @@ static char *dm_spice_options(libxl__gc
+ return opt;
+ }
+
++static void libxl__suse_node_to_path(libxl__gc *gc, int domid, const libxl_device_disk *dp, const char **pdev_path)
++{
++ libxl_ctx *ctx = libxl__gc_owner(gc);
++ char *be_path, *node;
++ libxl__device device;
++ libxl_device_disk disk;
++ int rc;
++
++ disk = *dp;
++ rc = libxl__device_from_disk(gc, domid, &disk, &device);
++ if (rc) {
++ LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "libxl__device_from_disk failed %d", rc);
++ return;
++ }
++ be_path = libxl__device_backend_path(gc, &device);
++
++ node = libxl__xs_read(gc, XBT_NULL, libxl__sprintf(gc, "%s/node", be_path));
++ if (!node)
++ return;
++
++ LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "replacing '%s' with '%s' from %s/node, just for qemu-xen", *pdev_path, node, be_path);
++ *pdev_path = node;
++}
++
+ static int libxl__build_device_model_args_new(libxl__gc *gc,
+ const char *dm, int guest_domid,
+ const libxl_domain_config *guest_config,
+@@ -1099,7 +1123,9 @@ static int libxl__build_device_model_arg
+ libxl__device_disk_dev_number(disks[i].vdev, &disk, &part);
+ const char *format = qemu_disk_format_string(disks[i].format);
+ char *drive;
+- const char *pdev_path;
++ const char *pdev_path = disks[i].pdev_path;
++
++ libxl__suse_node_to_path(gc, guest_domid, disks + i, &pdev_path);
+
+ if (dev_number == -1) {
+ LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "unable to determine"
+@@ -1115,7 +1141,7 @@ static int libxl__build_device_model_arg
+ else
+ drive = libxl__sprintf
+ (gc, "file=%s,if=ide,index=%d,readonly=%s,media=cdrom,format=%s,cache=writeback,id=ide-%i",
+- disks[i].pdev_path, disk, disks[i].readwrite ? "off" : "on", format, dev_number);
++ pdev_path, disk, disks[i].readwrite ? "off" : "on", format, dev_number);
+ } else {
+ if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY) {
+ LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "cannot support"
+@@ -1131,10 +1157,8 @@ static int libxl__build_device_model_arg
+
+ if (disks[i].backend == LIBXL_DISK_BACKEND_TAP) {
+ format = qemu_disk_format_string(LIBXL_DISK_FORMAT_RAW);
+- pdev_path = libxl__blktap_devpath(gc, disks[i].pdev_path,
++ pdev_path = libxl__blktap_devpath(gc, pdev_path,
+ disks[i].format);
+- } else {
+- pdev_path = disks[i].pdev_path;
+ }
+
+ /*
+Index: xen-4.6.1-testing/tools/libxl/libxlu_disk_l.l
+===================================================================
+--- xen-4.6.1-testing.orig/tools/libxl/libxlu_disk_l.l
++++ xen-4.6.1-testing/tools/libxl/libxlu_disk_l.l
+@@ -210,6 +210,8 @@ target=.* { STRIP(','); SAVESTRING("targ
+ free(newscript);
+ }
+
++dmmd:/.* { DPC->had_depr_prefix=1; DEPRECATE(0); }
++npiv:/.* { DPC->had_depr_prefix=1; DEPRECATE(0); }
+ tapdisk:/.* { DPC->had_depr_prefix=1; DEPRECATE(0); }
+ tap2?:/.* { DPC->had_depr_prefix=1; DEPRECATE(0); }
+ aio:/.* { DPC->had_depr_prefix=1; DEPRECATE(0); }
diff --git a/xen.spec b/xen.spec
index e13b442..a00602c 100644
--- a/xen.spec
+++ b/xen.spec
@@ -1,7 +1,7 @@
#
# spec file for package xen
#
-# Copyright (c) 2016 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2016 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -15,7 +15,6 @@
# Please submit bugfixes or comments via http://bugs.opensuse.org/
#
-
# needssslcertforbuild
Name: xen
@@ -207,11 +206,14 @@ Patch1: 55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handlin
Patch2: 5628fc67-libxl-No-emulated-disk-driver-for-xvdX-disk.patch
Patch3: 5644b756-x86-HVM-don-t-inject-DB-with-error-code.patch
Patch4: 5649bcbe-libxl-relax-readonly-check-introduced-by-XSA-142-fix.patch
+Patch15401: xsa154.patch
+Patch15402: xsa154-fix.patch
Patch15501: xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
Patch15502: xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
Patch15503: xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
Patch162: xsa162-qemuu.patch
Patch164: xsa164.patch
+Patch170: xsa170.patch
# Upstream qemu
Patch250: VNC-Support-for-ExtendedKeyEvent-client-message.patch
Patch251: 0001-net-move-the-tap-buffer-into-TAPState.patch
@@ -254,6 +256,9 @@ Patch288: CVE-2013-4533-qemut-pxa2xx-buffer-overrun-on-incoming-migration.
Patch289: CVE-2015-5278-qemut-Infinite-loop-in-ne2000_receive-function.patch
Patch290: CVE-2015-6855-qemuu-ide-divide-by-zero-issue.patch
Patch291: CVE-2015-8619-qemuu-stack-based-OOB-write-in-hmp_sendkey-routine.patch
+Patch292: CVE-2016-2392-qemuu-usb-null-pointer-dereference-in-NDIS-message-handling.patch
+Patch293: CVE-2016-2391-qemuu-usb-null-pointer-dereference-in-ohci-module.patch
+Patch294: CVE-2016-2538-qemuu-usb-integer-overflow-in-remote-NDIS-message-handling.patch
# Our platform specific patches
Patch321: xen-destdir.patch
Patch322: vif-bridge-no-iptables.patch
@@ -309,6 +314,7 @@ Patch471: qemu-xen-enable-spice-support.patch
Patch472: tigervnc-long-press.patch
Patch473: xendomains-libvirtd-conflict.patch
Patch474: CVE-2014-0222-blktap-qcow1-validate-l2-table-size.patch
+Patch475: xen.libxl.dmmd.patch
# Hypervisor and PV driver Patches
Patch501: x86-ioapic-ack-default.patch
Patch502: x86-cpufreq-report.patch
@@ -528,11 +534,14 @@ Authors:
%patch2 -p1
%patch3 -p1
%patch4 -p1
+%patch15401 -p1
+%patch15402 -p1
%patch15501 -p1
%patch15502 -p1
%patch15503 -p1
%patch162 -p1
%patch164 -p1
+%patch170 -p1
# Upstream qemu patches
%patch250 -p1
%patch251 -p1
@@ -575,6 +584,9 @@ Authors:
%patch289 -p1
%patch290 -p1
%patch291 -p1
+%patch292 -p1
+%patch293 -p1
+%patch294 -p1
# Our platform specific patches
%patch321 -p1
%patch322 -p1
@@ -630,6 +642,7 @@ Authors:
%patch472 -p1
%patch473 -p1
%patch474 -p1
+%patch475 -p1
# Hypervisor and PV driver Patches
%patch501 -p1
%patch502 -p1
diff --git a/xsa154-fix.patch b/xsa154-fix.patch
new file mode 100644
index 0000000..5cf2294
--- /dev/null
+++ b/xsa154-fix.patch
@@ -0,0 +1,31 @@
+Subject: x86: fix unintended fallthrough case from XSA-154
+From: Andrew Cooper andrew.cooper3@citrix.com Thu Feb 18 15:10:07 2016 +0100
+Date: Thu Feb 18 15:10:07 2016 +0100:
+Git: 8dd6d1c099865ee5f5916616a0ca79cd943c46f9
+
+... and annotate the other deliberate one: Coverity objects otherwise.
+
+Signed-off-by: Andrew Cooper
+
+One of the two instances was actually a bug.
+
+Signed-off-by: Jan Beulich
+
+Index: xen-4.6.1-testing/xen/arch/x86/mm.c
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/mm.c
++++ xen-4.6.1-testing/xen/arch/x86/mm.c
+@@ -853,9 +853,11 @@ get_page_from_l1e(
+ case 0:
+ break;
+ case 1:
+- if ( is_hardware_domain(l1e_owner) )
++ if ( !is_hardware_domain(l1e_owner) )
++ break;
++ /* fallthrough */
+ case -1:
+- return 0;
++ return 0;
+ default:
+ ASSERT_UNREACHABLE();
+ }
diff --git a/xsa154.patch b/xsa154.patch
new file mode 100644
index 0000000..8fb4c8e
--- /dev/null
+++ b/xsa154.patch
@@ -0,0 +1,375 @@
+References: bsc#965315 - CVE-2016-2270 XSA-154
+
+x86: enforce consistent cachability of MMIO mappings
+
+We've been told by Intel that inconsistent cachability between
+multiple mappings of the same page can affect system stability only
+when the affected page is an MMIO one. Since the stale data issue is
+of no relevance to the hypervisor (since all guest memory accesses go
+through proper accessors and validation), handling of RAM pages
+remains unchanged here. Any MMIO mapped by domains however needs to be
+done consistently (all cachable mappings or all uncachable ones), in
+order to avoid Machine Check exceptions. Since converting existing
+cachable mappings to uncachable (at the time an uncachable mapping
+gets established) would in the PV case require tracking all mappings,
+allow MMIO to only get mapped uncachable (UC, UC-, or WC).
+
+This also implies that in the PV case we mustn't use the L1 PTE update
+fast path when cachability flags get altered.
+
+Since in the HVM case at least for now we want to continue honoring
+pinned cachability attributes for pages not mapped by the hypervisor,
+special case handling of r/o MMIO pages (forcing UC) gets added there.
+Arguably the counterpart change to p2m-pt.c may not be necessary, since
+UC- (which already gets enforced there) is probably strict enough.
+
+Note that the shadow code changes include fixing the write protection
+of r/o MMIO ranges: shadow_l1e_remove_flags() and its siblings, other
+than l1e_remove_flags() and the like, return the new PTE (and hence
+ignoring their return values makes them no-ops).
+
+This is CVE-2016-2270 / XSA-154.
+
+Signed-off-by: Jan Beulich
+Acked-by: Andrew Cooper
+
+Index: xen-4.6.1-testing/docs/misc/xen-command-line.markdown
+===================================================================
+--- xen-4.6.1-testing.orig/docs/misc/xen-command-line.markdown
++++ xen-4.6.1-testing/docs/misc/xen-command-line.markdown
+@@ -1080,6 +1080,15 @@ limit is ignored by Xen.
+
+ Specify if the MMConfig space should be enabled.
+
++### mmio-relax
++> `= <boolean> | all`
++
++> Default: `false`
++
++By default, domains may not create cached mappings to MMIO regions.
++This option relaxes the check for Domain 0 (or when using `all`, all PV
++domains), to permit the use of cacheable MMIO mappings.
++
+ ### msi
+ > `= <boolean>`
+
+Index: xen-4.6.1-testing/xen/arch/x86/hvm/mtrr.c
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/hvm/mtrr.c
++++ xen-4.6.1-testing/xen/arch/x86/hvm/mtrr.c
+@@ -807,8 +807,17 @@ int epte_get_entry_emt(struct domain *d,
+ if ( v->domain != d )
+ v = d->vcpu ? d->vcpu[0] : NULL;
+
+- if ( !mfn_valid(mfn_x(mfn)) )
++ if ( !mfn_valid(mfn_x(mfn)) ||
++ rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
++ mfn_x(mfn) + (1UL << order) - 1) )
++ {
++ *ipat = 1;
+ return MTRR_TYPE_UNCACHABLE;
++ }
++
++ if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
++ mfn_x(mfn) + (1UL << order) - 1) )
++ return -1;
+
+ switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+ {
+Index: xen-4.6.1-testing/xen/arch/x86/mm/p2m-pt.c
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/mm/p2m-pt.c
++++ xen-4.6.1-testing/xen/arch/x86/mm/p2m-pt.c
+@@ -107,6 +107,8 @@ static unsigned long p2m_type_to_flags(p
+ case p2m_mmio_direct:
+ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
+ flags |= _PAGE_RW;
++ else
++ flags |= _PAGE_PWT;
+ return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+ }
+ }
+Index: xen-4.6.1-testing/xen/arch/x86/mm/shadow/multi.c
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/mm/shadow/multi.c
++++ xen-4.6.1-testing/xen/arch/x86/mm/shadow/multi.c
+@@ -519,6 +519,7 @@ _sh_propagate(struct vcpu *v,
+ gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
+ u32 pass_thru_flags;
+ u32 gflags, sflags;
++ bool_t mmio_mfn;
+
+ /* We don't shadow PAE l3s */
+ ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
+@@ -559,7 +560,10 @@ _sh_propagate(struct vcpu *v,
+ // mfn means that we can not usefully shadow anything, and so we
+ // return early.
+ //
+- if ( !mfn_valid(target_mfn)
++ mmio_mfn = !mfn_valid(target_mfn)
++ || (level == 1
++ && page_get_owner(mfn_to_page(target_mfn)) == dom_io);
++ if ( mmio_mfn
+ && !(level == 1 && (!shadow_mode_refcounts(d)
+ || p2mt == p2m_mmio_direct)) )
+ {
+@@ -577,7 +581,7 @@ _sh_propagate(struct vcpu *v,
+ _PAGE_RW | _PAGE_PRESENT);
+ if ( guest_supports_nx(v) )
+ pass_thru_flags |= _PAGE_NX_BIT;
+- if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
++ if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
+ pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
+ sflags = gflags & pass_thru_flags;
+
+@@ -676,10 +680,14 @@ _sh_propagate(struct vcpu *v,
+ }
+
+ /* Read-only memory */
+- if ( p2m_is_readonly(p2mt) ||
+- (p2mt == p2m_mmio_direct &&
+- rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
++ if ( p2m_is_readonly(p2mt) )
+ sflags &= ~_PAGE_RW;
++ else if ( p2mt == p2m_mmio_direct &&
++ rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn)) )
++ {
++ sflags &= ~(_PAGE_RW | _PAGE_PAT);
++ sflags |= _PAGE_PCD | _PAGE_PWT;
++ }
+
+ // protect guest page tables
+ //
+@@ -1185,22 +1193,28 @@ static int shadow_set_l1e(struct domain
+ && !sh_l1e_is_magic(new_sl1e) )
+ {
+ /* About to install a new reference */
+- if ( shadow_mode_refcounts(d) ) {
++ if ( shadow_mode_refcounts(d) )
++ {
++#define PAGE_FLIPPABLE (_PAGE_RW | _PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
++ int rc;
++
+ TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
+- switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
++ switch ( rc = shadow_get_page_from_l1e(new_sl1e, d, new_type) )
+ {
+ default:
+ /* Doesn't look like a pagetable. */
+ flags |= SHADOW_SET_ERROR;
+ new_sl1e = shadow_l1e_empty();
+ break;
+- case 1:
+- shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
++ case PAGE_FLIPPABLE & -PAGE_FLIPPABLE ... PAGE_FLIPPABLE:
++ ASSERT(!(rc & ~PAGE_FLIPPABLE));
++ new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc);
+ /* fall through */
+ case 0:
+ shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
+ break;
+ }
++#undef PAGE_FLIPPABLE
+ }
+ }
+
+Index: xen-4.6.1-testing/xen/arch/x86/mm/shadow/types.h
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/mm/shadow/types.h
++++ xen-4.6.1-testing/xen/arch/x86/mm/shadow/types.h
+@@ -99,6 +99,9 @@ static inline u32 shadow_l4e_get_flags(s
+ static inline shadow_l1e_t
+ shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
+ { l1e_remove_flags(sl1e, flags); return sl1e; }
++static inline shadow_l1e_t
++shadow_l1e_flip_flags(shadow_l1e_t sl1e, u32 flags)
++{ l1e_flip_flags(sl1e, flags); return sl1e; }
+
+ static inline shadow_l1e_t shadow_l1e_empty(void)
+ { return l1e_empty(); }
+Index: xen-4.6.1-testing/xen/include/asm-x86/page.h
+===================================================================
+--- xen-4.6.1-testing.orig/xen/include/asm-x86/page.h
++++ xen-4.6.1-testing/xen/include/asm-x86/page.h
+@@ -157,6 +157,9 @@ static inline l4_pgentry_t l4e_from_padd
+ #define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
+ #define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))
+
++/* Flip flags in an existing L1 PTE. */
++#define l1e_flip_flags(x, flags) ((x).l1 ^= put_pte_flags(flags))
++
+ /* Check if a pte's page mapping or significant access flags have changed. */
+ #define l1e_has_changed(x,y,flags) \
+ ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+Index: xen-4.6.1-testing/xen/arch/x86/mm.c
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/mm.c
++++ xen-4.6.1-testing/xen/arch/x86/mm.c
+@@ -178,6 +178,18 @@ static uint32_t base_disallow_mask;
+ is_pv_domain(d)) ? \
+ L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
+
++static s8 __read_mostly opt_mmio_relax;
++static void __init parse_mmio_relax(const char *s)
++{
++ if ( !*s )
++ opt_mmio_relax = 1;
++ else
++ opt_mmio_relax = parse_bool(s);
++ if ( opt_mmio_relax < 0 && strcmp(s, "all") )
++ opt_mmio_relax = 0;
++}
++custom_param("mmio-relax", parse_mmio_relax);
++
+ static void __init init_frametable_chunk(void *start, void *end)
+ {
+ unsigned long s = (unsigned long)start;
+@@ -799,10 +811,7 @@ get_page_from_l1e(
+ if ( !mfn_valid(mfn) ||
+ (real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
+ {
+-#ifndef NDEBUG
+- const unsigned long *ro_map;
+- unsigned int seg, bdf;
+-#endif
++ int flip = 0;
+
+ /* Only needed the reference to confirm dom_io ownership. */
+ if ( mfn_valid(mfn) )
+@@ -836,24 +845,55 @@ get_page_from_l1e(
+ return -EINVAL;
+ }
+
+- if ( !(l1f & _PAGE_RW) ||
+- !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
+- return 0;
++ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
++ {
++ /* MMIO pages must not be mapped cachable unless requested so. */
++ switch ( opt_mmio_relax )
++ {
++ case 0:
++ break;
++ case 1:
++ if ( is_hardware_domain(l1e_owner) )
++ case -1:
++ return 0;
++ default:
++ ASSERT_UNREACHABLE();
++ }
++ }
++ else if ( l1f & _PAGE_RW )
++ {
+ #ifndef NDEBUG
+- if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
+- ((ro_map = pci_get_ro_map(seg)) != NULL &&
+- test_bit(bdf, ro_map)) )
+- printk(XENLOG_G_WARNING
+- "d%d: Forcing read-only access to MFN %lx\n",
+- l1e_owner->domain_id, mfn);
+- else
+- rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
+- print_mmio_emul_range,
+- &(struct mmio_emul_range_ctxt){
+- .d = l1e_owner,
+- .mfn = mfn });
++ const unsigned long *ro_map;
++ unsigned int seg, bdf;
++
++ if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
++ ((ro_map = pci_get_ro_map(seg)) != NULL &&
++ test_bit(bdf, ro_map)) )
++ printk(XENLOG_G_WARNING
++ "d%d: Forcing read-only access to MFN %lx\n",
++ l1e_owner->domain_id, mfn);
++ else
++ rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
++ print_mmio_emul_range,
++ &(struct mmio_emul_range_ctxt){
++ .d = l1e_owner,
++ .mfn = mfn });
+ #endif
+- return 1;
++ flip = _PAGE_RW;
++ }
++
++ switch ( l1f & PAGE_CACHE_ATTRS )
++ {
++ case 0: /* WB */
++ flip |= _PAGE_PWT | _PAGE_PCD;
++ break;
++ case _PAGE_PWT: /* WT */
++ case _PAGE_PWT | _PAGE_PAT: /* WP */
++ flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
++ break;
++ }
++
++ return flip;
+ }
+
+ if ( unlikely( (real_pg_owner != pg_owner) &&
+@@ -1243,8 +1283,9 @@ static int alloc_l1_table(struct page_in
+ goto fail;
+ case 0:
+ break;
+- case 1:
+- l1e_remove_flags(pl1e[i], _PAGE_RW);
++ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
++ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
++ l1e_flip_flags(pl1e[i], ret);
+ break;
+ }
+
+@@ -1759,8 +1800,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
+ return -EINVAL;
+ }
+
+- /* Fast path for identical mapping, r/w and presence. */
+- if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
++ /* Fast path for identical mapping, r/w, presence, and cachability. */
++ if ( !l1e_has_changed(ol1e, nl1e,
++ PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
+ {
+ adjust_guest_l1e(nl1e, pt_dom);
+ if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
+@@ -1783,8 +1825,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
+ return rc;
+ case 0:
+ break;
+- case 1:
+- l1e_remove_flags(nl1e, _PAGE_RW);
++ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
++ ASSERT(!(rc & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
++ l1e_flip_flags(nl1e, rc);
+ rc = 0;
+ break;
+ }
+@@ -5000,6 +5043,7 @@ static int ptwr_emulated_update(
+ l1_pgentry_t pte, ol1e, nl1e, *pl1e;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
++ int ret;
+
+ /* Only allow naturally-aligned stores within the original %cr2 page. */
+ if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
+@@ -5047,7 +5091,7 @@ static int ptwr_emulated_update(
+
+ /* Check the new PTE. */
+ nl1e = l1e_from_intpte(val);
+- switch ( get_page_from_l1e(nl1e, d, d) )
++ switch ( ret = get_page_from_l1e(nl1e, d, d) )
+ {
+ default:
+ if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
+@@ -5071,8 +5115,9 @@ static int ptwr_emulated_update(
+ break;
+ case 0:
+ break;
+- case 1:
+- l1e_remove_flags(nl1e, _PAGE_RW);
++ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
++ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
++ l1e_flip_flags(nl1e, ret);
+ break;
+ }
+
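For orientation, the central mechanism of this patch is that get_page_from_l1e() no longer reports "strip _PAGE_RW" as a plain 0/1 result; it returns a mask of PTE bits that callers XOR in through the new l1e_flip_flags(). For MMIO pages the mask rewrites the cachability attributes so that WB, WT and WP requests become UC, while UC, UC- and WC pass through untouched. A self-contained sketch of that mask computation (plain C, not the hypervisor code; cache-type names follow Xen's PAT layout, bit values are the architectural 4K L1 PTE bits):

    #include <stdio.h>

    /* x86 4K L1 PTE cachability bits. */
    #define _PAGE_PWT 0x008
    #define _PAGE_PCD 0x010
    #define _PAGE_PAT 0x080
    #define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)

    /* Bits to XOR into the PTE so cachable requests on MMIO become UC. */
    static unsigned int mmio_flip_mask(unsigned int l1f)
    {
        unsigned int flip = 0;

        switch ( l1f & PAGE_CACHE_ATTRS )
        {
        case 0:                         /* WB -> UC */
            flip |= _PAGE_PWT | _PAGE_PCD;
            break;
        case _PAGE_PWT:                 /* WT -> UC */
        case _PAGE_PWT | _PAGE_PAT:     /* WP -> UC */
            flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
            break;
        /* UC, UC- and WC already carry PCD and/or PAT: nothing to flip. */
        }
        return flip;
    }

    int main(void)
    {
        unsigned int attrs[] = { 0, _PAGE_PWT, _PAGE_PWT | _PAGE_PAT, _PAGE_PAT,
                                 _PAGE_PCD, _PAGE_PCD | _PAGE_PWT };
        for ( unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++ )
            /* l1e_flip_flags() boils down to this XOR. */
            printf("attrs %#05x -> %#05x\n", attrs[i], attrs[i] ^ mmio_flip_mask(attrs[i]));
        return 0;
    }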
diff --git a/xsa170.patch b/xsa170.patch
new file mode 100644
index 0000000..d3a5de0
--- /dev/null
+++ b/xsa170.patch
@@ -0,0 +1,83 @@
+References: bsc#965317 CVE-2016-2271 XSA-170
+
+x86/VMX: sanitize rIP before re-entering guest
+
+... to prevent guest user mode arranging for a guest crash (due to
+failed VM entry). (On the AMD system I checked, hardware is doing
+exactly the canonicalization being added here.)
+
+Note that fixing this in an architecturally correct way would be quite
+a bit more involved: Making the x86 instruction emulator check all
+branch targets for validity, plus dealing with invalid rIP resulting
+from update_guest_eip() or incoming directly during a VM exit. The only
+way to get the latter right would be by not having hardware do the
+injection.
+
+Note further that there are two early returns from
+vmx_vmexit_handler(): One (through vmx_failed_vmentry()) leads to
+domain_crash() anyway, and the other covers real mode only and can
+neither occur with a non-canonical rIP nor result in an altered rIP,
+so we don't need to force those paths through the checking logic.
+
+This is XSA-170.
+
+Reported-by: 刘令
+Signed-off-by: Jan Beulich
+Reviewed-by: Andrew Cooper
+Tested-by: Andrew Cooper
+
+Index: xen-4.6.1-testing/xen/arch/x86/hvm/vmx/vmx.c
+===================================================================
+--- xen-4.6.1-testing.orig/xen/arch/x86/hvm/vmx/vmx.c
++++ xen-4.6.1-testing/xen/arch/x86/hvm/vmx/vmx.c
+@@ -2879,7 +2879,7 @@ static int vmx_handle_apic_write(void)
+ void vmx_vmexit_handler(struct cpu_user_regs *regs)
+ {
+ unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
+- unsigned int vector = 0;
++ unsigned int vector = 0, mode;
+ struct vcpu *v = current;
+
+     __vmread(GUEST_RIP, &regs->rip);
+@@ -3468,6 +3468,41 @@ void vmx_vmexit_handler(struct cpu_user_
+ out:
+ if ( nestedhvm_vcpu_in_guestmode(v) )
+ nvmx_idtv_handling();
++
++ /*
++ * VM entry will fail (causing the guest to get crashed) if rIP (and
++ * rFLAGS, but we don't have an issue there) doesn't meet certain
++ * criteria. As we must not allow less than fully privileged mode to have
++ * such an effect on the domain, we correct rIP in that case (accepting
++ * this not being architecturally correct behavior, as the injected #GP
++ * fault will then not see the correct [invalid] return address).
++ * And since we know the guest will crash, we crash it right away if it
++ * already is in most privileged mode.
++ */
++ mode = vmx_guest_x86_mode(v);
++ if ( mode == 8 ? !is_canonical_address(regs->rip)
++ : regs->rip != regs->_eip )
++ {
++ struct segment_register ss;
++
++ gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
++
++ vmx_get_segment_register(v, x86_seg_ss, &ss);
++ if ( ss.attr.fields.dpl )
++ {
++ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
++ if ( !(intr_info & INTR_INFO_VALID_MASK) )
++ hvm_inject_hw_exception(TRAP_gp_fault, 0);
++ /* Need to fix rIP nevertheless. */
++ if ( mode == 8 )
++ regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
++ (64 - VADDR_BITS);
++ else
++ regs->rip = regs->_eip;
++ }
++ else
++ domain_crash(v->domain);
++ }
+ }
+
+ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
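The rIP correction at the end of the added block is plain sign extension into the canonical range: shift the address left so that bit VADDR_BITS-1 lands in the sign bit, then arithmetic-shift it back so that bit is replicated through the upper bits. A standalone illustration (plain C, assuming 48-bit virtual addresses and two's-complement arithmetic shifts, as on the x86-64 targets of this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define VADDR_BITS 48

    /* Canonical means bits 63..47 are all copies of bit 47. */
    static int is_canonical_address(uint64_t va)
    {
        return (int64_t)(va << (64 - VADDR_BITS)) >> (64 - VADDR_BITS) == (int64_t)va;
    }

    /* The same fix-up the patch applies to regs->rip. */
    static uint64_t canonicalise(uint64_t rip)
    {
        return (uint64_t)((int64_t)(rip << (64 - VADDR_BITS)) >> (64 - VADDR_BITS));
    }

    int main(void)
    {
        uint64_t bad = 0x0000900000001000ull;   /* bit 47 set, bits 63..48 clear */

        printf("canonical? %d\n", is_canonical_address(bad));                   /* 0 */
        printf("fixed rIP: %#018llx\n", (unsigned long long)canonicalise(bad));
        /* prints 0xffff900000001000 */
        return 0;
    }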