Accepting request 586966 from home:jfehlig:branches:Virtualization

- qemu: avoid denial of service reading from QEMU guest agent
  CVE-2018-1064
  fbf31e1a-CVE-2018-1064.patch
  bsc#1083625

- virtlockd: fix losing lock on re-exec
  464889ff-rpc-aquire-ref-dispatch.patch,
  c6f1d519-rpc-simplify-dispatch.patch,
  06e7ebb6-rpc-invoke-dispatch-unlocked.patch,
  86cae503-rpc-fix-pre-exec.patch,
  eefabb38-rpc-virtlockd-virtlogd-single-thread.patch
  bsc#1076861

- libvirtd: fix potential deadlock when reloading
  33c6eb96-fix-libvirtd-reload-deadlock.patch
  bsc#1079150

OBS-URL: https://build.opensuse.org/request/show/586966
OBS-URL: https://build.opensuse.org/package/show/Virtualization/libvirt?expand=0&rev=676
James Fehlig 2018-03-14 14:29:10 +00:00 committed by Git OBS Bridge
parent c5e448dfb3
commit a026aabb02
9 changed files with 715 additions and 0 deletions


@@ -0,0 +1,237 @@
commit 06e7ebb60894ab43b5224752514049c1a286ee06
Author: Daniel P. Berrangé <berrange@redhat.com>
Date: Tue Mar 6 17:05:16 2018 +0000
rpc: invoke the message dispatch callback with client unlocked
Currently if the virNetServer instance is created with max_workers==0 to
request a non-threaded dispatch process, we deadlock during dispatch
#0 0x00007fb845f6f42d in __lll_lock_wait () from /lib64/libpthread.so.0
#1 0x00007fb845f681d3 in pthread_mutex_lock () from /lib64/libpthread.so.0
#2 0x000055a6628bb305 in virMutexLock (m=<optimized out>) at util/virthread.c:89
#3 0x000055a6628a984b in virObjectLock (anyobj=<optimized out>) at util/virobject.c:435
#4 0x000055a66286fcde in virNetServerClientIsAuthenticated (client=client@entry=0x55a663a7b960)
at rpc/virnetserverclient.c:1565
#5 0x000055a66286cc17 in virNetServerProgramDispatchCall (msg=0x55a663a7bc50, client=0x55a663a7b960,
server=0x55a663a77550, prog=0x55a663a78020) at rpc/virnetserverprogram.c:407
#6 virNetServerProgramDispatch (prog=prog@entry=0x55a663a78020, server=server@entry=0x55a663a77550,
client=client@entry=0x55a663a7b960, msg=msg@entry=0x55a663a7bc50) at rpc/virnetserverprogram.c:307
#7 0x000055a662871d56 in virNetServerProcessMsg (msg=0x55a663a7bc50, prog=0x55a663a78020, client=0x55a663a7b960,
srv=0x55a663a77550) at rpc/virnetserver.c:148
#8 virNetServerDispatchNewMessage (client=0x55a663a7b960, msg=0x55a663a7bc50, opaque=0x55a663a77550)
at rpc/virnetserver.c:227
#9 0x000055a66286e4c0 in virNetServerClientDispatchRead (client=client@entry=0x55a663a7b960)
at rpc/virnetserverclient.c:1322
#10 0x000055a66286e813 in virNetServerClientDispatchEvent (sock=<optimized out>, events=1, opaque=0x55a663a7b960)
at rpc/virnetserverclient.c:1507
#11 0x000055a662899be0 in virEventPollDispatchHandles (fds=0x55a663a7bdc0, nfds=<optimized out>)
at util/vireventpoll.c:508
#12 virEventPollRunOnce () at util/vireventpoll.c:657
#13 0x000055a6628982f1 in virEventRunDefaultImpl () at util/virevent.c:327
#14 0x000055a6628716d5 in virNetDaemonRun (dmn=0x55a663a771b0) at rpc/virnetdaemon.c:858
#15 0x000055a662864c1d in main (argc=<optimized out>, argv=0x7ffd105b4838) at logging/log_daemon.c:1235
Reviewed-by: John Ferlan <jferlan@redhat.com>
Reviewed-by: Jim Fehlig <jfehlig@suse.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
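For illustration only (not part of the patch below): a minimal standalone sketch of the deadlock pattern and of the shape of the fix, using plain pthreads and hypothetical names rather than the libvirt API. With non-threaded dispatch the callback used to run while the client lock was still held, and the handler re-locks that same client (frame #4 in the backtrace above); returning the completed message to the caller and dispatching only after the unlock avoids the self-deadlock.

/* Standalone sketch of the self-deadlock and its fix: plain pthreads,
 * hypothetical names, not the libvirt API.  Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { char payload[64]; } msg_t;

typedef struct {
    pthread_mutex_t lock;
    msg_t *rx;                     /* message being received */
} client_t;

/* Dispatch handler: needs the client lock itself, much as
 * virNetServerClientIsAuthenticated does in frame #4 above. */
static void dispatch(client_t *c, msg_t *m)
{
    pthread_mutex_lock(&c->lock);  /* would deadlock if the caller still held it */
    printf("dispatching: %s\n", m->payload);
    pthread_mutex_unlock(&c->lock);
    free(m);
}

/* Event handler, post-patch shape: read while locked, but hand the
 * completed message back out and dispatch only after dropping the lock. */
static void handle_readable(client_t *c)
{
    msg_t *ready = NULL;

    pthread_mutex_lock(&c->lock);
    if (c->rx) {                   /* pretend the message just completed */
        ready = c->rx;
        c->rx = NULL;
        /* Pre-patch, non-threaded dispatch effectively called
         * dispatch(c, ready) right here, re-locking c->lock => deadlock. */
    }
    pthread_mutex_unlock(&c->lock);

    if (ready)                     /* dispatch with the client unlocked */
        dispatch(c, ready);
}

int main(void)
{
    client_t c = { .lock = PTHREAD_MUTEX_INITIALIZER, .rx = NULL };
    c.rx = malloc(sizeof(*c.rx));
    snprintf(c.rx->payload, sizeof(c.rx->payload), "hello");
    handle_readable(&c);
    return 0;
}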
Index: libvirt-4.1.0/src/rpc/virnetserverclient.c
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserverclient.c
+++ libvirt-4.1.0/src/rpc/virnetserverclient.c
@@ -143,7 +143,7 @@ VIR_ONCE_GLOBAL_INIT(virNetServerClient)
static void virNetServerClientDispatchEvent(virNetSocketPtr sock, int events, void *opaque);
static void virNetServerClientUpdateEvent(virNetServerClientPtr client);
-static void virNetServerClientDispatchRead(virNetServerClientPtr client);
+static virNetMessagePtr virNetServerClientDispatchRead(virNetServerClientPtr client);
static int virNetServerClientSendMessageLocked(virNetServerClientPtr client,
virNetMessagePtr msg);
@@ -340,18 +340,40 @@ virNetServerClientCheckAccess(virNetServ
}
#endif
+static void virNetServerClientDispatchMessage(virNetServerClientPtr client,
+ virNetMessagePtr msg)
+{
+ virObjectLock(client);
+ if (!client->dispatchFunc) {
+ virNetMessageFree(msg);
+ client->wantClose = true;
+ virObjectUnlock(client);
+ } else {
+ virObjectUnlock(client);
+ /* Accessing 'client' is safe, because virNetServerClientSetDispatcher
+ * only permits setting 'dispatchFunc' once, so if non-NULL, it will
+ * never change again
+ */
+ client->dispatchFunc(client, msg, client->dispatchOpaque);
+ }
+}
+
static void virNetServerClientSockTimerFunc(int timer,
void *opaque)
{
virNetServerClientPtr client = opaque;
+ virNetMessagePtr msg = NULL;
virObjectLock(client);
virEventUpdateTimeout(timer, -1);
/* Although client->rx != NULL when this timer is enabled, it might have
* changed since the client was unlocked in the meantime. */
if (client->rx)
- virNetServerClientDispatchRead(client);
+ msg = virNetServerClientDispatchRead(client);
virObjectUnlock(client);
+
+ if (msg)
+ virNetServerClientDispatchMessage(client, msg);
}
@@ -950,8 +972,13 @@ void virNetServerClientSetDispatcher(vir
void *opaque)
{
virObjectLock(client);
- client->dispatchFunc = func;
- client->dispatchOpaque = opaque;
+ /* Only set dispatcher if not already set, to avoid race
+ * with dispatch code that runs without locks held
+ */
+ if (!client->dispatchFunc) {
+ client->dispatchFunc = func;
+ client->dispatchOpaque = opaque;
+ }
virObjectUnlock(client);
}
@@ -1196,26 +1223,32 @@ static ssize_t virNetServerClientRead(vi
/*
- * Read data until we get a complete message to process
+ * Read data until we get a complete message to process.
+ * If a complete message is available, it will be returned
+ * from this method, for dispatch by the caller.
+ *
+ * Returns a complete message for dispatch, or NULL if none is
+ * yet available, or an error occurred. On error, the wantClose
+ * flag will be set.
*/
-static void virNetServerClientDispatchRead(virNetServerClientPtr client)
+static virNetMessagePtr virNetServerClientDispatchRead(virNetServerClientPtr client)
{
readmore:
if (client->rx->nfds == 0) {
if (virNetServerClientRead(client) < 0) {
client->wantClose = true;
- return; /* Error */
+ return NULL; /* Error */
}
}
if (client->rx->bufferOffset < client->rx->bufferLength)
- return; /* Still not read enough */
+ return NULL; /* Still not read enough */
/* Either done with length word header */
if (client->rx->bufferLength == VIR_NET_MESSAGE_LEN_MAX) {
if (virNetMessageDecodeLength(client->rx) < 0) {
client->wantClose = true;
- return;
+ return NULL;
}
virNetServerClientUpdateEvent(client);
@@ -1236,7 +1269,7 @@ static void virNetServerClientDispatchRe
virNetMessageQueueServe(&client->rx);
virNetMessageFree(msg);
client->wantClose = true;
- return;
+ return NULL;
}
/* Now figure out if we need to read more data to get some
@@ -1246,7 +1279,7 @@ static void virNetServerClientDispatchRe
virNetMessageQueueServe(&client->rx);
virNetMessageFree(msg);
client->wantClose = true;
- return; /* Error */
+ return NULL; /* Error */
}
/* Try getting the file descriptors (may fail if blocking) */
@@ -1256,7 +1289,7 @@ static void virNetServerClientDispatchRe
virNetMessageQueueServe(&client->rx);
virNetMessageFree(msg);
client->wantClose = true;
- return;
+ return NULL;
}
if (rv == 0) /* Blocking */
break;
@@ -1270,7 +1303,7 @@ static void virNetServerClientDispatchRe
* again next time we run this method
*/
client->rx->bufferOffset = client->rx->bufferLength;
- return;
+ return NULL;
}
}
@@ -1313,16 +1346,6 @@ static void virNetServerClientDispatchRe
}
}
- /* Send off to for normal dispatch to workers */
- if (msg) {
- if (!client->dispatchFunc) {
- virNetMessageFree(msg);
- client->wantClose = true;
- } else {
- client->dispatchFunc(client, msg, client->dispatchOpaque);
- }
- }
-
/* Possibly need to create another receive buffer */
if (client->nrequests < client->nrequests_max) {
if (!(client->rx = virNetMessageNew(true))) {
@@ -1338,6 +1361,8 @@ static void virNetServerClientDispatchRe
}
}
virNetServerClientUpdateEvent(client);
+
+ return msg;
}
}
@@ -1482,6 +1507,7 @@ static void
virNetServerClientDispatchEvent(virNetSocketPtr sock, int events, void *opaque)
{
virNetServerClientPtr client = opaque;
+ virNetMessagePtr msg = NULL;
virObjectLock(client);
@@ -1504,7 +1530,7 @@ virNetServerClientDispatchEvent(virNetSo
virNetServerClientDispatchWrite(client);
if (events & VIR_EVENT_HANDLE_READABLE &&
client->rx)
- virNetServerClientDispatchRead(client);
+ msg = virNetServerClientDispatchRead(client);
#if WITH_GNUTLS
}
#endif
@@ -1517,6 +1543,9 @@ virNetServerClientDispatchEvent(virNetSo
client->wantClose = true;
virObjectUnlock(client);
+
+ if (msg)
+ virNetServerClientDispatchMessage(client, msg);
}


@@ -0,0 +1,115 @@
commit 33c6eb9689eb51dfe31dd05b24b3b6b1c948c267
Author: Jim Fehlig <jfehlig@suse.com>
Date: Thu Mar 8 15:04:48 2018 -0700
libvirtd: fix potential deadlock when reloading
It is possible to deadlock libvirtd when concurrently starting a domain
and restarting the daemon. Threads involved in the deadlock are
Thread 4 (Thread 0x7fc13b53e700 (LWP 64084)):
/lib64/libpthread.so.0
at util/virthread.c:154
at qemu/qemu_monitor.c:1083
cmd=0x7fc110017700, scm_fd=-1, reply=0x7fc13b53d318) at
qemu/qemu_monitor_json.c:305
cmd=0x7fc110017700,
reply=0x7fc13b53d318) at qemu/qemu_monitor_json.c:335
at qemu/qemu_monitor_json.c:1298
at qemu/qemu_monitor.c:1697
vm=0x7fc110003d00, asyncJob=QEMU_ASYNC_JOB_START) at qemu/qemu_process.c:1763
vm=0x7fc110003d00,
asyncJob=6, logCtxt=0x7fc1100089c0) at qemu/qemu_process.c:1835
vm=0x7fc110003d00, asyncJob=6, logCtxt=0x7fc1100089c0) at
qemu/qemu_process.c:2180
driver=0x7fc12004e1e0,
vm=0x7fc110003d00, asyncJob=QEMU_ASYNC_JOB_START, incoming=0x0, snapshot=0x0,
vmop=VIR_NETDEV_VPORT_PROFILE_OP_CREATE, flags=17) at qemu/qemu_process.c:6111
driver=0x7fc12004e1e0,
vm=0x7fc110003d00, updatedCPU=0x0, asyncJob=QEMU_ASYNC_JOB_START,
migrateFrom=0x0,
migrateFd=-1, migratePath=0x0, snapshot=0x0,
vmop=VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
flags=17) at qemu/qemu_process.c:6334
xml=0x7fc110000ed0 "<!--\nWARNING: THIS IS AN AUTO-GENERATED FILE.
CHANGES TO IT ARE LIKELY TO BE\nOVERWRITTEN AND LOST. Changes to this xml
configuration should be made using:\n virsh edit testvv\nor other
applicati"..., flags=0) at qemu/qemu_driver.c:1776
...
Thread 1 (Thread 0x7fc143c66880 (LWP 64081)):
/lib64/libpthread.so.0
at util/virthread.c:122
conf/nwfilter_conf.c:159
sig=0x7ffe0a831e30,
opaque=0x0) at remote/remote_daemon.c:724
opaque=0x558c5328b230) at rpc/virnetdaemon.c:654
at util/vireventpoll.c:508
rpc/virnetdaemon.c:858
remote/remote_daemon.c:1496
(gdb) thr 1
[Switching to thread 1 (Thread 0x7fc143c66880 (LWP 64081))]
/lib64/libpthread.so.0
(gdb) f 1
at util/virthread.c:122
122 pthread_rwlock_wrlock(&m->lock);
(gdb) p updateLock
$1 = {lock = {__data = {__lock = 0, __nr_readers = 1, __readers_wakeup = 0,
__writer_wakeup = 0, __nr_readers_queued = 0, __nr_writers_queued = 1,
__writer = 0,
__shared = 0, __rwelision = 0 '\000', __pad1 = "\000\000\000\000\000\000",
__pad2 = 0, __flags = 0},
__size = "\000\000\000\000\001", '\000' <repeats 15 times>, "\001",
'\000' <repeats 34 times>, __align = 4294967296}}
Reloading of the nwfilter driver is stuck waiting for a write lock, which
already has a reader (from qemuDomainCreateXML) in the critical section.
Since the reload occurs in the context of the main event loop thread,
libvirtd becomes deadlocked. The deadlock can be avoided by offloading
the reload work to a thread.
Signed-off-by: Jim Fehlig <jfehlig@suse.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
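For illustration only (not part of the patch below): a standalone sketch, using plain pthreads and hypothetical names in place of virThreadCreate/virStateReload, of the shape of the fix — the handler invoked from the event loop does nothing blocking itself and pushes the reload (which needs the write lock) to a short-lived thread, so the loop stays free and the reader can eventually release the lock.

/* Standalone sketch only; hypothetical names, plain pthreads. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t update_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for virStateReload(): takes the write lock that a concurrent
 * domain-start thread may already hold for reading. */
static void *reload_worker(void *opaque)
{
    (void)opaque;
    fprintf(stderr, "Reloading configuration on SIGHUP\n");
    pthread_rwlock_wrlock(&update_lock);
    /* ... re-read nwfilter/driver configuration ... */
    pthread_rwlock_unlock(&update_lock);
    return NULL;
}

/* Called from the main event loop when SIGHUP is observed (libvirt routes
 * signals into the loop; this is not a raw async signal handler).  It must
 * not block, so the actual reload is pushed to a short-lived thread. */
static void reload_handler(void)
{
    pthread_t thr;
    if (pthread_create(&thr, NULL, reload_worker, NULL) != 0)
        fprintf(stderr, "Failed to create thread to handle daemon restart\n");
    else
        pthread_detach(thr);
}

int main(void)
{
    reload_handler();   /* simulate receipt of SIGHUP */
    sleep(1);           /* give the detached worker time to finish */
    return 0;
}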
Index: libvirt-4.1.0/src/remote/remote_daemon.c
===================================================================
--- libvirt-4.1.0.orig/src/remote/remote_daemon.c
+++ libvirt-4.1.0/src/remote/remote_daemon.c
@@ -709,20 +709,32 @@ static void daemonShutdownHandler(virNet
virNetDaemonQuit(dmn);
}
+static void daemonReloadHandlerThread(void *opague ATTRIBUTE_UNUSED)
+{
+ VIR_INFO("Reloading configuration on SIGHUP");
+ virHookCall(VIR_HOOK_DRIVER_DAEMON, "-",
+ VIR_HOOK_DAEMON_OP_RELOAD, SIGHUP, "SIGHUP", NULL, NULL);
+ if (virStateReload() < 0)
+ VIR_WARN("Error while reloading drivers");
+}
+
static void daemonReloadHandler(virNetDaemonPtr dmn ATTRIBUTE_UNUSED,
siginfo_t *sig ATTRIBUTE_UNUSED,
void *opaque ATTRIBUTE_UNUSED)
{
+ virThread thr;
+
if (!driversInitialized) {
VIR_WARN("Drivers are not initialized, reload ignored");
return;
}
- VIR_INFO("Reloading configuration on SIGHUP");
- virHookCall(VIR_HOOK_DRIVER_DAEMON, "-",
- VIR_HOOK_DAEMON_OP_RELOAD, SIGHUP, "SIGHUP", NULL, NULL);
- if (virStateReload() < 0)
- VIR_WARN("Error while reloading drivers");
+ if (virThreadCreate(&thr, false, daemonReloadHandlerThread, NULL) < 0) {
+ /*
+ * Not much we can do on error here except log it.
+ */
+ VIR_ERROR(_("Failed to create thread to handle daemon restart"));
+ }
}
static int daemonSetupSignals(virNetDaemonPtr dmn)


@@ -0,0 +1,49 @@
commit 464889fff8174f560316c998f9f38814c9a57771
Author: Daniel P. Berrangé <berrange@redhat.com>
Date: Tue Mar 6 16:07:35 2018 +0000
rpc: push ref acquisition into RPC dispatch function
There's no reason why the virNetServerClientDispatchRead method needs to
acquire an extra reference on the "client" object. An extra reference is
only needed if the registered dispatch callback is going to keep hold of
the "client" for work in the background. Thus we can push reference
acquisition into virNetServerDispatchNewMessage.
Reviewed-by: John Ferlan <jferlan@redhat.com>
Reviewed-by: Jim Fehlig <jfehlig@suse.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
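For illustration only (not part of the patch below): a small sketch of the reference ownership rule this change adopts, with hypothetical names and C11 atomics standing in for virObject refcounting — the extra reference is taken only where the message is actually handed to a background worker, and dropped again immediately if queueing fails.

/* Standalone sketch only; build with: cc -std=c11 -pthread sketch.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    atomic_int refs;
} client_t;

static void client_ref(client_t *c)   { atomic_fetch_add(&c->refs, 1); }
static void client_unref(client_t *c)
{
    if (atomic_fetch_sub(&c->refs, 1) == 1)   /* last reference gone */
        free(c);
}

/* Stand-in for virThreadPoolSendJob(): pretend the queue rejected the job. */
static int send_job(client_t *c) { (void)c; return -1; }

static void dispatch_new_message(client_t *c)
{
    client_ref(c);              /* the worker will own this reference */
    if (send_job(c) < 0) {
        client_unref(c);        /* queueing failed: give it straight back */
        fprintf(stderr, "failed to queue job\n");
    }
}

int main(void)
{
    client_t *c = calloc(1, sizeof(*c));
    atomic_init(&c->refs, 1);
    dispatch_new_message(c);
    client_unref(c);            /* drop the caller's own reference */
    return 0;
}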
Index: libvirt-4.1.0/src/rpc/virnetserver.c
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserver.c
+++ libvirt-4.1.0/src/rpc/virnetserver.c
@@ -217,9 +217,11 @@ static int virNetServerDispatchNewMessag
priority = virNetServerProgramGetPriority(prog, msg->header.proc);
}
+ virObjectRef(client);
ret = virThreadPoolSendJob(srv->workers, priority, job);
if (ret < 0) {
+ virObjectUnref(client);
VIR_FREE(job);
virObjectUnref(prog);
}
Index: libvirt-4.1.0/src/rpc/virnetserverclient.c
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserverclient.c
+++ libvirt-4.1.0/src/rpc/virnetserverclient.c
@@ -1315,12 +1315,10 @@ static void virNetServerClientDispatchRe
/* Send off to for normal dispatch to workers */
if (msg) {
- virObjectRef(client);
if (!client->dispatchFunc ||
client->dispatchFunc(client, msg, client->dispatchOpaque) < 0) {
virNetMessageFree(msg);
client->wantClose = true;
- virObjectUnref(client);
return;
}
}


@@ -0,0 +1,40 @@
commit 86cae503a4404e068a11285564a0ee3862d1570c
Author: Daniel P. Berrangé <berrange@redhat.com>
Date: Tue Mar 6 17:56:57 2018 +0000
rpc: avoid crashing in pre-exec if no workers are present
If max_workers is set to zero, then the worker thread pool won't be
created, so when serializing state for pre-exec we must set various
parameters to zero.
Reviewed-by: John Ferlan <jferlan@redhat.com>
Reviewed-by: Jim Fehlig <jfehlig@suse.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Index: libvirt-4.1.0/src/rpc/virnetserver.c
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserver.c
+++ libvirt-4.1.0/src/rpc/virnetserver.c
@@ -580,18 +580,21 @@ virJSONValuePtr virNetServerPreExecResta
goto error;
if (virJSONValueObjectAppendNumberUint(object, "min_workers",
+ srv->workers == NULL ? 0 :
virThreadPoolGetMinWorkers(srv->workers)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Cannot set min_workers data in JSON document"));
goto error;
}
if (virJSONValueObjectAppendNumberUint(object, "max_workers",
+ srv->workers == NULL ? 0 :
virThreadPoolGetMaxWorkers(srv->workers)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Cannot set max_workers data in JSON document"));
goto error;
}
if (virJSONValueObjectAppendNumberUint(object, "priority_workers",
+ srv->workers == NULL ? 0 :
virThreadPoolGetPriorityWorkers(srv->workers)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Cannot set priority_workers data in JSON document"));


@@ -0,0 +1,117 @@
commit c6f1d5190bbe62dae6b32081c0edd141ee19e82f
Author: Daniel P. Berrangé <berrange@redhat.com>
Date: Tue Mar 6 16:44:34 2018 +0000
rpc: simplify calling convention of virNetServerClientDispatchFunc
Currently virNetServerClientDispatchFunc implementations are only
responsible for free'ing the "msg" parameter upon success. Simplify the
calling convention by making it their unconditional responsibility to
free the "msg", and close the client if desired.
Reviewed-by: John Ferlan <jferlan@redhat.com>
Reviewed-by: Jim Fehlig <jfehlig@suse.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
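For illustration only (not part of the patch below): a tiny sketch of the new ownership contract, with hypothetical names — the dispatch callback frees the message on every path, success or failure, so the caller never needs a return value to decide who cleans up.

/* Standalone sketch only; hypothetical names. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char *body; } msg_t;

static void msg_free(msg_t *m)
{
    if (!m)
        return;
    free(m->body);
    free(m);
}

/* Post-patch style callback: it owns 'msg' unconditionally and must
 * arrange for it to be freed on every path, including failures. */
static void dispatch_cb(msg_t *msg)
{
    if (!msg->body) {            /* failure path: still our job to free */
        msg_free(msg);
        return;
    }
    printf("handled: %s\n", msg->body);
    msg_free(msg);               /* success path frees too */
}

int main(void)
{
    msg_t *m = calloc(1, sizeof(*m));
    m->body = strdup("ping");
    dispatch_cb(m);              /* the caller never frees after handing over */
    return 0;
}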
Index: libvirt-4.1.0/src/rpc/virnetserver.c
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserver.c
+++ libvirt-4.1.0/src/rpc/virnetserver.c
@@ -182,15 +182,14 @@ static void virNetServerHandleJob(void *
VIR_FREE(job);
}
-static int virNetServerDispatchNewMessage(virNetServerClientPtr client,
- virNetMessagePtr msg,
- void *opaque)
+static void virNetServerDispatchNewMessage(virNetServerClientPtr client,
+ virNetMessagePtr msg,
+ void *opaque)
{
virNetServerPtr srv = opaque;
virNetServerProgramPtr prog = NULL;
unsigned int priority = 0;
size_t i;
- int ret = -1;
VIR_DEBUG("server=%p client=%p message=%p",
srv, client, msg);
@@ -207,7 +206,7 @@ static int virNetServerDispatchNewMessag
virNetServerJobPtr job;
if (VIR_ALLOC(job) < 0)
- goto cleanup;
+ goto error;
job->client = client;
job->msg = msg;
@@ -218,21 +217,24 @@ static int virNetServerDispatchNewMessag
}
virObjectRef(client);
- ret = virThreadPoolSendJob(srv->workers, priority, job);
-
- if (ret < 0) {
+ if (virThreadPoolSendJob(srv->workers, priority, job) < 0) {
virObjectUnref(client);
VIR_FREE(job);
virObjectUnref(prog);
+ goto error;
}
} else {
- ret = virNetServerProcessMsg(srv, client, prog, msg);
+ if (virNetServerProcessMsg(srv, client, prog, msg) < 0)
+ goto error;
}
- cleanup:
virObjectUnlock(srv);
+ return;
- return ret;
+ error:
+ virNetMessageFree(msg);
+ virNetServerClientClose(client);
+ virObjectUnlock(srv);
}
/**
Index: libvirt-4.1.0/src/rpc/virnetserverclient.c
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserverclient.c
+++ libvirt-4.1.0/src/rpc/virnetserverclient.c
@@ -1315,11 +1315,11 @@ static void virNetServerClientDispatchRe
/* Send off to for normal dispatch to workers */
if (msg) {
- if (!client->dispatchFunc ||
- client->dispatchFunc(client, msg, client->dispatchOpaque) < 0) {
+ if (!client->dispatchFunc) {
virNetMessageFree(msg);
client->wantClose = true;
- return;
+ } else {
+ client->dispatchFunc(client, msg, client->dispatchOpaque);
}
}
Index: libvirt-4.1.0/src/rpc/virnetserverclient.h
===================================================================
--- libvirt-4.1.0.orig/src/rpc/virnetserverclient.h
+++ libvirt-4.1.0/src/rpc/virnetserverclient.h
@@ -36,9 +36,12 @@ typedef virNetServer *virNetServerPtr;
typedef struct _virNetServerClient virNetServerClient;
typedef virNetServerClient *virNetServerClientPtr;
-typedef int (*virNetServerClientDispatchFunc)(virNetServerClientPtr client,
- virNetMessagePtr msg,
- void *opaque);
+/* This function owns the "msg" pointer it is passed and
+ * must arrange for virNetMessageFree to be called on it
+ */
+typedef void (*virNetServerClientDispatchFunc)(virNetServerClientPtr client,
+ virNetMessagePtr msg,
+ void *opaque);
typedef int (*virNetServerClientFilterFunc)(virNetServerClientPtr client,
virNetMessagePtr msg,


@@ -0,0 +1,66 @@
commit eefabb38c34cb61edcb4a233ebc7c764547e515e
Author: Daniel P. Berrangé <berrange@redhat.com>
Date: Tue Mar 6 17:12:20 2018 +0000
rpc: switch virtlockd and virtlogd to use single-threaded dispatch
Currently both virtlogd and virtlockd use a single worker thread for
dispatching RPC messages. Even this is overkill and their RPC message
handling callbacks all run in short, finite time and so blocking the
main loop is not an issue like you'd see in libvirtd with long running
QEMU commands.
By setting max_workers==0, we can turn off the worker thread and run
these daemons single threaded. This in turn fixes a serious problem in
the virtlockd daemon whereby it loses all fcntl() locks at re-exec due
to multiple threads existing. fcntl() locks only get preserved if the
process is single threaded at time of exec().
Reviewed-by: John Ferlan <jferlan@redhat.com>
Reviewed-by: Jim Fehlig <jfehlig@suse.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
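For illustration only (not part of the patch below): a minimal sketch of the kind of fcntl() lock virtlockd holds, using only standard POSIX calls; the lock file path and the re-exec step are illustrative. Per the commit message, such locks survive the daemon's re-exec only if the process is single-threaded when exec() happens, which is why the worker count is dropped to zero.

/* Standalone sketch only; the path is illustrative, not virtlockd's layout. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/tmp/demo-disk-resource.lock", O_RDWR | O_CREAT, 0600);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct flock fl = {
        .l_type = F_WRLCK,      /* exclusive lock on the whole file */
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,
    };
    if (fcntl(fd, F_SETLK, &fl) < 0) {
        perror("fcntl(F_SETLK)");
        return 1;
    }

    printf("holding lock in pid %ld; per the commit message, an exec() from "
           "here must happen with no other threads running for the lock to "
           "be preserved\n", (long)getpid());
    /* execv() of the new daemon binary would go here in the real re-exec */
    return 0;
}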
Index: libvirt-4.1.0/src/locking/lock_daemon.c
===================================================================
--- libvirt-4.1.0.orig/src/locking/lock_daemon.c
+++ libvirt-4.1.0/src/locking/lock_daemon.c
@@ -165,7 +165,7 @@ virLockDaemonNew(virLockDaemonConfigPtr
goto error;
if (!(srv = virNetServerNew("virtlockd", 1,
- 1, 1, 0, config->max_clients,
+ 0, 0, 0, config->max_clients,
config->max_clients, -1, 0,
NULL,
virLockDaemonClientNew,
@@ -180,7 +180,7 @@ virLockDaemonNew(virLockDaemonConfigPtr
srv = NULL;
if (!(srv = virNetServerNew("admin", 1,
- 1, 1, 0, config->admin_max_clients,
+ 0, 0, 0, config->admin_max_clients,
config->admin_max_clients, -1, 0,
NULL,
remoteAdmClientNew,
Index: libvirt-4.1.0/src/logging/log_daemon.c
===================================================================
--- libvirt-4.1.0.orig/src/logging/log_daemon.c
+++ libvirt-4.1.0/src/logging/log_daemon.c
@@ -154,7 +154,7 @@ virLogDaemonNew(virLogDaemonConfigPtr co
goto error;
if (!(srv = virNetServerNew("virtlogd", 1,
- 1, 1, 0, config->max_clients,
+ 0, 0, 0, config->max_clients,
config->max_clients, -1, 0,
NULL,
virLogDaemonClientNew,
@@ -169,7 +169,7 @@ virLogDaemonNew(virLogDaemonConfigPtr co
srv = NULL;
if (!(srv = virNetServerNew("admin", 1,
- 1, 1, 0, config->admin_max_clients,
+ 0, 0, 0, config->admin_max_clients,
config->admin_max_clients, -1, 0,
NULL,
remoteAdmClientNew,


@@ -0,0 +1,51 @@
commit fbf31e1a4cd19d6f6e33e0937a009775cd7d9513
Author: Daniel P. Berrangé <berrange@redhat.com>
Date: Thu Mar 1 14:55:26 2018 +0000
qemu: avoid denial of service reading from QEMU guest agent (CVE-2018-1064)
We read from the agent until seeing a \r\n pair to indicate a completed
reply or event. To avoid memory denial-of-service though, we must have a
size limit on amount of data we buffer. 10 MB is large enough that it
ought to cope with normal agent replies, and small enough that we're not
consuming unreasonable mem.
This is identical to the flaw we had reading from the QEMU monitor
as CVE-2018-5748, so rather embarrassing that we forgot to fix
the agent code at the same time.
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
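For illustration only (not part of the patch below): a standalone sketch of the bounded-buffer idea behind the fix, with hypothetical names and a deliberately tiny 64-byte cap so the error path is easy to trigger (the real limit is 10 MB). Pipe more than 64 bytes with no newline into it to see the rejection.

/* Standalone sketch only; hypothetical names and sizes. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_RESPONSE 64          /* stand-in for QEMU_AGENT_MAX_RESPONSE */

typedef struct {
    char *buffer;
    size_t length;               /* bytes currently allocated */
    size_t offset;               /* bytes currently filled */
} agent_t;

/* Grow-and-read step: refuse to grow past the cap so a peer that never
 * sends the terminating \r\n cannot make us allocate without bound. */
static ssize_t agent_read(agent_t *mon, int fd)
{
    if (mon->length - mon->offset < 16) {
        if (mon->length >= MAX_RESPONSE) {
            errno = ERANGE;
            fprintf(stderr, "No complete agent response found in %d bytes\n",
                    MAX_RESPONSE);
            return -1;
        }
        char *tmp = realloc(mon->buffer, mon->length + 16);
        if (!tmp)
            return -1;
        mon->buffer = tmp;
        mon->length += 16;
    }
    ssize_t got = read(fd, mon->buffer + mon->offset,
                       mon->length - mon->offset);
    if (got > 0)
        mon->offset += got;
    return got;
}

int main(void)
{
    agent_t mon = { 0 };
    while (agent_read(&mon, STDIN_FILENO) > 0)
        ;                        /* stop on EOF or on hitting the cap */
    free(mon.buffer);
    return 0;
}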
Index: libvirt-4.1.0/src/qemu/qemu_agent.c
===================================================================
--- libvirt-4.1.0.orig/src/qemu/qemu_agent.c
+++ libvirt-4.1.0/src/qemu/qemu_agent.c
@@ -53,6 +53,15 @@ VIR_LOG_INIT("qemu.qemu_agent");
#define DEBUG_IO 0
#define DEBUG_RAW_IO 0
+/* We read from QEMU until seeing a \r\n pair to indicate a
+ * completed reply or event. To avoid memory denial-of-service
+ * though, we must have a size limit on amount of data we
+ * buffer. 10 MB is large enough that it ought to cope with
+ * normal QEMU replies, and small enough that we're not
+ * consuming unreasonable mem.
+ */
+#define QEMU_AGENT_MAX_RESPONSE (10 * 1024 * 1024)
+
/* When you are the first to uncomment this,
* don't forget to uncomment the corresponding
* part in qemuAgentIOProcessEvent as well.
@@ -535,6 +544,12 @@ qemuAgentIORead(qemuAgentPtr mon)
int ret = 0;
if (avail < 1024) {
+ if (mon->bufferLength >= QEMU_AGENT_MAX_RESPONSE) {
+ virReportSystemError(ERANGE,
+ _("No complete agent response found in %d bytes"),
+ QEMU_AGENT_MAX_RESPONSE);
+ return -1;
+ }
if (VIR_REALLOC_N(mon->buffer,
mon->bufferLength + 1024) < 0)
return -1;


@@ -1,3 +1,29 @@
-------------------------------------------------------------------
Wed Mar 14 13:52:55 UTC 2018 - jfehlig@suse.com
- qemu: avoid denial of service reading from QEMU guest agent
CVE-2018-1064
fbf31e1a-CVE-2018-1064.patch
bsc#1083625
-------------------------------------------------------------------
Tue Mar 13 22:09:26 UTC 2018 - jfehlig@suse.com
- virtlockd: fix losing lock on re-exec
464889ff-rpc-aquire-ref-dispatch.patch,
c6f1d519-rpc-simplify-dispatch.patch,
06e7ebb6-rpc-invoke-dispatch-unlocked.patch,
86cae503-rpc-fix-pre-exec.patch,
eefabb38-rpc-virtlockd-virtlogd-single-thread.patch
bsc#1076861
-------------------------------------------------------------------
Tue Mar 13 21:55:47 UTC 2018 - jfehlig@suse.com
- libvirtd: fix potential deadlock when reloading
33c6eb96-fix-libvirtd-reload-deadlock.patch
bsc#1079150
-------------------------------------------------------------------
Mon Mar 5 15:55:07 UTC 2018 - jfehlig@suse.com


@@ -318,6 +318,13 @@ Source99: baselibs.conf
Source100: %{name}-rpmlintrc
# Upstream patches
Patch0: 6b3d716e-keycodemap-py3.patch
Patch1: 33c6eb96-fix-libvirtd-reload-deadlock.patch
Patch2: 464889ff-rpc-aquire-ref-dispatch.patch
Patch3: c6f1d519-rpc-simplify-dispatch.patch
Patch4: 06e7ebb6-rpc-invoke-dispatch-unlocked.patch
Patch5: 86cae503-rpc-fix-pre-exec.patch
Patch6: eefabb38-rpc-virtlockd-virtlogd-single-thread.patch
Patch7: fbf31e1a-CVE-2018-1064.patch
# Patches pending upstream review
Patch100: libxl-dom-reset.patch
Patch101: network-don-t-use-dhcp-authoritative-on-static-netwo.patch
@@ -924,6 +931,13 @@ libvirt plugin for NSS for translating domain names into IP addresses.
pushd src/keycodemapdb
%patch0 -p1
popd
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%patch6 -p1
%patch7 -p1
%patch100 -p1
%patch101 -p1
%patch150 -p1