Marcus Meissner
8ddc06dd17
- Add upstream patches providing the "prepare" and "check" watcher
  feature, which is needed by envoy-proxy:
  * 0001-evwatch-Add-prepare-and-check-watchers.patch
  * 0002-evwatch-fix-race-condition.patch

OBS-URL: https://build.opensuse.org/request/show/741457
OBS-URL: https://build.opensuse.org/package/show/devel:libraries:c_c++/libevent?expand=0&rev=46
From ed07dfb510a830d9435a9a7a1af76256a4233b61 Mon Sep 17 00:00:00 2001
From: Dan Rosen <mergeconflict@google.com>
Date: Tue, 26 Mar 2019 13:33:57 -0400
Subject: [PATCH 1/2] evwatch: Add "prepare" and "check" watchers.

Adds two new callbacks: "prepare" watchers, which fire immediately
before we poll for I/O, and "check" watchers, which fire immediately
after we finish polling and before we process events. This allows other
event loops to be embedded into libevent's, and enables certain
performance monitoring.

Closes: #710
---
 CMakeLists.txt         |   9 +-
 Makefile.am            |   1 +
 event-internal.h       |  49 ++++++
 event.c                |  34 +++-
 include/event2/watch.h | 134 ++++++++++++++++
 include/include.am     |   1 +
 sample/include.am      |   5 +-
 sample/watch-timing.c  | 344 +++++++++++++++++++++++++++++++++++++++++
 test/include.am        |   1 +
 test/regress.h         |   1 +
 test/regress_main.c    |   1 +
 test/regress_watch.c   | 243 +++++++++++++++++++++++++
 watch.c                |  82 ++++++++++
 13 files changed, 902 insertions(+), 3 deletions(-)
 create mode 100644 include/event2/watch.h
 create mode 100644 sample/watch-timing.c
 create mode 100644 test/regress_watch.c
 create mode 100644 watch.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 70acb696..b00af0b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -761,6 +761,7 @@ set(HDR_PUBLIC
     include/event2/event.h
     include/event2/event_compat.h
     include/event2/event_struct.h
+    include/event2/watch.h
     include/event2/http.h
     include/event2/http_compat.h
     include/event2/http_struct.h
@@ -789,6 +790,7 @@ set(SRC_CORE
     evutil.c
     evutil_rand.c
     evutil_time.c
+    watch.c
     listener.c
     log.c
     signal.c
@@ -977,11 +979,15 @@ if (NOT EVENT__DISABLE_SAMPLES)
         hello-world
         signal-test
         http-connect
-        time-test)
+        time-test
+        watch-timing)

     foreach(SAMPLE ${SAMPLES})
         add_sample_prog(OFF ${SAMPLE} sample/${SAMPLE}.c)
     endforeach()
+    if (NOT WIN32)
+        target_link_libraries(watch-timing m)
+    endif()

     if (NOT EVENT__DISABLE_OPENSSL)
         add_sample_prog(ON https-client
@@ -1084,6 +1090,7 @@ if (NOT EVENT__DISABLE_TESTS)
         test/regress_testutils.c
         test/regress_testutils.h
         test/regress_util.c
+        test/regress_watch.c
         test/tinytest.c)

     if (WIN32)
diff --git a/Makefile.am b/Makefile.am
index dd905026..2919df00 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -239,6 +239,7 @@ CORE_SRC = \
	evutil.c \
	evutil_rand.c \
	evutil_time.c \
+	watch.c \
	listener.c \
	log.c \
	$(SYS_SRC)
diff --git a/event-internal.h b/event-internal.h
index 92941b71..ed36fb0b 100644
--- a/event-internal.h
+++ b/event-internal.h
@@ -32,6 +32,7 @@ extern "C" {
 #endif

 #include "event2/event-config.h"
+#include "event2/watch.h"
 #include "evconfig-private.h"

 #include <time.h>
@@ -205,6 +206,52 @@ struct event_once {
	void *arg;
 };

+/** Contextual information passed from event_base_loop to the "prepare" watcher
+ * callbacks. We define this as a struct rather than individual parameters to
+ * the callback function for the sake of future extensibility. */
+struct evwatch_prepare_cb_info {
+	/** The timeout duration passed to the underlying implementation's `dispatch`.
+	 * See evwatch_prepare_get_timeout. */
+	const struct timeval *timeout;
+};
+
+/** Contextual information passed from event_base_loop to the "check" watcher
+ * callbacks. We define this as a struct rather than individual parameters to
+ * the callback function for the sake of future extensibility. */
+struct evwatch_check_cb_info {
+	/** Placeholder, since empty struct is not allowed by some compilers. */
+	void *unused;
+};
+
+/** Watcher types (prepare and check, perhaps others in the future). */
+#define EVWATCH_PREPARE 0
+#define EVWATCH_CHECK 1
+#define EVWATCH_MAX 2
+
+/** Handle to a "prepare" or "check" callback, registered in event_base. */
+union evwatch_cb {
+	evwatch_prepare_cb prepare;
+	evwatch_check_cb check;
+};
+struct evwatch {
+	/** Tail queue pointers, called "next" by convention in libevent.
+	 * See <sys/queue.h> */
+	TAILQ_ENTRY(evwatch) next;
+
+	/** Pointer to owning event loop */
+	struct event_base *base;
+
+	/** Watcher type (see above) */
+	unsigned type;
+
+	/** Callback function */
+	union evwatch_cb callback;
+
+	/** User-defined argument for callback function */
+	void *arg;
+};
+TAILQ_HEAD(evwatch_list, evwatch);
+
 struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
@@ -346,6 +393,8 @@ struct event_base {
	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;

+	/** "Prepare" and "check" watchers. */
+	struct evwatch_list watchers[EVWATCH_MAX];
 };

 struct event_config_entry {
diff --git a/event.c b/event.c
index b2ad3410..5e41ae04 100644
--- a/event.c
+++ b/event.c
@@ -59,6 +59,7 @@
 #include "event2/event.h"
 #include "event2/event_struct.h"
 #include "event2/event_compat.h"
+#include "event2/watch.h"
 #include "event-internal.h"
 #include "defer-internal.h"
 #include "evthread-internal.h"
@@ -737,6 +738,10 @@ event_base_new_with_config(const struct event_config *cfg)
	event_base_start_iocp_(base, cfg->n_cpus_hint);
 #endif

+	/* initialize watcher lists */
+	for (i = 0; i < EVWATCH_MAX; ++i)
+		TAILQ_INIT(&base->watchers[i]);
+
	return (base);
 }

@@ -839,6 +844,7 @@ event_base_free_(struct event_base *base, int run_finalizers)
 {
	int i, n_deleted=0;
	struct event *ev;
+	struct evwatch *watcher;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

@@ -939,6 +945,15 @@ event_base_free_(struct event_base *base, int run_finalizers)
	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

+	/* Free all event watchers */
+	for (i = 0; i < EVWATCH_MAX; ++i) {
+		while (!TAILQ_EMPTY(&base->watchers[i])) {
+			watcher = TAILQ_FIRST(&base->watchers[i]);
+			TAILQ_REMOVE(&base->watchers[i], watcher, next);
+			mm_free(watcher);
+		}
+	}
+
	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
@@ -1932,9 +1947,12 @@ event_base_loop(struct event_base *base, int flags)
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;
+	struct evwatch_prepare_cb_info prepare_info;
+	struct evwatch_check_cb_info check_info;
+	struct evwatch *watcher;

	/* Grab the lock. We will release it inside evsel.dispatch, and again
-	 * as we invoke user callbacks. */
+	 * as we invoke watchers and user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (base->running_loop) {
@@ -1993,6 +2011,13 @@ event_base_loop(struct event_base *base, int flags)

		event_queue_make_later_events_active(base);

+		/* Invoke prepare watchers before polling for events */
+		EVBASE_RELEASE_LOCK(base, th_base_lock);
+		prepare_info.timeout = tv_p;
+		TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next)
+			(*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
+		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);
@@ -2006,6 +2031,13 @@ event_base_loop(struct event_base *base, int flags)

		update_time_cache(base);

+		/* Invoke check watchers after polling for events, and before
+		 * processing them */
+		EVBASE_RELEASE_LOCK(base, th_base_lock);
+		TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next)
+			(*watcher->callback.check)(watcher, &check_info, watcher->arg);
+		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
diff --git a/include/event2/watch.h b/include/event2/watch.h
new file mode 100644
index 00000000..e3a6e609
--- /dev/null
+++ b/include/event2/watch.h
@@ -0,0 +1,134 @@
+/*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_WATCH_H_INCLUDED_
+#define EVENT2_WATCH_H_INCLUDED_
+
+/** @file event2/watch.h
+
+  "Prepare" and "check" watchers. A "prepare" watcher is a callback that fires
+  immediately before polling for I/O. A "check" watcher is a callback that
+  fires immediately after polling and before processing any active events. This
+  may be useful for embedding other libraries' event loops (e.g. UI toolkits)
+  into libevent's.
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/visibility.h>
+
+struct event_base;
+struct evwatch;
+struct evwatch_prepare_cb_info;
+struct evwatch_check_cb_info;
+struct timeval;
+
+/**
+  Prepare callback, invoked by event_base_loop immediately before polling for
+  I/O.
+
+  @param watcher the prepare watcher that invoked this callback.
+  @param info contextual information passed from event_base_loop.
+  @param arg additional user-defined argument, set in `evwatch_prepare_new`.
+ */
+typedef void (*evwatch_prepare_cb)(struct evwatch *, const struct evwatch_prepare_cb_info *, void *);
+
+/**
+  Check callback, invoked by event_base_loop immediately after polling for I/O
+  and before processing any active events.
+
+  @param watcher the check watcher that invoked this callback.
+  @param info contextual information passed from event_base_loop.
+  @param arg additional user-defined argument, set in `evwatch_check_new`.
+ */
+typedef void (*evwatch_check_cb)(struct evwatch *, const struct evwatch_check_cb_info *, void *);
+
+/**
+  Register a new "prepare" watcher, to be called in the event loop prior to
+  polling for events. Watchers will be called in the order they were
+  registered.
+
+  @param base the event_base to operate on.
+  @param callback the callback function to invoke.
+  @param arg additional user-defined argument provided to the callback.
+  @return a pointer to the newly allocated event watcher.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evwatch *evwatch_prepare_new(struct event_base *base, evwatch_prepare_cb callback, void *arg);
+
+/**
+  Register a new "check" watcher, to be called in the event loop after polling
+  for events and before handling them. Watchers will be called in the order
+  they were registered.
+
+  @param base the event_base to operate on.
+  @param callback the callback function to invoke.
+  @param arg additional user-defined argument provided to the callback.
+  @return a pointer to the newly allocated event watcher.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evwatch *evwatch_check_new(struct event_base *base, evwatch_check_cb callback, void *arg);
+
+/**
+  Get the event_base that a given evwatch is registered with.
+
+  @param watcher the watcher to get the event_base for.
+  @return the event_base for the given watcher.
+ */
+EVENT2_EXPORT_SYMBOL
+struct event_base *evwatch_base(struct evwatch *watcher);
+
+/**
+  Deregister and deallocate a watcher. Any watchers not freed using
+  evwatch_free will eventually be deallocated in event_base_free
+  (calling evwatch_free on a watcher after event_base_free has been
+  called on its corresponding event_base is an error).
+
+  @param watcher the watcher to deregister and deallocate.
+ */
+EVENT2_EXPORT_SYMBOL
+void evwatch_free(struct evwatch *watcher);
+
+/**
+  Get the timeout (the expected polling duration) passed to the underlying
+  implementation's `dispatch`. This value will only be set if there are pending
+  EV_TIMEOUT events and if the event_base isn't in EVLOOP_NONBLOCK mode. It may
+  be a useful performance statistic to compare the expected polling duration
+  against the actual polling duration (that is, the time difference measured
+  between this prepare callback and the following check callback).
+
+  @param info the "prepare" callback info.
+  @param timeout address of a timeval to write the polling duration to.
+  @return 1 if a value was written to *timeout, or 0 if not.
+ */
+EVENT2_EXPORT_SYMBOL
+int evwatch_prepare_get_timeout(const struct evwatch_prepare_cb_info *info, struct timeval *timeout);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_WATCH_H_INCLUDED_ */
diff --git a/include/include.am b/include/include.am
index aaa2042a..3f4a5522 100644
--- a/include/include.am
+++ b/include/include.am
@@ -18,6 +18,7 @@ EVENT2_EXPORT = \
	include/event2/event.h \
	include/event2/event_compat.h \
	include/event2/event_struct.h \
+	include/event2/watch.h \
	include/event2/http.h \
	include/event2/http_compat.h \
	include/event2/http_struct.h \
diff --git a/sample/include.am b/sample/include.am
index cc003b78..f33e850b 100644
--- a/sample/include.am
+++ b/sample/include.am
@@ -11,7 +11,8 @@ SAMPLES = \
	sample/http-server \
	sample/http-connect \
	sample/signal-test \
-	sample/time-test
+	sample/time-test \
+	sample/watch-timing

 if OPENSSL
 SAMPLES += sample/le-proxy
@@ -51,3 +52,5 @@ sample_http_server_SOURCES = sample/http-server.c
 sample_http_server_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
 sample_http_connect_SOURCES = sample/http-connect.c
 sample_http_connect_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_watch_timing_SOURCES = sample/watch-timing.c
+sample_watch_timing_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la -lm
diff --git a/sample/watch-timing.c b/sample/watch-timing.c
new file mode 100644
index 00000000..9babd32b
--- /dev/null
+++ b/sample/watch-timing.c
@@ -0,0 +1,344 @@
+#include <math.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <time.h>
+
+#include <event2/event.h>
+#include <event2/util.h>
+#include <event2/watch.h>
+
+/**
+  An approximate histogram in constant space, based on Ben-Haim & Yom-Tov, "A
+  Streaming Parallel Decision Tree Algorithm" [1] and a previous implementation
+  in Java by Dan Rosen [2]. The histogram is represented as an array of
+  contiguous bins of non-uniform width. Each bin is centered on a certain point,
+  called its "centroid," and summarizes some "count" of observations. The bins
+  are ordered in the array by their centroids; an array is used rather than a
+  linked structure for CPU cache friendliness.
+
+  When the histogram is updated with a new observation, a new bin is created for
+  it, and then the pair of bins with the closest centroids are merged. Since
+  bins are stored in contiguous memory, this update process requires bins to be
+  shifted in worst-case linear time. The novel contribution of this
+  implementation is to maintain an insertion gap adjacent to the most recently
+  merged bin, such that for "well behaved" input (such as a normal
+  distribution), the number of shift operations required by an update should be
+  much less than the total number of bins on average.
+
+  This implementation is almost entirely untested. Don't trust it for
+  production code.
+
+  [1] http://www.jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf
+  [2] https://github.com/mergeconflict/histogram
+ */
+
+/** Compare two doubles for equality without the compiler warning. This is
+ * probably the wrong thing to do, but this is just sample code :) */
+static inline int
+eq(double a, double b)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+	return a == b;
+#pragma GCC diagnostic pop
+}
+
+struct bin {
+	double centroid;
+	unsigned long count;
+};
+
+struct histogram {
+	struct bin *bins;
+	unsigned max_bins;
+	unsigned num_bins;
+	unsigned gap;
+	unsigned long count;
+	double min;
+	double max;
+};
+
+static struct histogram *
+histogram_new(unsigned max_bins)
+{
+	struct histogram *h = malloc(sizeof(struct histogram));
+	h->bins = calloc(max_bins + 1, sizeof(struct bin));
+	h->max_bins = max_bins;
+	h->num_bins = 0;
+	h->gap = 0;
+	h->count = 0;
+	h->min = INFINITY;
+	h->max = -INFINITY;
+	return h;
+}
+
+static void
+histogram_free(struct histogram *h)
+{
+	free(h->bins);
+	free(h);
+}
+
+static void
+histogram_update(struct histogram *h, double observation)
+{
+	unsigned bin;
+	double delta;
+	double min_delta = INFINITY;
+
+	/* Update population count, min and max */
+	++(h->count);
+	if (observation < h->min)
+		h->min = observation;
+	if (observation > h->max)
+		h->max = observation;
+
+	/* Shift the insertion gap to the left or right so that the new bin
+	 * containing the given observation as its centroid will be in the right
+	 * order with respect to the other bins. */
+	while (1) {
+		/* Look at the bin to the left of the gap... */
+		if (h->gap != 0) {
+			/* If its centroid is greater than the observation, move
+			 * the gap to the left and try again... */
+			if (h->bins[h->gap - 1].centroid > observation) {
+				h->bins[h->gap] = h->bins[h->gap - 1];
+				--(h->gap);
+				continue;
+			}
+			/* If its centroid is equal to the observation, just
+			 * update its count in place. */
+			if (eq(h->bins[h->gap - 1].centroid, observation)) {
+				++(h->bins[h->gap - 1].count);
+				return;
+			}
+		}
+
+		/* Look at the bin to the right of the gap... */
+		if (h->gap != h->num_bins) {
+			/* If its centroid is less than the observation, move
+			 * the gap to the right and try again... */
+			if (h->bins[h->gap + 1].centroid < observation) {
+				h->bins[h->gap] = h->bins[h->gap + 1];
+				++(h->gap);
+				continue;
+			}
+			/* If its centroid is equal to the observation, just
+			 * update its count in place. */
+			if (eq(h->bins[h->gap + 1].centroid, observation)) {
+				++(h->bins[h->gap + 1].count);
+				return;
+			}
+		}
+
+		/* If the gap is in the right place, we're ready to insert. */
+		break;
+	}
+
+	/* Insert the observation into a new bin at the gap. */
+	h->bins[h->gap].centroid = observation;
+	h->bins[h->gap].count = 1;
+
+	/* If the histogram isn't full yet, don't bother merging bins, just
+	 * stick the gap back at the end. */
+	if (h->num_bins != h->max_bins) {
+		h->gap = ++(h->num_bins);
+		return;
+	}
+
+	/* Find the two adjacent bins with the closest centroids and merge them.
+	 * The choice whether to leave the gap on the left or right is
+	 * arbitrary (we choose the left). */
+	for (bin = 0; bin < h->num_bins; ++bin) {
+		delta = h->bins[bin + 1].centroid - h->bins[bin].centroid;
+		if (delta < min_delta) {
+			min_delta = delta;
+			h->gap = bin;
+		}
+	}
+	/* The merged centroid is the weighted average of the two, and the
+	 * merged count is the sum of the two. */
+	h->bins[h->gap + 1].centroid =
+		(h->bins[h->gap].centroid * h->bins[h->gap].count +
+		 h->bins[h->gap + 1].centroid * h->bins[h->gap + 1].count) /
+		(h->bins[h->gap].count + h->bins[h->gap + 1].count);
+	h->bins[h->gap + 1].count += h->bins[h->gap].count;
+}
+
+static double
+histogram_query(const struct histogram *h, double quantile)
+{
+	unsigned lhs = 0, rhs = 0;
+	struct bin lhs_bin = { 0, 0 }, rhs_bin = { 0, 0 };
+	double lhs_total = 0, rhs_total = 0;
+	double a = 0, b = 0, c = 0, z = 0;
+
+	/* The "needle" is the n'th value represented by the histogram. For
+	 * example, if the histogram summarizes 100 entries and we're querying
+	 * for the 50th percentile, the needle is 50. */
+	double needle = h->count * quantile;
+	if (quantile <= 0)
+		return h->min;
+	if (quantile >= 1)
+		return h->max;
+
+	/* Divide the histogram into slices: the first slice starts at h->min
+	 * and ends at h->bins[0].centroid, the last slice starts at
+	 * h->bins[h->num_bins].centroid and ends at h->max, and the slices
+	 * in the middle are between adjacent centroids (minding the gap). The
+	 * "count" in each slice is the average of the count in the two bins
+	 * that define it. Find the slice containing the needle by keeping a
+	 * running total of the slice counts. */
+	while (rhs_total < needle) {
+		/* Determine the left-hand side bin of the current slice. Note
+		 * that the first slice has bin 0 on its right-hand side! */
+		if (rhs == 0) {
+			lhs_bin.centroid = h->min;
+			lhs_bin.count = 0;
+		} else {
+			lhs_bin = h->bins[lhs];
+		}
+
+		/* Determine the right-hand side bin of the current slice... */
+		if (rhs > h->num_bins) {
+			lhs_bin.centroid = h->max;
+			rhs_bin.count = 0;
+		} else {
+			rhs_bin = h->bins[rhs];
+		}
+
+		/* Update the running totals: the lhs total is whatever the rhs
+		 * total was previously, and the new rhs total includes the
+		 * count for this slice. */
+		lhs_total = rhs_total;
+		rhs_total += 0.5 * (lhs_bin.count + rhs_bin.count);
+
+		/* Next iteration's left-hand side is the current iteration's
+		 * right-hand side, and next iteration's right-hand side is one
+		 * bin further right (minding the gap). */
+		lhs = rhs++;
+		if (rhs == h->gap)
+			rhs++;
+	}
+
+	/* Approximate the value at the requested quantile... */
+	a = rhs_bin.count - lhs_bin.count;
+	if (eq(a, 0)) {
+		b = rhs_total - lhs_total;
+		z = eq(b, 0) ? 0 : (needle - lhs_total) / b;
+	} else {
+		b = 2 * lhs_bin.count;
+		c = 2 * (lhs_total - needle);
+		z = (-b + sqrt(b * b - 4 * a * c)) / (2 * a);
+	}
+	return lhs_bin.centroid + (rhs_bin.centroid - lhs_bin.centroid) * z;
+}
+
+/**
+  This is an example of one way in which "prepare" and "check" watchers can be
+  useful. We track histograms of two timing metrics:
+
+  The first is "duration," which is the amount of time between a "check" and the
+  next "prepare" (in the next iteration of the event loop). This corresponds
+  pretty closely to the amount of time spent in event handlers (such as the
+  `on_timeout` handler in this example). In a real-world server, this would
+  provide a way to monitor whether any of your handlers are blocking or
+  otherwise performing heavy computation.
+
+  The second is "delay," which is the difference between the actual and expected
+  polling duration. The actual polling duration is the amount of time between a
+  "prepare" and the next "check" (in the same iteration of the event loop), and
+  the expected duration is obtained from `evwatch_prepare_get_timeout`. In a
+  real-world server, this provides an indication of kernel scheduling delays.
+  For example, if your server is lightly loaded, this delay should usually be
+  close to your kernel's scheduling quantum (e.g. 1 millisecond).
+ */
+
+static struct event_base *base;
+static struct timeval
+	prepare_time = { 0, 0 },
+	check_time = { 0, 0 },
+	expected = { 0, 0 };
+static struct histogram *durations, *delays;
+
+static void on_prepare(struct evwatch *watcher, const struct evwatch_prepare_cb_info *info, void *arg)
+{
+	struct timeval duration;
+	evutil_gettimeofday(&prepare_time, NULL);
+	evwatch_prepare_get_timeout(info, &expected);
+	if (check_time.tv_sec != 0) {
+		evutil_timersub(&prepare_time, &check_time, &duration);
+		histogram_update(durations, duration.tv_sec + duration.tv_usec / 1000000.0l);
+	}
+}
+
+static void on_check(struct evwatch *watcher, const struct evwatch_check_cb_info *info, void *arg)
+{
+	struct timeval actual, delay;
+	evutil_gettimeofday(&check_time, NULL);
+	evutil_timersub(&check_time, &prepare_time, &actual);
+	evutil_timersub(&actual, &expected, &delay);
+	if (delay.tv_sec >= 0)
+		histogram_update(delays, delay.tv_sec + delay.tv_usec / 1000000.0l);
+}
+
+static void
+on_timeout(evutil_socket_t fd, short events, void *arg)
+{
+	printf("durations: [p50 = %fs, p95 = %fs], delays: [p50 = %fs, p95 = %fs]\n",
+		histogram_query(durations, 0.5),
+		histogram_query(durations, 0.95),
+		histogram_query(delays, 0.5),
+		histogram_query(delays, 0.95));
+}
+
+
+static void
+on_sigint(evutil_socket_t sig, short events, void *arg)
+{
+	event_base_loopbreak(base);
+}
+
+int
+main(int argc, char **argv)
+{
+	struct timeval one_second = { 1, 0 };
+	struct event *timeout_event, *sigint_event;
+
+	base = event_base_new();
+	durations = histogram_new(100);
+	delays = histogram_new(100);
+
+	/* add prepare and check watchers; no need to hang on to their pointers,
+	 * since they will be freed for us in event_base_free. */
+	evwatch_prepare_new(base, &on_prepare, NULL);
+	evwatch_check_new(base, &on_check, NULL);
+
+	/* set a persistent one second timeout */
+	timeout_event = event_new(base, -1, EV_PERSIST, &on_timeout, NULL);
+	if (!timeout_event)
+		return EXIT_FAILURE;
+	event_add(timeout_event, &one_second);
+
+	/* set a handler for interrupt, so we can quit cleanly */
+	sigint_event = evsignal_new(base, SIGINT, &on_sigint, NULL);
+	if (!sigint_event)
+		return EXIT_FAILURE;
+	event_add(sigint_event, NULL);
+
+	/* run the event loop until interrupted */
+	event_base_dispatch(base);
+
+	/* clean up */
+	event_free(timeout_event);
+	event_free(sigint_event);
+	event_base_free(base);
+	histogram_free(durations);
+	histogram_free(delays);
+
+	return EXIT_SUCCESS;
+}
diff --git a/test/include.am b/test/include.am
index 04375247..ec11b6bf 100644
--- a/test/include.am
+++ b/test/include.am
@@ -120,6 +120,7 @@ test_regress_SOURCES = \
	test/regress_testutils.c \
	test/regress_testutils.h \
	test/regress_util.c \
+	test/regress_watch.c \
	test/tinytest.c \
	$(regress_thread_SOURCES) \
	$(regress_zlib_SOURCES)
diff --git a/test/regress.h b/test/regress.h
index 643b82ba..8486e019 100644
--- a/test/regress.h
+++ b/test/regress.h
@@ -53,6 +53,7 @@ extern struct testcase_t ssl_testcases[];
 extern struct testcase_t listener_testcases[];
 extern struct testcase_t listener_iocp_testcases[];
 extern struct testcase_t thread_testcases[];
+extern struct testcase_t watch_testcases[];

 extern struct evutil_weakrand_state test_weakrand_state;

diff --git a/test/regress_main.c b/test/regress_main.c
index c9372825..6c64b0de 100644
--- a/test/regress_main.c
+++ b/test/regress_main.c
@@ -380,6 +380,7 @@ struct testgroup_t testgroups[] = {
	{ "rpc/", rpc_testcases },
	{ "thread/", thread_testcases },
	{ "listener/", listener_testcases },
+	{ "watch/", watch_testcases },
 #ifdef _WIN32
	{ "iocp/", iocp_testcases },
	{ "iocp/bufferevent/", bufferevent_iocp_testcases },
diff --git a/test/regress_watch.c b/test/regress_watch.c
new file mode 100644
index 00000000..9e340618
--- /dev/null
+++ b/test/regress_watch.c
@@ -0,0 +1,243 @@
+/*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <time.h>
+
+#include "event2/event.h"
+#include "event2/watch.h"
+#include "regress.h"
+
+static int iteration = 0;
+static int prepare_callback_1_count = 0;
+static int prepare_callback_2_count = 0;
+static int check_callback_1_count = 0;
+static int check_callback_2_count = 0;
+static struct timeval start_time = { 0, 0 };
+static struct timeval end_time = { 0, 0 };
+static int user_arg = 8675309;
+
+static void
+prepare_callback_1(struct evwatch *watcher, const struct evwatch_prepare_cb_info *info, void *arg)
+{
+	struct timeval timeout;
+	int timeout_msec;
+
+	/* user argument should be passed properly */
+	tt_ptr_op(arg, ==, &user_arg);
+
+	++prepare_callback_1_count;
+
+	/* prepare_callback_1 should always fire before prepare_callback_2, and
+	 * before both check callbacks */
+	tt_int_op(prepare_callback_1_count, >, prepare_callback_2_count);
+	tt_int_op(prepare_callback_1_count, >, check_callback_1_count);
+	tt_int_op(prepare_callback_1_count, >, check_callback_2_count);
+
+	/* if we've just scheduled the timeout event at the beginning of the
+	 * iteration, save the current time and assert that the timeout is
+	 * roughly what we set (this won't be exact on some platforms) */
+	if (start_time.tv_sec == 0) {
+		event_base_gettimeofday_cached(evwatch_base(watcher), &start_time);
+		tt_int_op(evwatch_prepare_get_timeout(info, &timeout), ==, 1);
+
+		timeout_msec = (timeout.tv_sec * 1000) + (timeout.tv_usec / 1000);
+		tt_int_op(timeout_msec, >=, 995);
+		tt_int_op(timeout_msec, <=, 1005);
+	}
+end:
+	;
+}
+
+static void
+prepare_callback_2(struct evwatch *watcher, const struct evwatch_prepare_cb_info *info, void *arg)
+{
+	/* user argument should be passed properly */
+	tt_ptr_op(arg, ==, &user_arg);
+
+	++prepare_callback_2_count;
+
+	/* prepare_callback_2 should only fire on the first iteration, and
+	 * should fire before both check callbacks */
+	tt_int_op(iteration, ==, 0);
+	tt_int_op(prepare_callback_2_count, >, check_callback_1_count);
+	tt_int_op(prepare_callback_2_count, >, check_callback_2_count);
+end:
+	;
+}
+
+static void
+check_callback_1(struct evwatch *watcher, const struct evwatch_check_cb_info *info, void *arg)
+{
+	/* user argument should be passed properly */
+	tt_ptr_op(arg, ==, &user_arg);
+
+	++check_callback_1_count;
+
+	/* check_callback_1 should always fire before check_callback_2 */
+	tt_int_op(check_callback_1_count, >, check_callback_2_count);
+
+	/* save the end time, in case the timeout fires this time through the
+	 * event loop */
+	event_base_gettimeofday_cached(evwatch_base(watcher), &end_time);
+end:
+	;
+}
+
+static void
+check_callback_2(struct evwatch *watcher, const struct evwatch_check_cb_info *info, void *arg)
+{
+	/* user argument should be passed properly */
+	tt_ptr_op(arg, ==, &user_arg);
+
+	++check_callback_2_count;
+
+	/* check_callback_2 should only fire on the first iteration */
+	tt_int_op(iteration, ==, 0);
+end:
+	;
+}
+
+static void
+timeout_callback(evutil_socket_t fd, short events, void *arg)
+{
+	/* the duration between the start and end times should be at least 1
+	 * second */
+	tt_int_op(end_time.tv_sec, >=, start_time.tv_sec + 1);
+end:
+	;
+}
+
+/**
+  This tests a few important properties of "prepare" and "check" watchers:
+  - Watchers should be called in the order they were registered.
+  - Prepare watchers should be called before check watchers.
+  - Freeing a watcher will stop callbacks to it, but not to other watchers.
+  - Reported durations should align with the registered timeouts.
+  - It should be possible to call back into libevent from a callback without a
+    recursive lock.
+  - If this test is compiled with ASAN or similar, this test also illustrates
+    that event_base_free will free any watchers not previously freed by
+    evwatch_free.
+ */
+static void
+test_callback_ordering(void *ptr)
+{
+	struct basic_test_data *data = ptr;
+	struct event_base *base = data->base;
+	struct evwatch *prepare_callback_2_watcher;
+	struct evwatch *check_callback_2_watcher;
+	struct timeval timeout;
+
+	/* install prepare and check watchers */
+	evwatch_prepare_new(base, &prepare_callback_1, &user_arg);
+	evwatch_check_new(base, &check_callback_1, &user_arg);
+	prepare_callback_2_watcher = evwatch_prepare_new(base, &prepare_callback_2, &user_arg);
+	check_callback_2_watcher = evwatch_check_new(base, &check_callback_2, &user_arg);
+
+	/* schedule an 1 second timeout event, and run the event loop until the
+	 * timeout fires */
+	timeout.tv_sec = 1;
+	timeout.tv_usec = 0;
+	event_base_once(base, -1, EV_TIMEOUT, &timeout_callback, 0, &timeout);
+	event_base_dispatch(base);
+
+	/* second iteration: free two of the watchers, schedule a timeout and
+	 * run the event loop again */
+	iteration = 1;
+	start_time.tv_sec = 0;
+	evwatch_free(prepare_callback_2_watcher);
+	evwatch_free(check_callback_2_watcher);
+	event_base_once(base, -1, EV_TIMEOUT, &timeout_callback, 0, &timeout);
+	event_base_dispatch(base);
+}
+
+static void
+prepare_callback_3(struct evwatch *watcher, const struct evwatch_prepare_cb_info *info, void *arg)
+{
+	/* timeout should not be written to */
+	struct timeval timeout = { 123, 456 };
+	tt_int_op(evwatch_prepare_get_timeout(info, &timeout), ==, 0);
+	tt_int_op(timeout.tv_sec, ==, 123);
+	tt_int_op(timeout.tv_usec, ==, 456);
+end:
+	;
+}
+
+/**
+  Test that evwatch_prepare_get_timeout behaves correctly when there is no
+  timeout.
+ */
+static void
+test_timeout_unavailable(void *ptr)
+{
+	struct basic_test_data *data = ptr;
+	struct event_base *base = data->base;
+
+	evwatch_prepare_new(base, &prepare_callback_3, NULL);
+	event_base_dispatch(base);
+}
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+static void *
+bad_malloc(size_t sz)
+{
+	return NULL;
+}
+
+/**
+  Test that creating prepare and check watchers fails gracefully if we can't
+  allocate memory.
+ */
+static void
+test_malloc_failure(void *ptr)
+{
+	struct basic_test_data *data = ptr;
+	struct event_base *base = data->base;
+	struct evwatch *bad_prepare, *bad_check;
+
+	event_set_mem_functions(bad_malloc, realloc, free);
+	bad_prepare = evwatch_prepare_new(base, &prepare_callback_1, NULL);
+	tt_ptr_op(bad_prepare, ==, NULL);
+
+	bad_check = evwatch_check_new(base, &check_callback_1, NULL);
+	tt_ptr_op(bad_check, ==, NULL);
+
+	event_set_mem_functions(malloc, realloc, free);
+end:
+	;
+}
+#endif
+
+struct testcase_t watch_testcases[] = {
+	BASIC(callback_ordering, TT_FORK|TT_NEED_BASE),
+	BASIC(timeout_unavailable, TT_FORK|TT_NEED_BASE),
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+	BASIC(malloc_failure, TT_FORK|TT_NEED_BASE),
+#endif
+	END_OF_TESTCASES
+};
diff --git a/watch.c b/watch.c
new file mode 100644
index 00000000..645cbd9a
--- /dev/null
+++ b/watch.c
@@ -0,0 +1,82 @@
+/*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/watch.h"
+#include "event-internal.h"
+#include "evthread-internal.h"
+
+static inline struct evwatch *
+evwatch_new(struct event_base *base, union evwatch_cb callback, void *arg, unsigned type)
+{
+	struct evwatch *watcher = mm_malloc(sizeof(struct evwatch));
+	if (!watcher)
+		return NULL;
+	watcher->base = base;
+	watcher->type = type;
+	watcher->callback = callback;
+	watcher->arg = arg;
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	TAILQ_INSERT_TAIL(&base->watchers[type], watcher, next);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return watcher;
+}
+
+struct evwatch *
+evwatch_prepare_new(struct event_base *base, evwatch_prepare_cb callback, void *arg)
+{
+	union evwatch_cb cb = { .prepare = callback };
+	return evwatch_new(base, cb, arg, EVWATCH_PREPARE);
+}
+
+struct evwatch *
+evwatch_check_new(struct event_base *base, evwatch_check_cb callback, void *arg)
+{
+	union evwatch_cb cb = { .check = callback };
+	return evwatch_new(base, cb, arg, EVWATCH_CHECK);
+}
+
+struct event_base *
+evwatch_base(struct evwatch *watcher)
+{
+	return watcher->base;
+}
+
+void
+evwatch_free(struct evwatch *watcher)
+{
+	EVBASE_ACQUIRE_LOCK(watcher->base, th_base_lock);
+	TAILQ_REMOVE(&watcher->base->watchers[watcher->type], watcher, next);
+	EVBASE_RELEASE_LOCK(watcher->base, th_base_lock);
+	mm_free(watcher);
+}
+
+int
+evwatch_prepare_get_timeout(const struct evwatch_prepare_cb_info *info, struct timeval *timeout)
+{
+	if (info->timeout) {
+		*timeout = *(info->timeout);
+		return 1;
+	}
+	return 0;
+}
--
2.23.0
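The patch above already ships a full demo (sample/watch-timing.c). As a quick orientation for packagers, the following is a minimal, hypothetical sketch of how a consumer such as envoy-proxy could drive the new API; only the evwatch_*, event_* and evutil_* calls come from the patch and from libevent itself, while the monitor_prepare/monitor_check names and the 500 ms timer are illustrative assumptions.

#include <stdio.h>
#include <event2/event.h>
#include <event2/util.h>
#include <event2/watch.h>

static struct timeval poll_started;

/* Runs right before the backend polls: remember when polling starts
 * and ask how long libevent expects to sleep. */
static void
monitor_prepare(struct evwatch *w, const struct evwatch_prepare_cb_info *info, void *arg)
{
	struct timeval expected;
	evutil_gettimeofday(&poll_started, NULL);
	if (evwatch_prepare_get_timeout(info, &expected))
		printf("expected poll: %ld.%06lds\n",
		    (long)expected.tv_sec, (long)expected.tv_usec);
}

/* Runs right after polling, before active events are processed:
 * report how long the poll actually took. */
static void
monitor_check(struct evwatch *w, const struct evwatch_check_cb_info *info, void *arg)
{
	struct timeval now, actual;
	evutil_gettimeofday(&now, NULL);
	evutil_timersub(&now, &poll_started, &actual);
	printf("actual poll:   %ld.%06lds\n",
	    (long)actual.tv_sec, (long)actual.tv_usec);
}

static void
tick(evutil_socket_t fd, short events, void *arg)
{
	/* nothing to do; exists only so the loop has an event to wait for */
}

int
main(void)
{
	struct timeval half_second = { 0, 500000 };
	struct event_base *base = event_base_new();
	struct event *timer = event_new(base, -1, EV_PERSIST, tick, NULL);

	/* watchers are owned by the base and freed in event_base_free */
	evwatch_prepare_new(base, monitor_prepare, NULL);
	evwatch_check_new(base, monitor_check, NULL);

	event_add(timer, &half_second);
	event_base_dispatch(base);	/* Ctrl-C to stop */

	event_free(timer);
	event_base_free(base);
	return 0;
}

The second patch in this request, 0002-evwatch-fix-race-condition.patch, adjusts the locking around these callbacks; the sketch above only exercises the public API and is unaffected by that fix.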