Compare commits: pull-input...v2.4.0-rc2
58 commits:

b69b30532e  3edf6b3f1e  a52b2cbf21  dc94bd9166  05e514b1d4  21a03d17f2
eabc977973  6493c975af  12d69ac03b  e4efd8a488  9990069758  b9c4630799
5f8343d067  774ee4772b  57b73090e0  4b7a6bf402  a1bc040dab  47c719964a
bd03a38fdf  625de449fc  b49b8c572f  da69028261  f63eab8bec  80da311d81
fcf0cdc362  091f1f5296  09b61db7c1  5873281023  13566fe3e5  dcc8a3ab63
f73ca73634  bd09594603  f9d6dbf0bf  38705bb57b  9a2a66238e  feb93f3617
d345ed2da3  75d663611e  71358470ee  621a20e081  5b5e8cdd7d  92fdfa4bef
86d7e214c2  cfda2cef3d  fd1a9ef9c2  b4329bf41c  b92304ee81  d3462e378f
becaeb726a  c6742b14fe  24b41d66c8  5348c62cab  586d2142a9  02dae26ac4
f8b3e48b2d  67ff64e082  6110ce59af  567161fdd4
aio-posix.c (20 lines changed)

@@ -233,26 +233,23 @@ static void add_pollfd(AioHandler *node)
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
-    bool was_dispatching;
     int i, ret;
     bool progress;
     int64_t timeout;
 
     aio_context_acquire(ctx);
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
 
     ctx->walking_handlers++;
 
@@ -272,10 +269,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
         aio_context_release(ctx);
     }
     ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (blocking) {
+        atomic_sub(&ctx->notify_me, 2);
+    }
     if (timeout) {
         aio_context_acquire(ctx);
     }
 
+    aio_notify_accept(ctx);
+
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
         for (i = 0; i < npfd; i++) {
@@ -287,12 +289,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
 
     /* Run dispatch even if there were no readable fds to run timers */
-    aio_set_dispatching(ctx, true);
     if (aio_dispatch(ctx)) {
         progress = true;
    }
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
 
     return progress;
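The change above replaces the fragile ctx->dispatching flag with a notify_me counter: a thread adds 2 to it before re-checking for work and blocking, and aio_notify() only pays for the expensive wakeup when the counter is non-zero. The standalone program below sketches that handshake with C11 atomics and a pipe standing in for ctx->notifier. It is an illustration only, not QEMU code: the names notify_me and req mirror the diff, everything else is invented for the demo. Build with cc -pthread.

/* Sketch of the notify_me handshake.  Illustration, not the QEMU API. */
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int notify_me;    /* like ctx->notify_me */
static atomic_int req;          /* published work counter */
static int fds[2];              /* stands in for ctx->notifier */

static void *waiter(void *unused)
{
    int done = 0;
    char buf[16];

    while (done < 3) {
        atomic_fetch_add(&notify_me, 2);   /* advertise: about to block */
        if (atomic_load(&req) == done) {   /* re-check *after* advertising */
            struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
            poll(&pfd, 1, -1);             /* safe: notifier saw notify_me */
        }
        atomic_fetch_sub(&notify_me, 2);
        while (read(fds[0], buf, sizeof(buf)) > 0) {
            /* drain pending wakeups, like aio_notify_accept() */
        }
        done = atomic_load(&req);          /* run the "bottom half" */
        printf("handled up to request %d\n", done);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    pipe(fds);
    fcntl(fds[0], F_SETFL, O_NONBLOCK);
    pthread_create(&t, NULL, waiter, NULL);
    for (int i = 0; i < 3; i++) {
        atomic_fetch_add(&req, 1);         /* publish work first... */
        if (atomic_load(&notify_me)) {     /* ...then look for sleepers */
            write(fds[1], "x", 1);         /* slow path: kick the waiter */
        }
        usleep(10000);
    }
    pthread_join(t, NULL);
    return 0;
}

The sequentially consistent atomics give the Dekker-style guarantee the series relies on: either the waiter sees the new request before blocking, or the notifier sees notify_me and writes the wakeup byte, so no wakeup is lost.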
aio-win32.c (48 lines changed)

@@ -279,30 +279,25 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool was_dispatching, progress, have_select_revents, first;
+    bool progress, have_select_revents, first;
     int count;
     int timeout;
 
     aio_context_acquire(ctx);
-    have_select_revents = aio_prepare(ctx);
-    if (have_select_revents) {
-        blocking = false;
-    }
-
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
+
+    have_select_revents = aio_prepare(ctx);
 
     ctx->walking_handlers++;
 
@@ -317,26 +312,36 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
     first = true;
 
-    /* wait until next event */
-    while (count > 0) {
+    /* ctx->notifier is always registered.  */
+    assert(count > 0);
+
+    /* Multiple iterations, all of them non-blocking except the first,
+     * may be necessary to process all pending events.  After the first
+     * WaitForMultipleObjects call ctx->notify_me will be decremented.
+     */
+    do {
         HANDLE event;
         int ret;
 
-        timeout = blocking
+        timeout = blocking && !have_select_revents
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         if (timeout) {
             aio_context_release(ctx);
         }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (blocking) {
+            assert(first);
+            atomic_sub(&ctx->notify_me, 2);
+        }
         if (timeout) {
             aio_context_acquire(ctx);
         }
-        aio_set_dispatching(ctx, true);
 
-        if (first && aio_bh_poll(ctx)) {
-            progress = true;
+        if (first) {
+            aio_notify_accept(ctx);
+            progress |= aio_bh_poll(ctx);
+            first = false;
        }
-        first = false;
 
         /* if we have any signaled events, dispatch event */
         event = NULL;
@@ -351,11 +356,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
         blocking = false;
 
         progress |= aio_dispatch_handlers(ctx, event);
-    }
+    } while (count > 0);
 
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
     return progress;
 }
async.c (35 lines changed)

@@ -184,6 +184,8 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
+    atomic_or(&ctx->notify_me, 1);
+
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 
@@ -200,6 +202,9 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             return true;
@@ -254,24 +259,22 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+void aio_notify(AioContext *ctx)
 {
-    ctx->dispatching = dispatching;
-    if (!dispatching) {
-        /* Write ctx->dispatching before reading e.g. bh->scheduled.
-         * Optimization: this is only needed when we're entering the "unsafe"
-         * phase where other threads must call event_notifier_set.
-         */
-        smp_mb();
+    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     */
+    smp_mb();
+    if (ctx->notify_me) {
+        event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
     }
 }
 
-void aio_notify(AioContext *ctx)
+void aio_notify_accept(AioContext *ctx)
 {
-    /* Write e.g. bh->scheduled before reading ctx->dispatching. */
-    smp_mb();
-    if (!ctx->dispatching) {
-        event_notifier_set(&ctx->notifier);
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
     }
 }
 
@@ -286,6 +289,10 @@ static void aio_rfifolock_cb(void *opaque)
     aio_notify(opaque);
 }
 
+static void event_notifier_dummy_cb(EventNotifier *e)
+{
+}
+
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
@@ -300,7 +307,7 @@ AioContext *aio_context_new(Error **errp)
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear);
+                           event_notifier_dummy_cb);
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
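aio_notify() now records a pending notification in ctx->notified, and aio_notify_accept() consumes it with an atomic exchange, draining the EventNotifier only when something was really pending. A minimal standalone sketch of that consume-at-most-once pattern, assuming C11 atomics rather than QEMU's atomic.h wrappers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool notified;

static void notify(void)            /* aio_notify() slow path */
{
    /* event_notifier_set(&ctx->notifier) would go here */
    atomic_store(&notified, true);
}

static void notify_accept(void)     /* aio_notify_accept() */
{
    if (atomic_exchange(&notified, false)) {
        /* event_notifier_test_and_clear(&ctx->notifier) goes here:
         * the fd is drained only when a notification was pending */
        puts("drained one pending notification");
    }
}

int main(void)
{
    notify();
    notify();            /* notifications coalesce... */
    notify_accept();     /* ...and are consumed at most once */
    notify_accept();     /* no-op: nothing pending */
    return 0;
}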
block/mirror.c

@@ -388,7 +388,7 @@ static void coroutine_fn mirror_run(void *opaque)
     MirrorBlockJob *s = opaque;
     MirrorExitData *data;
     BlockDriverState *bs = s->common.bs;
-    int64_t sector_num, end, sectors_per_chunk, length;
+    int64_t sector_num, end, length;
     uint64_t last_pause_ns;
     BlockDriverInfo bdi;
     char backing_filename[2]; /* we only need 2 characters because we are only
@@ -442,7 +442,6 @@ static void coroutine_fn mirror_run(void *opaque)
         goto immediate_exit;
     }
 
-    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
     mirror_free_init(s);
 
     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -450,7 +449,9 @@ static void coroutine_fn mirror_run(void *opaque)
         /* First part, loop on the sectors and initialize the dirty bitmap.  */
         BlockDriverState *base = s->base;
         for (sector_num = 0; sector_num < end; ) {
-            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
+            /* Just to make sure we are not exceeding int limit. */
+            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
+                                 end - sector_num);
             int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
 
             if (now - last_pause_ns > SLICE_TIME) {
@@ -462,8 +463,7 @@ static void coroutine_fn mirror_run(void *opaque)
                 goto immediate_exit;
             }
 
-            ret = bdrv_is_allocated_above(bs, base,
-                                          sector_num, next - sector_num, &n);
+            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
 
             if (ret < 0) {
                 goto immediate_exit;
@@ -472,10 +472,8 @@ static void coroutine_fn mirror_run(void *opaque)
             assert(n > 0);
             if (ret == 1) {
                 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
-                sector_num = next;
-            } else {
-                sector_num += n;
             }
+            sector_num += n;
         }
     }
 
configure (4 lines changed, vendored)

@@ -2183,6 +2183,7 @@ if test "$gnutls_nettle" != "no"; then
    if $pkg_config --exists "nettle"; then
        nettle_cflags=`$pkg_config --cflags nettle`
        nettle_libs=`$pkg_config --libs nettle`
+       nettle_version=`$pkg_config --modversion nettle`
        libs_softmmu="$nettle_libs $libs_softmmu"
        libs_tools="$nettle_libs $libs_tools"
        QEMU_CFLAGS="$QEMU_CFLAGS $nettle_cflags"
@@ -4490,7 +4491,7 @@ echo "GTK support       $gtk"
 echo "GNUTLS support    $gnutls"
 echo "GNUTLS hash       $gnutls_hash"
 echo "GNUTLS gcrypt     $gnutls_gcrypt"
-echo "GNUTLS nettle     $gnutls_nettle"
+echo "GNUTLS nettle     $gnutls_nettle ${gnutls_nettle+($nettle_version)}"
 echo "VTE support       $vte"
 echo "curses support    $curses"
 echo "curl support      $curl"
@@ -4858,6 +4859,7 @@ if test "$gnutls_gcrypt" = "yes" ; then
 fi
 if test "$gnutls_nettle" = "yes" ; then
   echo "CONFIG_GNUTLS_NETTLE=y" >> $config_host_mak
+  echo "CONFIG_NETTLE_VERSION_MAJOR=${nettle_version%%.*}" >> $config_host_mak
 fi
 if test "$vte" = "yes" ; then
   echo "CONFIG_VTE=y" >> $config_host_mak
crypto/cipher-nettle.c

@@ -23,12 +23,51 @@
 #include <nettle/des.h>
 #include <nettle/cbc.h>
 
+#if CONFIG_NETTLE_VERSION_MAJOR < 3
+typedef nettle_crypt_func nettle_cipher_func;
+
+typedef void * cipher_ctx_t;
+typedef unsigned cipher_length_t;
+#else
+typedef const void * cipher_ctx_t;
+typedef size_t cipher_length_t;
+#endif
+
+static nettle_cipher_func aes_encrypt_wrapper;
+static nettle_cipher_func aes_decrypt_wrapper;
+static nettle_cipher_func des_encrypt_wrapper;
+static nettle_cipher_func des_decrypt_wrapper;
+
+static void aes_encrypt_wrapper(cipher_ctx_t ctx, cipher_length_t length,
+                                uint8_t *dst, const uint8_t *src)
+{
+    aes_encrypt(ctx, length, dst, src);
+}
+
+static void aes_decrypt_wrapper(cipher_ctx_t ctx, cipher_length_t length,
+                                uint8_t *dst, const uint8_t *src)
+{
+    aes_decrypt(ctx, length, dst, src);
+}
+
+static void des_encrypt_wrapper(cipher_ctx_t ctx, cipher_length_t length,
+                                uint8_t *dst, const uint8_t *src)
+{
+    des_encrypt(ctx, length, dst, src);
+}
+
+static void des_decrypt_wrapper(cipher_ctx_t ctx, cipher_length_t length,
+                                uint8_t *dst, const uint8_t *src)
+{
+    des_decrypt(ctx, length, dst, src);
+}
+
 typedef struct QCryptoCipherNettle QCryptoCipherNettle;
 struct QCryptoCipherNettle {
     void *ctx_encrypt;
     void *ctx_decrypt;
-    nettle_crypt_func *alg_encrypt;
-    nettle_crypt_func *alg_decrypt;
+    nettle_cipher_func *alg_encrypt;
+    nettle_cipher_func *alg_decrypt;
     uint8_t *iv;
     size_t niv;
 };
@@ -83,8 +122,8 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
         des_set_key(ctx->ctx_encrypt, rfbkey);
         g_free(rfbkey);
 
-        ctx->alg_encrypt = (nettle_crypt_func *)des_encrypt;
-        ctx->alg_decrypt = (nettle_crypt_func *)des_decrypt;
+        ctx->alg_encrypt = des_encrypt_wrapper;
+        ctx->alg_decrypt = des_decrypt_wrapper;
 
         ctx->niv = DES_BLOCK_SIZE;
         break;
@@ -98,8 +137,8 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
         aes_set_encrypt_key(ctx->ctx_encrypt, nkey, key);
         aes_set_decrypt_key(ctx->ctx_decrypt, nkey, key);
 
-        ctx->alg_encrypt = (nettle_crypt_func *)aes_encrypt;
-        ctx->alg_decrypt = (nettle_crypt_func *)aes_decrypt;
+        ctx->alg_encrypt = aes_encrypt_wrapper;
+        ctx->alg_decrypt = aes_decrypt_wrapper;
 
         ctx->niv = AES_BLOCK_SIZE;
         break;
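Why wrappers instead of the old casts: nettle 3 changed the prototype of nettle_crypt_func (the context parameter became const void * and the length became size_t), so casting aes_encrypt or des_encrypt to that function-pointer type no longer matches the real signature, and calling through a mismatched pointer type is undefined behavior. The thin wrappers above have exactly the right type under both nettle 2 and 3, with the CONFIG_NETTLE_VERSION_MAJOR value generated by the configure hunk earlier selecting the matching typedefs.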
@@ -42,7 +42,7 @@ public:
     stream_ = stream;
   }
 
-  void SetPrintf(int (*printf_fn)(FILE *, const char *, ...)) {
+  void SetPrintf(fprintf_function printf_fn) {
     printf_ = printf_fn;
   }
 
@@ -53,7 +53,7 @@ protected:
   }
 
 private:
-  int (*printf_)(FILE *, const char *, ...);
+  fprintf_function printf_;
   FILE *stream_;
 };
 
docs/aio_notify.promela

@@ -1,5 +1,5 @@
 /*
- * This model describes the interaction between aio_set_dispatching()
+ * This model describes the interaction between ctx->notify_me
  * and aio_notify().
  *
  * Author: Paolo Bonzini <pbonzini@redhat.com>
@@ -14,57 +14,53 @@
  *     spin -a docs/aio_notify.promela
  *     gcc -O2 pan.c
  *     ./a.out -a
+ *
+ * To verify it (with a bug planted in the model):
+ *     spin -a -DBUG docs/aio_notify.promela
+ *     gcc -O2 pan.c
+ *     ./a.out -a
  */
 
 #define MAX   4
 #define LAST  (1 << (MAX - 1))
 #define FINAL ((LAST << 1) - 1)
 
-bool dispatching;
+bool notify_me;
 bool event;
 
-int req, done;
+int req;
+int done;
 
 active proctype waiter()
 {
-    int fetch, blocking;
+    int fetch;
 
     do
-       :: done != FINAL -> {
-           // Computing "blocking" is separate from execution of the
-           // "bottom half"
-           blocking = (req == 0);
-
-           // This is our "bottom half"
+       :: true -> {
+           notify_me++;
+
+           if
+#ifndef BUG
+              :: (req > 0) -> skip;
+#endif
+              :: else ->
+                  // Wait for a nudge from the other side
+                  do
+                     :: event == 1 -> { event = 0; break; }
+                  od;
+           fi;
+
+           notify_me--;
+
            atomic { fetch = req; req = 0; }
           done = done | fetch;
-
-           // Wait for a nudge from the other side
-           do
-              :: event == 1 -> { event = 0; break; }
-              :: !blocking  -> break;
-           od;
-
-           dispatching = 1;
-
-           // If you are simulating this model, you may want to add
-           // something like this here:
-           //
-           //      int foo; foo++; foo++; foo++;
-           //
-           // This only wastes some time and makes it more likely
-           // that the notifier process hits the "fast path".
-
-           dispatching = 0;
        }
-       :: else -> break;
    od
 }
 
 active proctype notifier()
 {
     int next = 1;
-    int sets = 0;
 
     do
        :: next <= LAST -> {
@@ -74,8 +70,8 @@ active proctype notifier()
 
           // aio_notify
           if
-             :: dispatching == 0 -> sets++; event = 1;
-             :: else             -> skip;
+             :: notify_me == 1 -> event = 1;
+             :: else           -> printf("Skipped event_notifier_set\n"); skip;
           fi;
 
           // Test both synchronous and asynchronous delivery
@@ -86,19 +82,12 @@ active proctype notifier()
              :: 1 -> skip;
           fi;
        }
-       :: else -> break;
     od;
-    printf("Skipped %d event_notifier_set\n", MAX - sets);
 }
 
-#define p (done == FINAL)
-
-never {
-    do
-       :: 1                      // after an arbitrarily long prefix
-       :: p -> break             // p becomes true
-    od;
-    do
-       :: !p -> accept: break    // it then must remains true forever after
-    od
+never { /* [] done < FINAL */
+accept_init:
+    do
+       :: done < FINAL -> skip;
+    od;
 }
docs/aio_notify_accept.promela (new file, 152 lines)

@@ -0,0 +1,152 @@
/*
 * This model describes the interaction between ctx->notified
 * and ctx->notifier.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This file is in the public domain.  If you really want a license,
 * the WTFPL will do.
 *
 * To verify the buggy version:
 *     spin -a -DBUG1 docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 * (or -DBUG2)
 *
 * To verify the fixed version:
 *     spin -a docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 *
 * Add -DCHECK_REQ to test an alternative invariant and the
 * "notify_me" optimization.
 */

int notify_me;
bool notified;
bool event;
bool req;
bool notifier_done;

#ifdef CHECK_REQ
#define USE_NOTIFY_ME 1
#else
#define USE_NOTIFY_ME 0
#endif

#ifdef BUG
#error Please define BUG1 or BUG2 instead.
#endif

active proctype notifier()
{
    do
        :: true -> {
            req = 1;
            if
               :: !USE_NOTIFY_ME || notify_me ->
#if defined BUG1
                   /* CHECK_REQ does not detect this bug! */
                   notified = 1;
                   event = 1;
#elif defined BUG2
                   if
                      :: !notified -> event = 1;
                      :: else      -> skip;
                   fi;
                   notified = 1;
#else
                   event = 1;
                   notified = 1;
#endif
               :: else -> skip;
            fi
        }
        :: true -> break;
    od;
    notifier_done = 1;
}

#define AIO_POLL                                     \
    notify_me++;                                     \
    if                                               \
       :: !req -> {                                  \
           if                                        \
              :: event -> skip;                      \
           fi;                                       \
       }                                             \
       :: else -> skip;                              \
    fi;                                              \
    notify_me--;                                     \
                                                     \
    atomic { old = notified; notified = 0; }         \
    if                                               \
       :: old  -> event = 0;                         \
       :: else -> skip;                              \
    fi;                                              \
                                                     \
    req = 0;

active proctype waiter()
{
    bool old;

    do
       :: true -> AIO_POLL;
    od;
}

/* Same as waiter(), but disappears after a while.  */
active proctype temporary_waiter()
{
    bool old;

    do
       :: true -> AIO_POLL;
       :: true -> break;
    od;
}

#ifdef CHECK_REQ
never {
    do
       :: req  -> goto accept_if_req_not_eventually_false;
       :: true -> skip;
    od;

accept_if_req_not_eventually_false:
    if
       :: req -> goto accept_if_req_not_eventually_false;
    fi;
    assert(0);
}

#else
/* There must be infinitely many transitions of event as long
 * as the notifier does not exit.
 *
 * If event stayed always true, the waiters would be busy looping.
 * If event stayed always false, the waiters would be sleeping
 * forever.
 */
never {
    do
       :: !event -> goto accept_if_event_not_eventually_true;
       :: event  -> goto accept_if_event_not_eventually_false;
       :: true   -> skip;
    od;

accept_if_event_not_eventually_true:
    if
       :: !event && notifier_done  -> do :: true -> skip; od;
       :: !event && !notifier_done -> goto accept_if_event_not_eventually_true;
    fi;
    assert(0);

accept_if_event_not_eventually_false:
    if
       :: event -> goto accept_if_event_not_eventually_false;
    fi;
    assert(0);
}
#endif
docs/aio_notify_bug.promela (new file, 140 lines)

@@ -0,0 +1,140 @@
/*
 * This model describes a bug in aio_notify.  If ctx->notifier is
 * cleared too late, a wakeup could be lost.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This file is in the public domain.  If you really want a license,
 * the WTFPL will do.
 *
 * To verify the buggy version:
 *     spin -a -DBUG docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 *
 * To verify the fixed version:
 *     spin -a docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 *
 * Add -DCHECK_REQ to test an alternative invariant and the
 * "notify_me" optimization.
 */

int notify_me;
bool event;
bool req;
bool notifier_done;

#ifdef CHECK_REQ
#define USE_NOTIFY_ME 1
#else
#define USE_NOTIFY_ME 0
#endif

active proctype notifier()
{
    do
        :: true -> {
            req = 1;
            if
               :: !USE_NOTIFY_ME || notify_me -> event = 1;
               :: else -> skip;
            fi
        }
        :: true -> break;
    od;
    notifier_done = 1;
}

#ifdef BUG
#define AIO_POLL                                     \
    notify_me++;                                     \
    if                                               \
       :: !req -> {                                  \
           if                                        \
              :: event -> skip;                      \
           fi;                                       \
       }                                             \
       :: else -> skip;                              \
    fi;                                              \
    notify_me--;                                     \
                                                     \
    req = 0;                                         \
    event = 0;
#else
#define AIO_POLL                                     \
    notify_me++;                                     \
    if                                               \
       :: !req -> {                                  \
           if                                        \
              :: event -> skip;                      \
           fi;                                       \
       }                                             \
       :: else -> skip;                              \
    fi;                                              \
    notify_me--;                                     \
                                                     \
    event = 0;                                       \
    req = 0;
#endif

active proctype waiter()
{
    do
       :: true -> AIO_POLL;
    od;
}

/* Same as waiter(), but disappears after a while.  */
active proctype temporary_waiter()
{
    do
       :: true -> AIO_POLL;
       :: true -> break;
    od;
}

#ifdef CHECK_REQ
never {
    do
       :: req  -> goto accept_if_req_not_eventually_false;
       :: true -> skip;
    od;

accept_if_req_not_eventually_false:
    if
       :: req -> goto accept_if_req_not_eventually_false;
    fi;
    assert(0);
}

#else
/* There must be infinitely many transitions of event as long
 * as the notifier does not exit.
 *
 * If event stayed always true, the waiters would be busy looping.
 * If event stayed always false, the waiters would be sleeping
 * forever.
 */
never {
    do
       :: !event -> goto accept_if_event_not_eventually_true;
       :: event  -> goto accept_if_event_not_eventually_false;
       :: true   -> skip;
    od;

accept_if_event_not_eventually_true:
    if
       :: !event && notifier_done  -> do :: true -> skip; od;
       :: !event && !notifier_done -> goto accept_if_event_not_eventually_true;
    fi;
    assert(0);

accept_if_event_not_eventually_false:
    if
       :: event -> goto accept_if_event_not_eventually_false;
    fi;
    assert(0);
}
#endif
docs/specs/vhost-user.txt

@@ -127,11 +127,6 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
-Multi queue support
--------------------
-The protocol supports multiple queues by setting all index fields in the sent
-messages to a properly calculated value.
-
 Message types
 -------------
 
hw/acpi/ich9.c

@@ -206,9 +206,6 @@ const VMStateDescription vmstate_ich9_pm = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_memhp_state,
-        NULL
-    },
-    .subsections = (const VMStateDescription*[]) {
         &vmstate_tco_io_state,
         NULL
     }
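The hunk above merges what were two separate .subsections initializers for the same field. In C, a repeated designated initializer silently overrides the earlier one, so the first list (&vmstate_memhp_state) was being discarded entirely. A standalone demonstration of that pitfall (names are illustrative, not the QEMU structures):

#include <stdio.h>

struct desc {
    const char *const *subsections;
};

static const char *const first[]  = { "memhp", NULL };
static const char *const second[] = { "tco",   NULL };

static const struct desc d = {
    .subsections = first,
    .subsections = second,  /* silently overrides the line above;
                               GCC only warns with -Woverride-init */
};

int main(void)
{
    printf("%s\n", d.subsections[0]);   /* prints "tco", not "memhp" */
    return 0;
}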
hw/arm/musicpal.c

@@ -187,11 +187,6 @@ static void eth_rx_desc_get(uint32_t addr, mv88w8618_rx_desc *desc)
     le32_to_cpus(&desc->next);
 }
 
-static int eth_can_receive(NetClientState *nc)
-{
-    return 1;
-}
-
 static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 {
     mv88w8618_eth_state *s = qemu_get_nic_opaque(nc);
@@ -381,7 +376,6 @@ static void eth_cleanup(NetClientState *nc)
 static NetClientInfo net_mv88w8618_info = {
     .type = NET_CLIENT_OPTIONS_KIND_NIC,
     .size = sizeof(NICState),
-    .can_receive = eth_can_receive,
     .receive = eth_receive,
     .cleanup = eth_cleanup,
 };
hw/arm/xlnx-zynqmp.c

@@ -144,6 +144,7 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
         } else {
             s->boot_cpu_ptr = &s->apu_cpu[i];
         }
+        g_free(name);
 
         object_property_set_int(OBJECT(&s->apu_cpu[i]), GIC_BASE_ADDR,
                                 "reset-cbar", &err);
@@ -181,6 +182,7 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
         } else {
             s->boot_cpu_ptr = &s->rpu_cpu[i];
         }
+        g_free(name);
 
         object_property_set_bool(OBJECT(&s->rpu_cpu[i]), true, "reset-hivecs",
                                 &err);
hw/display/qxl.c

@@ -271,6 +271,11 @@ static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay)
                 QXL_COOKIE_TYPE_POST_LOAD_MONITORS_CONFIG,
                 0));
     } else {
+#if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */
+        if (qxl->max_outputs) {
+            spice_qxl_set_max_monitors(&qxl->ssd.qxl, qxl->max_outputs);
+        }
+#endif
         qxl->guest_monitors_config = qxl->ram->monitors_config;
         spice_qxl_monitors_config_async(&qxl->ssd.qxl,
                                         qxl->ram->monitors_config,
@@ -991,6 +996,7 @@ static int interface_client_monitors_config(QXLInstance *sin,
     PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl);
     QXLRom *rom = memory_region_get_ram_ptr(&qxl->rom_bar);
     int i;
+    unsigned max_outputs = ARRAY_SIZE(rom->client_monitors_config.heads);
 
     if (qxl->revision < 4) {
         trace_qxl_client_monitors_config_unsupported_by_device(qxl->id,
@@ -1013,17 +1019,23 @@ static int interface_client_monitors_config(QXLInstance *sin,
     if (!monitors_config) {
         return 1;
     }
+
+#if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */
+    /* limit number of outputs based on setting limit */
+    if (qxl->max_outputs && qxl->max_outputs <= max_outputs) {
+        max_outputs = qxl->max_outputs;
+    }
+#endif
+
     memset(&rom->client_monitors_config, 0,
            sizeof(rom->client_monitors_config));
     rom->client_monitors_config.count = monitors_config->num_of_monitors;
     /* monitors_config->flags ignored */
-    if (rom->client_monitors_config.count >=
-        ARRAY_SIZE(rom->client_monitors_config.heads)) {
+    if (rom->client_monitors_config.count >= max_outputs) {
         trace_qxl_client_monitors_config_capped(qxl->id,
                                                 monitors_config->num_of_monitors,
-                                                ARRAY_SIZE(rom->client_monitors_config.heads));
-        rom->client_monitors_config.count =
-            ARRAY_SIZE(rom->client_monitors_config.heads);
+                                                max_outputs);
+        rom->client_monitors_config.count = max_outputs;
     }
     for (i = 0 ; i < rom->client_monitors_config.count ; ++i) {
         VDAgentMonConfig *monitor = &monitors_config->monitors[i];
@@ -2274,6 +2286,9 @@ static Property qxl_properties[] = {
     DEFINE_PROP_UINT32("vram64_size_mb", PCIQXLDevice, vram_size_mb, -1),
     DEFINE_PROP_UINT32("vgamem_mb", PCIQXLDevice, vgamem_size_mb, 16),
     DEFINE_PROP_INT32("surfaces", PCIQXLDevice, ssd.num_surfaces, 1024),
+#if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */
+    DEFINE_PROP_UINT16("max_outputs", PCIQXLDevice, max_outputs, 0),
+#endif
     DEFINE_PROP_END_OF_LIST(),
 };
 
hw/display/qxl.h

@@ -99,6 +99,9 @@ typedef struct PCIQXLDevice {
     QXLModes           *modes;
     uint32_t           rom_size;
     MemoryRegion       rom_bar;
+#if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */
+    uint16_t           max_outputs;
+#endif
 
     /* vram pci bar */
     uint32_t           vram_size;
hw/ide/ahci.c

@@ -279,8 +279,13 @@ static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
             break;
         case PORT_CMD:
             /* Block any Read-only fields from being set;
-             * including LIST_ON and FIS_ON. */
-            pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | (val & ~PORT_CMD_RO_MASK);
+             * including LIST_ON and FIS_ON.
+             * The spec requires to set ICC bits to zero after the ICC change
+             * is done. We don't support ICC state changes, therefore always
+             * force the ICC bits to zero.
+             */
+            pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
+                      (val & ~(PORT_CMD_RO_MASK|PORT_CMD_ICC_MASK));
 
             /* Check FIS RX and CLB engines, allow transition to false: */
             ahci_cond_start_engines(&s->dev[port], true);
hw/net/etraxfs_eth.c

@@ -520,11 +520,6 @@ static int eth_match_groupaddr(ETRAXFSEthState *eth, const unsigned char *sa)
     return match;
 }
 
-static int eth_can_receive(NetClientState *nc)
-{
-    return 1;
-}
-
 static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 {
     unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -584,7 +579,6 @@ static const MemoryRegionOps eth_ops = {
 static NetClientInfo net_etraxfs_info = {
     .type = NET_CLIENT_OPTIONS_KIND_NIC,
     .size = sizeof(NICState),
-    .can_receive = eth_can_receive,
     .receive = eth_receive,
     .link_status_changed = eth_set_link,
 };
hw/net/lan9118.c

@@ -461,11 +461,6 @@ static void lan9118_reset(DeviceState *d)
     lan9118_reload_eeprom(s);
 }
 
-static int lan9118_can_receive(NetClientState *nc)
-{
-    return 1;
-}
-
 static void rx_fifo_push(lan9118_state *s, uint32_t val)
 {
     int fifo_pos;
@@ -1312,7 +1307,6 @@ static const MemoryRegionOps lan9118_16bit_mem_ops = {
 static NetClientInfo net_lan9118_info = {
     .type = NET_CLIENT_OPTIONS_KIND_NIC,
     .size = sizeof(NICState),
-    .can_receive = lan9118_can_receive,
     .receive = lan9118_receive,
     .link_status_changed = lan9118_set_link,
 };
hw/net/vhost_net.c

@@ -160,7 +160,6 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
-    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type);
@@ -287,7 +286,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          &file);
+                                          NULL);
             assert(r >= 0);
         }
     }
hw/net/virtio-net.c

@@ -162,6 +162,8 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     virtio_net_vhost_status(n, status);
 
     for (i = 0; i < n->max_queues; i++) {
+        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+        bool queue_started;
         q = &n->vqs[i];
 
         if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
@@ -169,12 +171,18 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
         } else {
             queue_status = status;
         }
+        queue_started =
+            virtio_net_started(n, queue_status) && !n->vhost_started;
+
+        if (queue_started) {
+            qemu_flush_queued_packets(ncs);
+        }
 
         if (!q->tx_waiting) {
             continue;
         }
 
-        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+        if (queue_started) {
             if (q->tx_timer) {
                 timer_mod(q->tx_timer,
                           qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -1142,7 +1150,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         ssize_t ret, len;
         unsigned int out_num = elem.out_num;
         struct iovec *out_sg = &elem.out_sg[0];
-        struct iovec sg[VIRTQUEUE_MAX_SIZE];
+        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+        struct virtio_net_hdr_mrg_rxbuf mhdr;
 
         if (out_num < 1) {
             error_report("virtio-net header not in first element");
@@ -1150,13 +1159,25 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         if (n->has_vnet_hdr) {
-            if (out_sg[0].iov_len < n->guest_hdr_len) {
+            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+                n->guest_hdr_len) {
                 error_report("virtio-net header incorrect");
                 exit(1);
             }
-            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+            if (virtio_needs_swap(vdev)) {
+                virtio_net_hdr_swap(vdev, (void *) &mhdr);
+                sg2[0].iov_base = &mhdr;
+                sg2[0].iov_len = n->guest_hdr_len;
+                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+                                   out_sg, out_num,
+                                   n->guest_hdr_len, -1);
+                if (out_num == VIRTQUEUE_MAX_SIZE) {
+                    goto drop;
+                }
+                out_num += 1;
+                out_sg = sg2;
+            }
         }
 
         /*
          * If host wants to see the guest header as is, we can
          * pass it on unchanged. Otherwise, copy just the parts
@@ -1186,7 +1207,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         len += ret;
-
+drop:
         virtqueue_push(q->tx_vq, &elem, 0);
         virtio_notify(vdev, q->tx_vq);
 
@@ -1306,9 +1327,86 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_add_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+
+    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              virtio_net_tx_timer,
+                                              &n->vqs[index]);
+    } else {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+    }
+
+    n->vqs[index].tx_waiting = 0;
+    n->vqs[index].n = n;
+}
+
+static void virtio_net_del_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    VirtIONetQueue *q = &n->vqs[index];
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+
+    qemu_purge_queued_packets(nc);
+
+    virtio_del_queue(vdev, index * 2);
+    if (q->tx_timer) {
+        timer_del(q->tx_timer);
+        timer_free(q->tx_timer);
+    } else {
+        qemu_bh_delete(q->tx_bh);
+    }
+    virtio_del_queue(vdev, index * 2 + 1);
+}
+
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int old_num_queues = virtio_get_num_queues(vdev);
+    int new_num_queues = new_max_queues * 2 + 1;
+    int i;
+
+    assert(old_num_queues >= 3);
+    assert(old_num_queues % 2 == 1);
+
+    if (old_num_queues == new_num_queues) {
+        return;
+    }
+
+    /*
+     * We always need to remove and add ctrl vq if
+     * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
+     */
+    virtio_del_queue(vdev, old_num_queues - 1);
+
+    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+        /* new_num_queues < old_num_queues */
+        virtio_net_del_queue(n, i / 2);
+    }
+
+    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+        /* new_num_queues > old_num_queues */
+        virtio_net_add_queue(n, i / 2);
+    }
+
+    /* add ctrl_vq last */
+    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
 {
+    int max = multiqueue ? n->max_queues : 1;
+
     n->multiqueue = multiqueue;
+    virtio_net_change_num_queues(n, max);
 
     virtio_net_set_queues(n);
 }
@@ -1583,21 +1681,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     }
 
     for (i = 0; i < n->max_queues; i++) {
-        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
-        if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
-            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              virtio_net_tx_timer,
-                                              &n->vqs[i]);
-        } else {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
-            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
-        }
-
-        n->vqs[i].tx_waiting = 0;
-        n->vqs[i].n = n;
+        virtio_net_add_queue(n, i);
     }
 
     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
@@ -1651,7 +1735,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIONet *n = VIRTIO_NET(dev);
-    int i;
+    int i, max_queues;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
@@ -1666,18 +1750,9 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     g_free(n->mac_table.macs);
     g_free(n->vlans);
 
-    for (i = 0; i < n->max_queues; i++) {
-        VirtIONetQueue *q = &n->vqs[i];
-        NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
-        qemu_purge_queued_packets(nc);
-
-        if (q->tx_timer) {
-            timer_del(q->tx_timer);
-            timer_free(q->tx_timer);
-        } else if (q->tx_bh) {
-            qemu_bh_delete(q->tx_bh);
-        }
+    max_queues = n->multiqueue ? n->max_queues : 1;
+    for (i = 0; i < max_queues; i++) {
+        virtio_net_del_queue(n, i);
     }
 
     timer_del(n->announce_timer);
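virtio_net_change_num_queues() relies on a fixed virtqueue layout: rx for pair i sits at index 2*i, tx at 2*i+1, and the control queue is always last, which is why old_num_queues must be odd and at least 3. A tiny standalone check of that index arithmetic (the max_queues value is chosen arbitrarily for the demo):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    int max_queues = 4;                     /* rx/tx pairs */
    int num_queues = max_queues * 2 + 1;    /* + ctrl vq, always last */

    assert(num_queues >= 3 && num_queues % 2 == 1);
    for (int i = 0; i < max_queues; i++) {
        printf("pair %d: rx=%d tx=%d\n", i, i * 2, i * 2 + 1);
    }
    printf("ctrl=%d\n", num_queues - 1);
    return 0;
}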
hw/net/vmxnet3.c

@@ -885,6 +885,63 @@ vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
     }
 }
 
+/* In case packet was csum offloaded (either NEEDS_CSUM or DATA_VALID),
+ * the implementation always passes an RxCompDesc with a "Checksum
+ * calculated and found correct" to the OS (cnc=0 and tuc=1, see
+ * vmxnet3_rx_update_descr). This emulates the observed ESXi behavior.
+ *
+ * Therefore, if packet has the NEEDS_CSUM set, we must calculate
+ * and place a fully computed checksum into the tcp/udp header.
+ * Otherwise, the OS driver will receive a checksum-correct indication
+ * (CHECKSUM_UNNECESSARY), but with the actual tcp/udp checksum field
+ * having just the pseudo header csum value.
+ *
+ * While this is not a problem if packet is destined for local delivery,
+ * in the case the host OS performs forwarding, it will forward an
+ * incorrectly checksummed packet.
+ */
+static void vmxnet3_rx_need_csum_calculate(struct VmxnetRxPkt *pkt,
+                                           const void *pkt_data,
+                                           size_t pkt_len)
+{
+    struct virtio_net_hdr *vhdr;
+    bool isip4, isip6, istcp, isudp;
+    uint8_t *data;
+    int len;
+
+    if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) {
+        return;
+    }
+
+    vhdr = vmxnet_rx_pkt_get_vhdr(pkt);
+    if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
+        return;
+    }
+
+    vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    if (!(isip4 || isip6) || !(istcp || isudp)) {
+        return;
+    }
+
+    vmxnet3_dump_virt_hdr(vhdr);
+
+    /* Validate packet len: csum_start + csum_offset + length of csum field */
+    if (pkt_len < (vhdr->csum_start + vhdr->csum_offset + 2)) {
+        VMW_PKPRN("packet len:%zu < csum_start(%d) + csum_offset(%d) + 2, "
+                  "cannot calculate checksum",
+                  pkt_len, vhdr->csum_start, vhdr->csum_offset);
+        return;
+    }
+
+    data = (uint8_t *)pkt_data + vhdr->csum_start;
+    len = pkt_len - vhdr->csum_start;
+    /* Put the checksum obtained into the packet */
+    stw_be_p(data + vhdr->csum_offset, net_raw_checksum(data, len));
+
+    vhdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
+    vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
+}
+
 static void vmxnet3_rx_update_descr(struct VmxnetRxPkt *pkt,
                                     struct Vmxnet3_RxCompDesc *rxcd)
 {
@@ -1897,6 +1954,8 @@ vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size)
                                get_eth_packet_type(PKT_GET_ETH_HDR(buf)));
 
     if (vmxnet3_rx_filter_may_indicate(s, buf, size)) {
+        vmxnet_rx_pkt_set_protocols(s->rx_pkt, buf, size);
+        vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size);
         vmxnet_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping);
         bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1;
         if (bytes_indicated < size) {
@@ -92,9 +92,6 @@ void vmxnet_rx_pkt_attach_data(struct VmxnetRxPkt *pkt, const void *data,
     }

     pkt->tci = tci;
-
-    eth_get_protocols(data, len, &pkt->isip4, &pkt->isip6,
-                      &pkt->isudp, &pkt->istcp);
 }

 void vmxnet_rx_pkt_dump(struct VmxnetRxPkt *pkt)
@@ -131,6 +128,15 @@ size_t vmxnet_rx_pkt_get_total_len(struct VmxnetRxPkt *pkt)
     return pkt->tot_len;
 }

+void vmxnet_rx_pkt_set_protocols(struct VmxnetRxPkt *pkt, const void *data,
+                                 size_t len)
+{
+    assert(pkt);
+
+    eth_get_protocols(data, len, &pkt->isip4, &pkt->isip6,
+                      &pkt->isudp, &pkt->istcp);
+}
+
 void vmxnet_rx_pkt_get_protocols(struct VmxnetRxPkt *pkt,
                                  bool *isip4, bool *isip6,
                                  bool *isudp, bool *istcp)
@@ -54,6 +54,17 @@ void vmxnet_rx_pkt_init(struct VmxnetRxPkt **pkt, bool has_virt_hdr);
  */
 size_t vmxnet_rx_pkt_get_total_len(struct VmxnetRxPkt *pkt);

+/**
+ * parse and set packet analysis results
+ *
+ * @pkt: packet
+ * @data: pointer to the data buffer to be parsed
+ * @len: data length
+ *
+ */
+void vmxnet_rx_pkt_set_protocols(struct VmxnetRxPkt *pkt, const void *data,
+                                 size_t len);
+
 /**
  * fetches packet analysis results
  *
@@ -2101,12 +2101,10 @@ static void pci_del_option_rom(PCIDevice *pdev)
 }

 /*
- * if !offset
- * Reserve space and add capability to the linked list in pci config space
- *
  * if offset = 0,
  * Find and reserve space and add capability to the linked list
- * in pci config space */
+ * in pci config space
+ */
 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                        uint8_t offset, uint8_t size)
 {
@@ -873,8 +873,9 @@ static int timebase_post_load(void *opaque, int version_id)
      */
     host_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
     ns_diff = MAX(0, host_ns - tb_remote->time_of_the_day_ns);
-    migration_duration_ns = MIN(NSEC_PER_SEC, ns_diff);
-    migration_duration_tb = muldiv64(migration_duration_ns, freq, NSEC_PER_SEC);
+    migration_duration_ns = MIN(NANOSECONDS_PER_SECOND, ns_diff);
+    migration_duration_tb = muldiv64(migration_duration_ns, freq,
+                                     NANOSECONDS_PER_SECOND);
     guest_tb = tb_remote->guest_timebase + MIN(0, migration_duration_tb);

     tb_off_adj = guest_tb - cpu_get_real_ticks();
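As a quick check on the conversion above: muldiv64(ns, freq, NANOSECONDS_PER_SECOND) computes ns * freq / 10^9 with a wide intermediate, so for a hypothetical 512 MHz timebase a full one-second cap works out to 512,000,000 ticks. Clamping ns_diff to one second therefore bounds how far the guest timebase may be advanced to account for time spent in migration.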
@@ -422,6 +422,7 @@ static void realize(DeviceState *d, Error **errp)
         error_free(err);
         object_unref(OBJECT(drc));
     }
+    g_free(child_name);
     DPRINTFN("drc realize complete");
 }

@@ -51,7 +51,7 @@ void spapr_rtc_read(DeviceState *dev, struct tm *tm, uint32_t *ns)
     assert(rtc);

     guest_ns = host_ns + rtc->ns_offset;
-    guest_s = guest_ns / NSEC_PER_SEC;
+    guest_s = guest_ns / NANOSECONDS_PER_SECOND;

     if (tm) {
         gmtime_r(&guest_s, tm);
@@ -71,7 +71,7 @@ int spapr_rtc_import_offset(DeviceState *dev, int64_t legacy_offset)

     rtc = SPAPR_RTC(dev);

-    rtc->ns_offset = legacy_offset * NSEC_PER_SEC;
+    rtc->ns_offset = legacy_offset * NANOSECONDS_PER_SECOND;

     return 0;
 }
@@ -146,7 +146,7 @@ static void rtas_set_time_of_day(PowerPCCPU *cpu, sPAPRMachineState *spapr,

     host_ns = qemu_clock_get_ns(rtc_clock);

-    rtc->ns_offset = (new_s * NSEC_PER_SEC) - host_ns;
+    rtc->ns_offset = (new_s * NANOSECONDS_PER_SECOND) - host_ns;

     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 }
@@ -168,7 +168,7 @@ static void spapr_rtc_realize(DeviceState *dev, Error **errp)
     qemu_get_timedate(&tm, 0);
     host_s = mktimegm(&tm);
     rtc_ns = qemu_clock_get_ns(rtc_clock);
-    rtc->ns_offset = host_s * NSEC_PER_SEC - rtc_ns;
+    rtc->ns_offset = host_s * NANOSECONDS_PER_SECOND - rtc_ns;

     object_property_add_tm(OBJECT(rtc), "date", spapr_rtc_qom_date, NULL);
 }
@@ -56,7 +56,7 @@

 #define RTC_REINJECT_ON_ACK_COUNT 20
 #define RTC_CLOCK_RATE            32768
-#define UIP_HOLD_LENGTH           (8 * NSEC_PER_SEC / 32768)
+#define UIP_HOLD_LENGTH           (8 * NANOSECONDS_PER_SECOND / 32768)

 #define MC146818_RTC(obj) OBJECT_CHECK(RTCState, (obj), TYPE_MC146818_RTC)

@@ -105,7 +105,7 @@ static uint64_t get_guest_rtc_ns(RTCState *s)
     uint64_t guest_rtc;
     uint64_t guest_clock = qemu_clock_get_ns(rtc_clock);

-    guest_rtc = s->base_rtc * NSEC_PER_SEC
+    guest_rtc = s->base_rtc * NANOSECONDS_PER_SECOND
                  + guest_clock - s->last_update + s->offset;
     return guest_rtc;
 }
@@ -231,16 +231,17 @@ static void check_update_timer(RTCState *s)
         return;
     }

-    guest_nsec = get_guest_rtc_ns(s) % NSEC_PER_SEC;
+    guest_nsec = get_guest_rtc_ns(s) % NANOSECONDS_PER_SECOND;
     /* if UF is clear, reprogram to next second */
     next_update_time = qemu_clock_get_ns(rtc_clock)
-        + NSEC_PER_SEC - guest_nsec;
+        + NANOSECONDS_PER_SECOND - guest_nsec;

     /* Compute time of next alarm.  One second is already accounted
      * for in next_update_time.
      */
     next_alarm_sec = get_next_alarm(s);
-    s->next_alarm_time = next_update_time + (next_alarm_sec - 1) * NSEC_PER_SEC;
+    s->next_alarm_time = next_update_time +
+                         (next_alarm_sec - 1) * NANOSECONDS_PER_SECOND;

     if (s->cmos_data[RTC_REG_C] & REG_C_UF) {
         /* UF is set, but AF is clear.  Program the timer to target
@@ -456,7 +457,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
             /* if disabling set mode, update the time */
             if ((s->cmos_data[RTC_REG_B] & REG_B_SET) &&
                 (s->cmos_data[RTC_REG_A] & 0x70) <= 0x20) {
-                s->offset = get_guest_rtc_ns(s) % NSEC_PER_SEC;
+                s->offset = get_guest_rtc_ns(s) % NANOSECONDS_PER_SECOND;
                 rtc_set_time(s);
             }
         }
@@ -580,7 +581,7 @@ static void rtc_update_time(RTCState *s)
     int64_t guest_nsec;

     guest_nsec = get_guest_rtc_ns(s);
-    guest_sec = guest_nsec / NSEC_PER_SEC;
+    guest_sec = guest_nsec / NANOSECONDS_PER_SECOND;
     gmtime_r(&guest_sec, &ret);

     /* Is SET flag of Register B disabled? */
@@ -608,7 +609,8 @@ static int update_in_progress(RTCState *s)

     guest_nsec = get_guest_rtc_ns(s);
     /* UIP bit will be set at last 244us of every second. */
-    if ((guest_nsec % NSEC_PER_SEC) >= (NSEC_PER_SEC - UIP_HOLD_LENGTH)) {
+    if ((guest_nsec % NANOSECONDS_PER_SECOND) >=
+        (NANOSECONDS_PER_SECOND - UIP_HOLD_LENGTH)) {
         return 1;
     }
     return 0;
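The 244 us figure in the comment follows directly from the renamed constant: UIP_HOLD_LENGTH = 8 * NANOSECONDS_PER_SECOND / 32768 = 8 * 10^9 / 32768 ns ≈ 244,141 ns, i.e. eight periods of the 32768 Hz RTC clock reserved before each update cycle.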
@@ -283,6 +283,7 @@ typedef struct CCIDBus {
 typedef struct USBCCIDState {
     USBDevice dev;
     USBEndpoint *intr;
+    USBEndpoint *bulk;
     CCIDBus bus;
     CCIDCardState *card;
     BulkIn bulk_in_pending[BULK_IN_PENDING_NUM]; /* circular */
@@ -769,6 +770,7 @@ static void ccid_write_slot_status(USBCCIDState *s, CCID_Header *recv)
     h->b.bError = s->bError;
     h->bClockStatus = CLOCK_STATUS_RUNNING;
     ccid_reset_error_status(s);
+    usb_wakeup(s->bulk, 0);
 }

 static void ccid_write_parameters(USBCCIDState *s, CCID_Header *recv)
@@ -789,6 +791,7 @@ static void ccid_write_parameters(USBCCIDState *s, CCID_Header *recv)
     h->bProtocolNum = s->bProtocolNum;
     h->abProtocolDataStructure = s->abProtocolDataStructure;
     ccid_reset_error_status(s);
+    usb_wakeup(s->bulk, 0);
 }

 static void ccid_write_data_block(USBCCIDState *s, uint8_t slot, uint8_t seq,
@@ -810,6 +813,7 @@ static void ccid_write_data_block(USBCCIDState *s, uint8_t slot, uint8_t seq,
     }
     memcpy(p->abData, data, len);
     ccid_reset_error_status(s);
+    usb_wakeup(s->bulk, 0);
 }

 static void ccid_report_error_failed(USBCCIDState *s, uint8_t error)
@@ -1184,7 +1188,7 @@ void ccid_card_send_apdu_to_guest(CCIDCardState *card,
                                   uint8_t *apdu, uint32_t len)
 {
     DeviceState *qdev = DEVICE(card);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);
     Answer *answer;

@@ -1207,7 +1211,7 @@ void ccid_card_send_apdu_to_guest(CCIDCardState *card,
 void ccid_card_card_removed(CCIDCardState *card)
 {
     DeviceState *qdev = DEVICE(card);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);

     ccid_on_slot_change(s, false);
@@ -1218,7 +1222,7 @@ void ccid_card_card_removed(CCIDCardState *card)
 int ccid_card_ccid_attach(CCIDCardState *card)
 {
     DeviceState *qdev = DEVICE(card);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);

     DPRINTF(s, 1, "CCID Attach\n");
@@ -1231,7 +1235,7 @@ int ccid_card_ccid_attach(CCIDCardState *card)
 void ccid_card_ccid_detach(CCIDCardState *card)
 {
     DeviceState *qdev = DEVICE(card);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);

     DPRINTF(s, 1, "CCID Detach\n");
@@ -1244,7 +1248,7 @@ void ccid_card_ccid_detach(CCIDCardState *card)
 void ccid_card_card_error(CCIDCardState *card, uint64_t error)
 {
     DeviceState *qdev = DEVICE(card);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);

     s->bmCommandStatus = COMMAND_STATUS_FAILED;
@@ -1263,7 +1267,7 @@ void ccid_card_card_error(CCIDCardState *card, uint64_t error)
 void ccid_card_card_inserted(CCIDCardState *card)
 {
     DeviceState *qdev = DEVICE(card);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);

     s->bmCommandStatus = COMMAND_STATUS_NO_ERROR;
@@ -1275,7 +1279,7 @@ static int ccid_card_exit(DeviceState *qdev)
 {
     int ret = 0;
     CCIDCardState *card = CCID_CARD(qdev);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);

     if (ccid_card_inserted(s)) {
@@ -1289,7 +1293,7 @@ static int ccid_card_exit(DeviceState *qdev)
 static int ccid_card_init(DeviceState *qdev)
 {
     CCIDCardState *card = CCID_CARD(qdev);
-    USBDevice *dev = USB_DEVICE(qdev);
+    USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
     USBCCIDState *s = USB_CCID_DEV(dev);
     int ret = 0;

@@ -1319,6 +1323,7 @@ static void ccid_realize(USBDevice *dev, Error **errp)
                 NULL);
     qbus_set_hotplug_handler(BUS(&s->bus), DEVICE(dev), &error_abort);
     s->intr = usb_ep_get(dev, USB_TOKEN_IN, CCID_INT_IN_EP);
+    s->bulk = usb_ep_get(dev, USB_TOKEN_IN, CCID_BULK_IN_EP);
     s->card = NULL;
     s->migration_state = MIGRATION_NONE;
     s->migration_target_ip = 0;
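The repeated USB_DEVICE(qdev->parent_bus->parent) change reflects the device topology: a CCIDCardState sits on the ccid-bus, so the card's own DeviceState is not a USB device and casting it directly with USB_DEVICE(qdev) was wrong. The USB CCID reader is instead reached as the parent device of the card's parent bus, and the new usb_wakeup(s->bulk, 0) calls use the bulk-in endpoint cached in realize to prod the host controller whenever a response is queued.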
@@ -32,7 +32,7 @@
 #include "trace.h"

 #define FRAME_TIMER_FREQ 1000
-#define FRAME_TIMER_NS   (NSEC_PER_SEC / FRAME_TIMER_FREQ)
+#define FRAME_TIMER_NS   (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ)
 #define UFRAME_TIMER_NS  (FRAME_TIMER_NS / 8)

 #define NB_MAXINTRATE    8        // Max rate at which controller issues ints
@@ -2222,8 +2222,6 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
         if (xfer->running_retry) {
             DPRINTF("xhci: xfer nacked, stopping schedule\n");
             epctx->retry = xfer;
-            timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-                      epctx->interval * 125000);
             break;
         }
     }
@@ -889,6 +889,9 @@ static int usb_host_open(USBHostDevice *s, libusb_device *dev)
 fail:
     trace_usb_host_open_failure(bus_num, addr);
     if (s->dh != NULL) {
+        usb_host_release_interfaces(s);
+        libusb_reset_device(s->dh);
+        usb_host_attach_kernel(s);
         libusb_close(s->dh);
         s->dh = NULL;
         s->dev = NULL;
@@ -210,12 +210,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;

     case VHOST_SET_OWNER:
-        break;
-
     case VHOST_RESET_OWNER:
-        memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
-        msg.size = sizeof(m.state);
         break;

     case VHOST_SET_MEM_TABLE:
@@ -258,20 +253,17 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;

     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;

     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
-        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;

@@ -279,7 +271,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -321,7 +313,6 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
             error_report("Received bad msg size.");
             return -1;
         }
-        msg.state.index -= dev->vq_index;
         memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
         break;
     default:
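These hunks are part of backing out vhost-user multiqueue for the 2.4 release: with a single queue pair per device there is no per-device vq_index to add on the way out or subtract on the way back in, so the ring index travels unchanged. The mask in the VRING_KICK/CALL/ERR path still applies, e.g. msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK keeps the index in the low bits while leaving a high bit free to signal that no file descriptor accompanies the message (my reading of the vhost-user protocol's VHOST_USER_VRING_NOFD_MASK convention).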
@@ -77,6 +77,12 @@ static void virtio_rng_process(VirtIORNG *vrng)
         return;
     }

+    if (vrng->activate_timer) {
+        timer_mod(vrng->rate_limit_timer,
+                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms);
+        vrng->activate_timer = false;
+    }
+
     if (vrng->quota_remaining < 0) {
         quota = 0;
     } else {
@@ -138,8 +144,7 @@ static void check_rate_limit(void *opaque)

     vrng->quota_remaining = vrng->conf.max_bytes;
     virtio_rng_process(vrng);
-    timer_mod(vrng->rate_limit_timer,
-              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms);
+    vrng->activate_timer = true;
 }

 static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
@@ -195,13 +200,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)

     vrng->vq = virtio_add_queue(vdev, 8, handle_input);
     vrng->quota_remaining = vrng->conf.max_bytes;

     vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                           check_rate_limit, vrng);
-
-    timer_mod(vrng->rate_limit_timer,
-              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms);
+    vrng->activate_timer = true;

     register_savevm(dev, "virtio-rng", -1, 1, virtio_rng_save,
                     virtio_rng_load, vrng);
 }
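Taken together, these three hunks make the rate-limit timer lazy: rather than re-arming it unconditionally from realize and from its own callback, the device merely records activate_timer, and the timer is actually armed from virtio_rng_process, i.e. the next time the guest asks for entropy. The practical effect is that a guest which never touches the virtio-rng queue no longer causes a periodic host wakeup every period_ms.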
@@ -63,10 +63,30 @@ struct AioContext {
      */
     int walking_handlers;

-    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
-     * Writes protected by lock or BQL, reads are lockless.
+    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
+     * accessed with atomic primitives.  If this field is 0, everything
+     * (file descriptors, bottom halves, timers) will be re-evaluated
+     * before the next blocking poll(), thus the event_notifier_set call
+     * can be skipped.  If it is non-zero, you may need to wake up a
+     * concurrent aio_poll or the glib main event loop, making
+     * event_notifier_set necessary.
+     *
+     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
+     * between a call to aio_ctx_check and the next call to aio_ctx_dispatch.
+     * Bits 1-31 simply count the number of active calls to aio_poll
+     * that are in the prepare or poll phase.
+     *
+     * The GSource and aio_poll must use a different mechanism because
+     * there is no certainty that a call to GSource's prepare callback
+     * (via g_main_context_prepare) is indeed followed by check and
+     * dispatch.  It's not clear whether this would be a bug, but let's
+     * play safe and allow it---it will just cause extra calls to
+     * event_notifier_set until the next call to dispatch.
+     *
+     * Instead, the aio_poll calls include both the prepare and the
+     * dispatch phase, hence a simple counter is enough for them.
      */
-    bool dispatching;
+    uint32_t notify_me;

     /* lock to protect between bh's adders and deleter */
     QemuMutex bh_lock;
@@ -79,7 +99,19 @@ struct AioContext {
      */
     int walking_bh;

-    /* Used for aio_notify. */
+    /* Used by aio_notify.
+     *
+     * "notified" is used to avoid expensive event_notifier_test_and_clear
+     * calls.  When it is clear, the EventNotifier is clear, or one thread
+     * is going to clear "notified" before processing more events.  False
+     * positives are possible, i.e. "notified" could be set even though the
+     * EventNotifier is clear.
+     *
+     * Note that event_notifier_set *cannot* be optimized the same way.  For
+     * more information on the problem that would result, see "#ifdef BUG2"
+     * in the docs/aio_notify_accept.promela formal model.
+     */
+    bool notified;
     EventNotifier notifier;

     /* Thread pool for performing work and receiving completion callbacks */
@@ -89,9 +121,6 @@ struct AioContext {
     QEMUTimerListGroup tlg;
 };

-/* Used internally to synchronize aio_poll against qemu_bh_schedule.  */
-void aio_set_dispatching(AioContext *ctx, bool dispatching);
-
 /**
  * aio_context_new: Allocate a new AioContext.
  *
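The counter protocol described in the new comment compresses into very few lines. This is a simplified sketch with stand-in types (C11 stdatomic instead of QEMU's own atomic helpers), not the actual aio implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        atomic_uint notify_me;     /* stand-in for AioContext.notify_me */
    } CtxSketch;

    /* aio_poll side: advertise that a blocking poll is in progress. */
    static void poll_enter(CtxSketch *ctx) { atomic_fetch_add(&ctx->notify_me, 2); }
    static void poll_leave(CtxSketch *ctx) { atomic_fetch_sub(&ctx->notify_me, 2); }

    /* aio_notify side: the expensive wakeup is only needed when some
     * poller (bits 1-31) or the GSource (bit 0) might be sleeping. */
    static bool wakeup_needed(CtxSketch *ctx)
    {
        return atomic_load(&ctx->notify_me) != 0;
    }

Adding 2 rather than 1 is what keeps bit 0 reserved for the GSource's check/dispatch window.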
@@ -156,6 +185,24 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
  */
 void aio_notify(AioContext *ctx);

+/**
+ * aio_notify_accept: Acknowledge receiving an aio_notify.
+ *
+ * aio_notify() uses an EventNotifier in order to wake up a sleeping
+ * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
+ * usually rare, but the AioContext has to clear the EventNotifier on
+ * every aio_poll() or g_main_context_iteration() in order to avoid
+ * busy waiting.  This event_notifier_test_and_clear() cannot be done
+ * using the usual aio_context_set_event_notifier(), because it must
+ * be done before processing all events (file descriptors, bottom halves,
+ * timers).
+ *
+ * aio_notify_accept() is an optimized event_notifier_test_and_clear()
+ * that is specific to an AioContext's notifier; it is used internally
+ * to clear the EventNotifier only if aio_notify() had been called.
+ */
+void aio_notify_accept(AioContext *ctx);
+
 /**
  * aio_bh_poll: Poll bottom halves for an AioContext.
  *
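In the same spirit, the "notified" flag documented earlier lets aio_notify_accept() skip the event_notifier_test_and_clear() syscall when nothing was signalled. A hedged sketch, again with stand-in types rather than QEMU's real EventNotifier plumbing:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        atomic_bool notified;      /* stand-in for AioContext.notified */
    } NotifySketch;

    static void notify_sketch(NotifySketch *ctx)
    {
        /* ... event_notifier_set(&ctx->notifier) would go here ... */
        atomic_store(&ctx->notified, true);
    }

    static void notify_accept_sketch(NotifySketch *ctx)
    {
        /* Clear the event notifier only if a notification was recorded;
         * a false positive just costs one redundant clear. */
        if (atomic_exchange(&ctx->notified, false)) {
            /* ... event_notifier_test_and_clear(&ctx->notifier) ... */
        }
    }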
@@ -143,6 +143,15 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
     }
 }

+static inline bool virtio_needs_swap(VirtIODevice *vdev)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+    return virtio_access_is_big_endian(vdev) ? false : true;
+#else
+    return virtio_access_is_big_endian(vdev) ? true : false;
+#endif
+}
+
 static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
 {
 #ifdef HOST_WORDS_BIGENDIAN
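The helper answers a single question -- do guest (virtio) and host byte orders disagree? -- so callers can be written once, outside the #ifdef maze. A hypothetical caller, sketched with bswap16() from qemu/bswap.h:

    /* Sketch only; virtio_tswap16 in this header is the real swap helper. */
    static inline uint16_t tswap16_sketch(VirtIODevice *vdev, uint16_t s)
    {
        return virtio_needs_swap(vdev) ? bswap16(s) : s;
    }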
@@ -44,6 +44,7 @@ typedef struct VirtIORNG {
      */
     QEMUTimer *rate_limit_timer;
     int64_t quota_remaining;
+    bool activate_timer;
 } VirtIORNG;

 #endif
@@ -5,7 +5,7 @@
 #include "qemu-common.h"
 #include "qemu/notify.h"

-#define NSEC_PER_SEC 1000000000LL
+#define NANOSECONDS_PER_SECOND 1000000000LL

 /* timers */

44
memory.c
@@ -1887,23 +1887,16 @@ static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
                    sizeof(FlatRange), cmp_flatrange_addr);
 }

-bool memory_region_present(MemoryRegion *container, hwaddr addr)
-{
-    MemoryRegion *mr = memory_region_find(container, addr, 1).mr;
-    if (!mr || (mr == container)) {
-        return false;
-    }
-    memory_region_unref(mr);
-    return true;
-}
-
 bool memory_region_is_mapped(MemoryRegion *mr)
 {
     return mr->container ? true : false;
 }

-MemoryRegionSection memory_region_find(MemoryRegion *mr,
-                                       hwaddr addr, uint64_t size)
+/* Same as memory_region_find, but it does not add a reference to the
+ * returned region.  It must be called from an RCU critical section.
+ */
+static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
+                                                  hwaddr addr, uint64_t size)
 {
     MemoryRegionSection ret = { .mr = NULL };
     MemoryRegion *root;
@@ -1924,11 +1917,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
     }
     range = addrrange_make(int128_make64(addr), int128_make64(size));

-    rcu_read_lock();
     view = atomic_rcu_read(&as->current_map);
     fr = flatview_lookup(view, range);
     if (!fr) {
-        goto out;
+        return ret;
     }

     while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
@@ -1944,12 +1936,32 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
     ret.size = range.size;
     ret.offset_within_address_space = int128_get64(range.start);
     ret.readonly = fr->readonly;
-    memory_region_ref(ret.mr);
-out:
+    return ret;
+}
+
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+                                       hwaddr addr, uint64_t size)
+{
+    MemoryRegionSection ret;
+    rcu_read_lock();
+    ret = memory_region_find_rcu(mr, addr, size);
+    if (ret.mr) {
+        memory_region_ref(ret.mr);
+    }
     rcu_read_unlock();
     return ret;
 }

+bool memory_region_present(MemoryRegion *container, hwaddr addr)
+{
+    MemoryRegion *mr;
+
+    rcu_read_lock();
+    mr = memory_region_find_rcu(container, addr, 1).mr;
+    rcu_read_unlock();
+    return mr && mr != container;
+}
+
 void address_space_sync_dirty_bitmap(AddressSpace *as)
 {
     FlatView *view;
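After this split the locking discipline differs between the two entry points: the _rcu variant must stay inside an RCU critical section, while the public API takes a reference on the caller's behalf. A hypothetical caller of the public function still owns that reference and must drop it:

    MemoryRegionSection sec = memory_region_find(container, addr, 4);
    if (sec.mr) {
        /* ... inspect sec.offset_within_region, sec.size, ... */
        memory_region_unref(sec.mr);   /* pairs with the ref taken inside */
    }

memory_region_present() can then avoid the ref/unref pair entirely, since it only compares pointers before leaving the read-side critical section.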
@@ -2997,7 +2997,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
                          (unsigned int)comp->block_idx,
                          rdma->local_ram_blocks.nb_blocks);
                 ret = -EIO;
-                break;
+                goto out;
             }
             block = &(rdma->local_ram_blocks.block[comp->block_idx]);

@@ -3092,7 +3092,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
                          (unsigned int)reg->current_index,
                          rdma->local_ram_blocks.nb_blocks);
                 ret = -ENOENT;
-                break;
+                goto out;
             }
             block = &(rdma->local_ram_blocks.block[reg->current_index]);
             if (block->is_ram_block) {
@@ -3102,7 +3102,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
                              block->block_name, block->offset,
                              reg->key.current_addr);
                     ret = -ERANGE;
-                    break;
+                    goto out;
                 }
                 host_addr = (block->local_host_addr +
                             (reg->key.current_addr - block->offset));
@@ -3118,7 +3118,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
                              " chunk: %" PRIx64,
                              block->block_name, reg->key.chunk);
                     ret = -ERANGE;
-                    goto out;
+                    goto out;
                 }
             }
             chunk_start = ram_chunk_start(block, chunk);
19
net/net.c
@@ -1257,14 +1257,19 @@ void qmp_set_link(const char *name, bool up, Error **errp)
 static void net_vm_change_state_handler(void *opaque, int running,
                                         RunState state)
 {
-    /* Complete all queued packets, to guarantee we don't modify
-     * state later when VM is not running.
-     */
-    if (!running) {
-        NetClientState *nc;
-        NetClientState *tmp;
+    NetClientState *nc;
+    NetClientState *tmp;

-        QTAILQ_FOREACH_SAFE(nc, &net_clients, next, tmp) {
+    QTAILQ_FOREACH_SAFE(nc, &net_clients, next, tmp) {
+        if (running) {
+            /* Flush queued packets and wake up backends. */
+            if (nc->peer && qemu_can_send_packet(nc)) {
+                qemu_flush_queued_packets(nc->peer);
+            }
+        } else {
+            /* Complete all queued packets, to guarantee we don't modify
+             * state later when VM is not running.
+             */
             qemu_flush_or_purge_queued_packets(nc, true);
         }
     }
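The rewrite makes the handler symmetric: on stop it still purges, and on resume it now flushes packets that queued up while the peer could not receive, waking any backend that had stopped polling. Without the resume-side flush, a peer that went quiescent across a stop/cont cycle could stay wedged, because nothing would re-trigger delivery of its pending queue.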
@@ -213,7 +213,7 @@ static void net_socket_send(void *opaque)
         if (s->index >= s->packet_len) {
             s->index = 0;
             s->state = 0;
-            if (qemu_send_packet_async(&s->nc, s->buf, size,
+            if (qemu_send_packet_async(&s->nc, s->buf, s->packet_len,
                                        net_socket_send_completed) == 0) {
                 net_socket_read_poll(s, false);
                 break;
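The one-word fix matters because size is the number of bytes just read from the stream socket, which need not align with packet boundaries; s->packet_len is the length of the fully reassembled packet in s->buf. With the old argument, a read that straddled a packet boundary could deliver a truncated or over-long frame.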
@@ -120,39 +120,35 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up", s->nc.info_str);
+        error_report("chardev \"%s\" went up", s->chr->label);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down", s->nc.info_str);
+        error_report("chardev \"%s\" went down", s->chr->label);
         break;
     }
 }

 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr,
-                               uint32_t queues)
+                               const char *name, CharDriverState *chr)
 {
     NetClientState *nc;
     VhostUserState *s;
-    int i;

-    for (i = 0; i < queues; i++) {
-        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);

-        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
-                 i, chr->label);
+    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
+             chr->label);

-        s = DO_UPCAST(VhostUserState, nc, nc);
+    s = DO_UPCAST(VhostUserState, nc, nc);

-        /* We don't provide a receive callback */
-        s->nc.receive_disabled = 1;
-        s->chr = chr;
-        s->nc.queue_index = i;
+    /* We don't provide a receive callback */
+    s->nc.receive_disabled = 1;
+    s->chr = chr;

-        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
-    }
+    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);

     return 0;
 }
@@ -230,7 +226,6 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer, Error **errp)
 {
-    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;

@@ -248,12 +243,6 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         return -1;
     }

-    /* number of queues for multiqueue */
-    if (vhost_user_opts->has_queues) {
-        queues = vhost_user_opts->queues;
-    } else {
-        queues = 1;
-    }
-
-    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
+    return net_vhost_user_init(peer, "vhost_user", name, chr);
 }
@@ -2466,16 +2466,12 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
-# @queues: #optional number of queues to be created for multiqueue vhost-user
-#          (default: 1) (Since 2.4)
-#
 # Since 2.1
 ##
 { 'struct': 'NetdevVhostUserOptions',
   'data': {
     'chardev':        'str',
-    '*vhostforce':    'bool',
-    '*queues':        'uint32' } }
+    '*vhostforce':    'bool' } }

 ##
 # @NetClientOptions
@@ -1963,14 +1963,13 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.

-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]

 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
-be created for multiqueue vhost-user.
+@var{vhostforce}.

 Example:
 @example
@@ -99,7 +99,7 @@ QEMUTimerList *timerlist_new(QEMUClockType type,
     QEMUClock *clock = qemu_clock_ptr(type);

     timer_list = g_malloc0(sizeof(QEMUTimerList));
-    qemu_event_init(&timer_list->timers_done_ev, false);
+    qemu_event_init(&timer_list->timers_done_ev, true);
     timer_list->clock = clock;
     timer_list->notify_cb = cb;
     timer_list->notify_opaque = opaque;
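Initializing timers_done_ev as set encodes the invariant "no timer callbacks are running" from the moment the list is created; my understanding is that otherwise a waiter that synchronizes against pending callbacks (such as disabling a clock) could sleep forever on a list whose timers have never fired, since nothing would ever set the event.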
@@ -714,7 +714,7 @@
 # @virtual: Win virtual bus type
 # @file-backed virtual: Win file-backed bus type
 #
-# Since: 2.2
+# Since: 2.2; 'Unknown' and all entries below since 2.4
 ##
 { 'enum': 'GuestDiskBusType',
   'data': [ 'ide', 'fdc', 'scsi', 'virtio', 'xen', 'usb', 'uml', 'sata',
@@ -17,7 +17,7 @@ bool write_kvmstate_to_list(ARMCPU *cpu)
     abort();
 }

-bool write_list_to_kvmstate(ARMCPU *cpu)
+bool write_list_to_kvmstate(ARMCPU *cpu, int level)
 {
     abort();
 }
@@ -409,7 +409,7 @@ bool write_kvmstate_to_list(ARMCPU *cpu)
     return ok;
 }

-bool write_list_to_kvmstate(ARMCPU *cpu)
+bool write_list_to_kvmstate(ARMCPU *cpu, int level)
 {
     CPUState *cs = CPU(cpu);
     int i;
@@ -421,6 +421,10 @@ bool write_list_to_kvmstate(ARMCPU *cpu)
         uint32_t v32;
         int ret;

+        if (kvm_arm_cpreg_level(regidx) > level) {
+            continue;
+        }
+
         r.id = regidx;
         switch (regidx & KVM_REG_SIZE_MASK) {
         case KVM_REG_SIZE_U32:
@@ -153,6 +153,34 @@ bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
     }
 }

+typedef struct CPRegStateLevel {
+    uint64_t regidx;
+    int level;
+} CPRegStateLevel;
+
+/* All coprocessor registers not listed in the following table are assumed to
+ * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
+ * often, you must add it to this table with a state of either
+ * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
+ */
+static const CPRegStateLevel non_runtime_cpregs[] = {
+    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
+};
+
+int kvm_arm_cpreg_level(uint64_t regidx)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
+        const CPRegStateLevel *l = &non_runtime_cpregs[i];
+        if (l->regidx == regidx) {
+            return l->level;
+        }
+    }
+
+    return KVM_PUT_RUNTIME_STATE;
+}
+
 #define ARM_MPIDR_HWID_BITMASK 0xFFFFFF
 #define ARM_CPU_ID_MPIDR       0, 0, 0, 5

@@ -367,7 +395,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
      * managed to update the CPUARMState with, and only allowing those
      * to be written back up into the kernel).
      */
-    if (!write_list_to_kvmstate(cpu)) {
+    if (!write_list_to_kvmstate(cpu, level)) {
         return EINVAL;
     }

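The `> level` test works because the KVM_PUT_* constants are ordered -- KVM_PUT_RUNTIME_STATE (1) < KVM_PUT_RESET_STATE (2) < KVM_PUT_FULL_STATE (3) -- so a routine runtime sync skips anything the table marks as reset- or full-only, while a full sync writes everything, including the virtual counter KVM_REG_ARM_TIMER_CNT, whose value should not be rewritten on every guest stop/start.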
@@ -139,6 +139,34 @@ bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
     }
 }

+typedef struct CPRegStateLevel {
+    uint64_t regidx;
+    int level;
+} CPRegStateLevel;
+
+/* All system registers not listed in the following table are assumed to be
+ * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
+ * often, you must add it to this table with a state of either
+ * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
+ */
+static const CPRegStateLevel non_runtime_cpregs[] = {
+    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
+};
+
+int kvm_arm_cpreg_level(uint64_t regidx)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
+        const CPRegStateLevel *l = &non_runtime_cpregs[i];
+        if (l->regidx == regidx) {
+            return l->level;
+        }
+    }
+
+    return KVM_PUT_RUNTIME_STATE;
+}
+
 #define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

@@ -280,7 +308,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return ret;
     }

-    if (!write_list_to_kvmstate(cpu)) {
+    if (!write_list_to_kvmstate(cpu, level)) {
         return EINVAL;
     }

@@ -68,9 +68,19 @@ int kvm_arm_init_cpreg_list(ARMCPU *cpu);
  */
 bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);

+/**
+ * kvm_arm_cpreg_level
+ * regidx: KVM register index
+ *
+ * Return the level of this coprocessor/system register.  Return value is
+ * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
+ */
+int kvm_arm_cpreg_level(uint64_t regidx);
+
 /**
  * write_list_to_kvmstate:
  * @cpu: ARMCPU
+ * @level: the state level to sync
  *
  * For each register listed in the ARMCPU cpreg_indexes list, write
  * its value from the cpreg_values list into the kernel (via ioctl).
@@ -83,7 +93,7 @@ bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);
  * Note that we do not stop early on failure -- we will attempt
  * writing all registers in the list.
  */
-bool write_list_to_kvmstate(ARMCPU *cpu);
+bool write_list_to_kvmstate(ARMCPU *cpu, int level);

 /**
  * write_kvmstate_to_list:
@@ -251,7 +251,7 @@ static int cpu_post_load(void *opaque, int version_id)
     }

     if (kvm_enabled()) {
-        if (!write_list_to_kvmstate(cpu)) {
+        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
             return -1;
         }
         /* Note that it's OK for the TCG side not to know about
@@ -235,10 +235,9 @@ int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
 static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                        int32_t *addr)
 {
-    uint64_t val64 = *addr;
     struct kvm_one_reg cp0reg = {
         .id = reg_id,
-        .addr = (uintptr_t)&val64
+        .addr = (uintptr_t)addr
     };

     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
@@ -270,18 +269,12 @@ static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
 static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                        int32_t *addr)
 {
-    int ret;
-    uint64_t val64 = 0;
     struct kvm_one_reg cp0reg = {
         .id = reg_id,
-        .addr = (uintptr_t)&val64
+        .addr = (uintptr_t)addr
     };

-    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
-    if (ret >= 0) {
-        *addr = val64;
-    }
-    return ret;
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
 }

 static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64 reg_id,
@@ -635,12 +628,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)

     /* Set the registers based on QEMU's view of things */
     for (i = 0; i < 32; i++) {
-        regs.gpr[i] = env->active_tc.gpr[i];
+        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
     }

-    regs.hi = env->active_tc.HI[0];
-    regs.lo = env->active_tc.LO[0];
-    regs.pc = env->active_tc.PC;
+    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
+    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
+    regs.pc = (int64_t)(target_long)env->active_tc.PC;

     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

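The casts matter because the MIPS KVM register interface takes 64-bit, sign-extended register images even for 32-bit guests; on a 32-bit target, target_long is 32 bits wide, so the double cast widens with sign. A tiny standalone illustration of the difference:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t gpr = 0x80000000u;                /* a 32-bit guest register */
        int64_t zero_ext = gpr;                    /* 0x0000000080000000 */
        int64_t sign_ext = (int64_t)(int32_t)gpr;  /* 0xffffffff80000000 */
        printf("%016" PRIx64 " vs %016" PRIx64 "\n",
               (uint64_t)zero_ext, (uint64_t)sign_ext);
        return 0;
    }

A plain assignment from the unsigned register array would zero-extend, handing the kernel a value the guest never held.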
@@ -478,6 +478,7 @@ $(patsubst %, check-%, $(check-unit-y)): check-%: %

 $(patsubst %, check-report-qtest-%.xml, $(QTEST_TARGETS)): check-report-qtest-%.xml: $(check-qtest-y)
 	$(call quiet-command,QTEST_QEMU_BINARY=$*-softmmu/qemu-system-$* \
+		QTEST_QEMU_IMG=qemu-img$(EXESUF) \
 		gtester -q $(GTESTER_OPTIONS) -o $@ -m=$(SPEED) $(check-qtest-$*-y),"GTESTER $@")

 check-report-unit.xml: $(check-unit-y)
@@ -339,6 +339,31 @@ static void test_bmdma_short_prdt(void)
     assert_bit_clear(inb(IDE_BASE + reg_status), DF | ERR);
 }

+static void test_bmdma_one_sector_short_prdt(void)
+{
+    uint8_t status;
+
+    /* Read 2 sectors but only give 1 sector in PRDT */
+    PrdtEntry prdt[] = {
+        {
+            .addr = 0,
+            .size = cpu_to_le32(0x200 | PRDT_EOT),
+        },
+    };
+
+    /* Normal request */
+    status = send_dma_request(CMD_READ_DMA, 0, 2,
+                              prdt, ARRAY_SIZE(prdt));
+    g_assert_cmphex(status, ==, 0);
+    assert_bit_clear(inb(IDE_BASE + reg_status), DF | ERR);
+
+    /* Abort the request before it completes */
+    status = send_dma_request(CMD_READ_DMA | CMDF_ABORT, 0, 2,
+                              prdt, ARRAY_SIZE(prdt));
+    g_assert_cmphex(status, ==, 0);
+    assert_bit_clear(inb(IDE_BASE + reg_status), DF | ERR);
+}
+
 static void test_bmdma_long_prdt(void)
 {
     uint8_t status;
@@ -592,6 +617,8 @@ int main(int argc, char **argv)
     qtest_add_func("/ide/bmdma/setup", test_bmdma_setup);
     qtest_add_func("/ide/bmdma/simple_rw", test_bmdma_simple_rw);
     qtest_add_func("/ide/bmdma/short_prdt", test_bmdma_short_prdt);
+    qtest_add_func("/ide/bmdma/one_sector_short_prdt",
+                   test_bmdma_one_sector_short_prdt);
     qtest_add_func("/ide/bmdma/long_prdt", test_bmdma_long_prdt);
     qtest_add_func("/ide/bmdma/no_busmaster", test_bmdma_no_busmaster);
     qtest_add_func("/ide/bmdma/teardown", test_bmdma_teardown);
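The new case differs from test_bmdma_short_prdt in that the PRDT covers exactly one full sector (0x200 bytes) of a two-sector request, so the transfer underruns on a sector boundary rather than mid-sector; the second sub-test replays it with CMDF_ABORT to check that cancelling the half-served request also leaves the DF and ERR status bits clear.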
@@ -86,7 +86,7 @@ static void test_timer(void)
|
|||||||
fatal("time too big %u\n", curr);
|
fatal("time too big %u\n", curr);
|
||||||
}
|
}
|
||||||
for (cnt = 0; ; ) {
|
for (cnt = 0; ; ) {
|
||||||
clock_step(1 * NSEC_PER_SEC);
|
clock_step(1 * NANOSECONDS_PER_SECOND);
|
||||||
prev = curr;
|
prev = curr;
|
||||||
curr = in_Timer();
|
curr = in_Timer();
|
||||||
|
|
||||||
@@ -125,7 +125,7 @@ static void test_timer(void)
|
|||||||
out_IntrStatus(0x4000);
|
out_IntrStatus(0x4000);
|
||||||
curr = in_Timer();
|
curr = in_Timer();
|
||||||
out_TimerInt(curr + 0.5 * CLK);
|
out_TimerInt(curr + 0.5 * CLK);
|
||||||
clock_step(1 * NSEC_PER_SEC);
|
clock_step(1 * NANOSECONDS_PER_SECOND);
|
||||||
out_Timer(0);
|
out_Timer(0);
|
||||||
if ((in_IntrStatus() & 0x4000) == 0) {
|
if ((in_IntrStatus() & 0x4000) == 0) {
|
||||||
fatal("we should have an interrupt here!\n");
|
fatal("we should have an interrupt here!\n");
|
||||||
@@ -137,7 +137,7 @@ static void test_timer(void)
|
|||||||
out_IntrStatus(0x4000);
|
out_IntrStatus(0x4000);
|
||||||
curr = in_Timer();
|
curr = in_Timer();
|
||||||
out_TimerInt(curr + 0.5 * CLK);
|
out_TimerInt(curr + 0.5 * CLK);
|
||||||
clock_step(1 * NSEC_PER_SEC);
|
clock_step(1 * NANOSECONDS_PER_SECOND);
|
||||||
out_TimerInt(0);
|
out_TimerInt(0);
|
||||||
if ((in_IntrStatus() & 0x4000) == 0) {
|
if ((in_IntrStatus() & 0x4000) == 0) {
|
||||||
fatal("we should have an interrupt here!\n");
|
fatal("we should have an interrupt here!\n");
|
||||||
@@ -148,7 +148,7 @@ static void test_timer(void)
     next = curr + 5.0 * CLK;
     out_TimerInt(next);
     for (cnt = 0; ; ) {
-        clock_step(1 * NSEC_PER_SEC);
+        clock_step(1 * NANOSECONDS_PER_SECOND);
         prev = curr;
         curr = in_Timer();
         diff = (curr-prev) & 0xffffffffu;
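The rtl8139 timer hunks above (and the throttle and watchdog hunks below) are a mechanical rename of NSEC_PER_SEC to NANOSECONDS_PER_SECOND; behaviour is unchanged. For reference, the shared definition presumably lives in a common header along these lines (the exact location and literal suffix are assumptions):

/* Assumed shape of the shared constant; the LL suffix keeps
 * products such as 3 * NANOSECONDS_PER_SECOND in 64-bit range. */
#define NANOSECONDS_PER_SECOND 1000000000LL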
@@ -97,14 +97,6 @@ static void event_ready_cb(EventNotifier *e)
 
 /* Tests using aio_*. */
 
-static void test_notify(void)
-{
-    g_assert(!aio_poll(ctx, false));
-    aio_notify(ctx);
-    g_assert(!aio_poll(ctx, true));
-    g_assert(!aio_poll(ctx, false));
-}
-
 typedef struct {
     QemuMutex start_lock;
     bool thread_acquired;
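These test-aio.c hunks follow from the new ctx->notify_me accounting: aio_notify() now only kicks the context's event notifier while some thread has announced it is about to block in poll, so a bare aio_notify() followed by aio_poll() is no longer guaranteed to report progress, and test_notify (like its GSource twin removed below) has nothing left to assert. A simplified sketch of the notify side under that scheme (reconstructed, not the verbatim QEMU source):

/* If nobody is committed to blocking in poll(), there is no one to
 * wake: skip the expensive event_notifier_set().  The barrier pairs
 * with the notify_me update in aio_poll(). */
void aio_notify(AioContext *ctx)
{
    smp_mb();                /* publish work before reading notify_me */
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
    }
}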
@@ -331,7 +323,7 @@ static void test_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -356,7 +348,7 @@ static void test_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
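The assertion change in the two hunks above swaps a one-shot g_assert(!aio_poll(ctx, false)) for a drain loop: instead of insisting that the very first non-blocking poll finds nothing to do, the test spins until a poll reports no progress, which tolerates the context's own notifier showing up as a ready handler. The idiom, spelled out:

/* Drain until quiescent: a true return means at least one ready
 * handler was dispatched; false means nothing is pending. */
while (aio_poll(ctx, false)) {
    /* intentionally empty */
}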
@@ -494,14 +486,6 @@ static void test_timer_schedule(void)
  * works well, and that's what I am using.
  */
 
-static void test_source_notify(void)
-{
-    while (g_main_context_iteration(NULL, false));
-    aio_notify(ctx);
-    g_assert(g_main_context_iteration(NULL, true));
-    g_assert(!g_main_context_iteration(NULL, false));
-}
-
 static void test_source_flush(void)
 {
     g_assert(!g_main_context_iteration(NULL, false));
@@ -669,7 +653,7 @@ static void test_source_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -694,7 +678,7 @@ static void test_source_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
@@ -830,7 +814,6 @@ int main(int argc, char **argv)
     while (g_main_context_iteration(NULL, false));
 
     g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/aio/notify", test_notify);
     g_test_add_func("/aio/acquire", test_acquire);
     g_test_add_func("/aio/bh/schedule", test_bh_schedule);
     g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
@@ -845,7 +828,6 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/flush", test_flush_event_notifier);
     g_test_add_func("/aio/timer/schedule", test_timer_schedule);
 
-    g_test_add_func("/aio-gsource/notify", test_source_notify);
     g_test_add_func("/aio-gsource/flush", test_source_flush);
     g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
     g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
@@ -40,19 +40,19 @@ static void test_leak_bucket(void)
     bkt.level = 1.5;
 
     /* leak an op work of time */
-    throttle_leak_bucket(&bkt, NSEC_PER_SEC / 150);
+    throttle_leak_bucket(&bkt, NANOSECONDS_PER_SECOND / 150);
     g_assert(bkt.avg == 150);
     g_assert(bkt.max == 15);
     g_assert(double_cmp(bkt.level, 0.5));
 
     /* leak again emptying the bucket */
-    throttle_leak_bucket(&bkt, NSEC_PER_SEC / 150);
+    throttle_leak_bucket(&bkt, NANOSECONDS_PER_SECOND / 150);
     g_assert(bkt.avg == 150);
     g_assert(bkt.max == 15);
     g_assert(double_cmp(bkt.level, 0));
 
     /* check that the bucket level won't go lower */
-    throttle_leak_bucket(&bkt, NSEC_PER_SEC / 150);
+    throttle_leak_bucket(&bkt, NANOSECONDS_PER_SECOND / 150);
     g_assert(bkt.avg == 150);
     g_assert(bkt.max == 15);
     g_assert(double_cmp(bkt.level, 0));
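The numbers in test_leak_bucket can be checked by hand: with avg = 150 ops/s, one operation's worth of time is NANOSECONDS_PER_SECOND / 150 ns, so each call drains one unit and the level steps 1.5 to 0.5 to 0, then clamps at zero. A standalone sketch of that arithmetic (plain C, no QEMU headers; the integer division loses a fraction of a nanosecond, which is why the test compares with double_cmp rather than ==):

#include <stdint.h>
#include <stdio.h>

#define NSEC 1000000000LL   /* stand-in for NANOSECONDS_PER_SECOND */

/* Mirrors throttle_leak_bucket: drain avg units per second. */
static double leak(double level, double avg, int64_t delta_ns)
{
    double drained = (avg * (double)delta_ns) / NSEC;
    return level - drained > 0 ? level - drained : 0;  /* MAX(..., 0) */
}

int main(void)
{
    double level = 1.5;
    for (int i = 0; i < 3; i++) {
        level = leak(level, 150, NSEC / 150);
        printf("level ~= %.1f\n", level);   /* 0.5, 0.0, 0.0 */
    }
    return 0;
}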
@@ -90,7 +90,7 @@ static void test_compute_wait(void)
     bkt.level = 15.5;
     wait = throttle_compute_wait(&bkt);
     /* time required to do half an operation */
-    result = (int64_t) NSEC_PER_SEC / 150 / 2;
+    result = (int64_t) NANOSECONDS_PER_SECOND / 150 / 2;
     g_assert(wait == result);
 }
 
@@ -40,29 +40,29 @@ static QDict *qmp_get_event(const char *name)
 
 static QDict *ib700_program_and_wait(QTestState *s)
 {
-    clock_step(NSEC_PER_SEC * 40);
+    clock_step(NANOSECONDS_PER_SECOND * 40);
     qmp_check_no_event();
 
     /* 2 second limit */
     outb(0x443, 14);
 
     /* Ping */
-    clock_step(NSEC_PER_SEC);
+    clock_step(NANOSECONDS_PER_SECOND);
     qmp_check_no_event();
     outb(0x443, 14);
 
     /* Disable */
-    clock_step(NSEC_PER_SEC);
+    clock_step(NANOSECONDS_PER_SECOND);
     qmp_check_no_event();
     outb(0x441, 1);
-    clock_step(3 * NSEC_PER_SEC);
+    clock_step(3 * NANOSECONDS_PER_SECOND);
     qmp_check_no_event();
 
     /* Enable and let it fire */
     outb(0x443, 13);
-    clock_step(3 * NSEC_PER_SEC);
+    clock_step(3 * NANOSECONDS_PER_SECOND);
     qmp_check_no_event();
-    clock_step(2 * NSEC_PER_SEC);
+    clock_step(2 * NANOSECONDS_PER_SECOND);
     return qmp_get_event("WATCHDOG");
 }
 
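Read together, the hunk shows the ib700 programming model as the test exercises it: a write to port 0x443 arms the watchdog (the low nibble selects the timeout; 14 gives the 2-second limit noted in the comment, 13 a longer one), a repeat write pings it, and a write to port 0x441 disables it, while clock_step() advances the virtual clock so nothing waits in real time. Condensed, with port semantics inferred from the test's own comments rather than from the device model:

outb(0x443, 14);                        /* arm: ~2 s timeout */
clock_step(NANOSECONDS_PER_SECOND);     /* 1 s later: still quiet */
outb(0x443, 14);                        /* ping restarts the countdown */
outb(0x441, 1);                         /* disable: quiet even after 3 s */
outb(0x443, 13);                        /* re-arm with a longer timeout */
clock_step(5 * NANOSECONDS_PER_SECOND); /* now the WATCHDOG event arrives */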
@@ -36,7 +36,7 @@ void throttle_leak_bucket(LeakyBucket *bkt, int64_t delta_ns)
     double leak;
 
     /* compute how much to leak */
-    leak = (bkt->avg * (double) delta_ns) / NSEC_PER_SEC;
+    leak = (bkt->avg * (double) delta_ns) / NANOSECONDS_PER_SECOND;
 
     /* make the bucket leak */
     bkt->level = MAX(bkt->level - leak, 0);
@@ -72,7 +72,7 @@ static void throttle_do_leak(ThrottleState *ts, int64_t now)
  */
 static int64_t throttle_do_compute_wait(double limit, double extra)
 {
-    double wait = extra * NSEC_PER_SEC;
+    double wait = extra * NANOSECONDS_PER_SECOND;
     wait /= limit;
     return wait;
 }
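throttle_do_compute_wait is the inverse calculation: the extra units over the threshold, divided by the permitted rate, give the delay in nanoseconds. Plugging in the values from test_compute_wait above (a level of 15.5 against a burst max of 15 presumably leaves extra = 0.5 at 150 ops/s) reproduces the expected NANOSECONDS_PER_SECOND / 150 / 2. A minimal self-checking sketch (plain C, names local to the example):

#include <assert.h>
#include <stdint.h>

#define NSEC 1000000000LL   /* stand-in for NANOSECONDS_PER_SECOND */

/* Same formula as the hunk above: work owed, divided by rate. */
static int64_t compute_wait(double limit, double extra)
{
    double wait = extra * NSEC;
    return wait / limit;
}

int main(void)
{
    /* half an operation over the limit costs half an op's duration */
    assert(compute_wait(150, 0.5) == NSEC / 150 / 2);
    return 0;
}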