Compare commits
129 Commits
v2.8.0-rc4...pull-ui-20
SHA1
e934644126, f27ff81070, 97efe4f961, c952b71582, 6250dff39a, d825367172, f29b3431f6, b3cb21b9b5,
a8ffb372a2, 3d4da9d6f3, e92fbc753d, 9c904a7581, 12597061b3, 5e5db49953, 82a4118694, a7c8215e3b,
684e508c23, aff8fd18f1, 23425cc2b7, 9ef9d40261, 0d9d86fb4d, ee68697551, 0062ea0fd6, 4a1cba3802,
f6a51c84cd, 721671ade7, 0891ee1112, a37c07022b, ba0d10378c, 04b88c84a1, 5c3df1f096, 6cc9906b4c,
baecbde6d7, 1211d81b17, d5ebc8272b, f2b58c4375, 88da0b0301, bcb8998fac, ea83441cc4, 583f21f8b9,
204f01b309, 33243031da, b8e23926c5, dbe2b65566, 2b5e217067, 0194cf31cf, 367790cce8, 72d2e4b6a4,
7b542eb96d, 14f944063a, fb5543d820, 0ccb9c1d81, 8be95defd6, 817af1c72d, f84aab269d, 8a1e52b69d,
0f72559fbc, 91db4642f8, 9e41bade85, 8e953a658f, 26d5df9578, 0584d3c33f, c491e1521f, bd407a21a9,
6efbac908f, 74af4eec29, ef17f83661, 6a0e947b12, bd673bd8ab, 4c3386f421, 2d105bd6b5, e03192fd62,
e353aac51b, 0bfa02595a, e971fa0422, c9b61d9aa1, 92204403ef, 0a97c40f8e, 416d72b97b, 0f1944735b,
2494c9f640, 450aaae863, e5fdf663cf, e45d4ef6e3, 65839b56b9, a470b33259, c76904ef2f, 6c7c3c21f9,
c52ab08aee, 6053a86fe7, bc20403598, 166dbda7e1, 8929fc3a55, 96a3d39277, e3592bc9d8, feddd2fd91,
272f042877, be232eb076, 638cbd452d, 722f8d9099, 11717bc93a, 45241cf9d7, 8caa05d889, e7a9f35321,
c17a18ef30, a273f4cedf, 5ffb350541, eb7a20a361, 1f4e496e1f, 715c31ec8e, 0ce265ffef, 2651efe7f5,
225adf16d2, 4fd460bf25, f4d7674722, e11680524a, 5ce9cfe737, 12a4f2162a, 1f923c70bd, d1e8e8ecc3,
ffb7bf452a, d4c64800bb, 9443598d7e, 893dcdbfa9, fcf5ef2ab5, 82ecffa8c0, 0737f32daf, 9b7621bca2,
abd7f08b23
.gitignore (vendored): 4 changes
@@ -82,10 +82,6 @@
*.d
!/scripts/qemu-guest-agent/fsfreeze-hook.d
*.o
*.lo
*.la
*.pc
.libs
.sdk
*.gcda
*.gcno
HACKING: 18 changes
@@ -1,10 +1,28 @@
1. Preprocessor

1.1. Variadic macros

For variadic macros, stick with this C99-like syntax:

#define DPRINTF(fmt, ...) \
    do { printf("IRQ: " fmt, ## __VA_ARGS__); } while (0)

1.2. Include directives

Order include directives as follows:

#include "qemu/osdep.h"  /* Always first... */
#include <...>           /* then system headers... */
#include "..."           /* and finally QEMU headers. */

The "qemu/osdep.h" header contains preprocessor macros that affect the behavior
of core system headers like <stdint.h>. It must be the first include so that
core system headers included by external libraries get the preprocessor macros
that QEMU depends on.

Do not include "qemu/osdep.h" from header files since the .c file will have
already included it.

2. C types

It should be common sense to use the right type, but we have collected
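Taken together, the two HACKING rules above can be illustrated with a short sketch; the file name, the header choice and the DPRINTF prefix are invented for illustration only and are not part of the patch.

    /* foo.c -- hypothetical example following the conventions above */
    #include "qemu/osdep.h"     /* always first */
    #include <stdio.h>          /* then system headers */
    #include "qemu/cutils.h"    /* and finally QEMU headers */

    #define DPRINTF(fmt, ...) \
        do { printf("foo: " fmt, ## __VA_ARGS__); } while (0)

    void foo_init(void)
    {
        DPRINTF("initialized, %d handlers\n", 0);
    }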
MAINTAINERS: 48 changes
@@ -106,7 +106,7 @@ F: include/fpu/
Alpha
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: target-alpha/
F: target/alpha/
F: hw/alpha/
F: tests/tcg/alpha/
F: disas/alpha.c
@@ -115,7 +115,7 @@ ARM
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: target-arm/
F: target/arm/
F: hw/arm/
F: hw/cpu/a*mpcore.c
F: include/hw/cpu/a*mpcore.h
@@ -126,7 +126,7 @@ F: disas/libvixl/
CRIS
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: target-cris/
F: target/cris/
F: hw/cris/
F: include/hw/cris/
F: tests/tcg/cris/
@@ -135,7 +135,7 @@ F: disas/cris.c
LM32
M: Michael Walle <michael@walle.cc>
S: Maintained
F: target-lm32/
F: target/lm32/
F: disas/lm32.c
F: hw/lm32/
F: hw/*/lm32_*
@@ -147,13 +147,13 @@ F: tests/tcg/lm32/
M68K
M: Laurent Vivier <laurent@vivier.eu>
S: Maintained
F: target-m68k/
F: target/m68k/
F: disas/m68k.c

MicroBlaze
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: target-microblaze/
F: target/microblaze/
F: hw/microblaze/
F: disas/microblaze.c

@@ -161,7 +161,7 @@ MIPS
M: Aurelien Jarno <aurelien@aurel32.net>
M: Yongbok Kim <yongbok.kim@imgtec.com>
S: Maintained
F: target-mips/
F: target/mips/
F: hw/mips/
F: hw/misc/mips_*
F: hw/intc/mips_gic.c
@@ -176,7 +176,7 @@ F: disas/mips.c
Moxie
M: Anthony Green <green@moxielogic.com>
S: Maintained
F: target-moxie/
F: target/moxie/
F: disas/moxie.c
F: hw/moxie/
F: default-configs/moxie-softmmu.mak
@@ -184,7 +184,7 @@ F: default-configs/moxie-softmmu.mak
OpenRISC
M: Jia Liu <proljc@gmail.com>
S: Maintained
F: target-openrisc/
F: target/openrisc/
F: hw/openrisc/
F: tests/tcg/openrisc/

@@ -193,7 +193,7 @@ M: David Gibson <david@gibson.dropbear.id.au>
M: Alexander Graf <agraf@suse.de>
L: qemu-ppc@nongnu.org
S: Maintained
F: target-ppc/
F: target/ppc/
F: hw/ppc/
F: include/hw/ppc/
F: disas/ppc.c
@@ -202,14 +202,14 @@ S390
M: Richard Henderson <rth@twiddle.net>
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: target-s390x/
F: target/s390x/
F: hw/s390x/
F: disas/s390.c

SH4
M: Aurelien Jarno <aurelien@aurel32.net>
S: Odd Fixes
F: target-sh4/
F: target/sh4/
F: hw/sh4/
F: disas/sh4.c
F: include/hw/sh4/
@@ -218,7 +218,7 @@ SPARC
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
M: Artyom Tarasenko <atar4qemu@gmail.com>
S: Maintained
F: target-sparc/
F: target/sparc/
F: hw/sparc/
F: hw/sparc64/
F: disas/sparc.c
@@ -226,7 +226,7 @@ F: disas/sparc.c
UniCore32
M: Guan Xuetao <gxt@mprc.pku.edu.cn>
S: Maintained
F: target-unicore32/
F: target/unicore32/
F: hw/unicore32/
F: include/hw/unicore32/

@@ -235,7 +235,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
M: Richard Henderson <rth@twiddle.net>
M: Eduardo Habkost <ehabkost@redhat.com>
S: Maintained
F: target-i386/
F: target/i386/
F: hw/i386/
F: disas/i386.c

@@ -243,14 +243,14 @@ Xtensa
M: Max Filippov <jcmvbkbc@gmail.com>
W: http://wiki.osll.spb.ru/doku.php?id=etc:users:jcmvbkbc:qemu-target-xtensa
S: Maintained
F: target-xtensa/
F: target/xtensa/
F: hw/xtensa/
F: tests/tcg/xtensa/

TriCore
M: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
S: Maintained
F: target-tricore/
F: target/tricore/
F: hw/tricore/
F: include/hw/tricore/

@@ -269,26 +269,26 @@ ARM
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: target-arm/kvm.c
F: target/arm/kvm.c

MIPS
M: James Hogan <james.hogan@imgtec.com>
S: Maintained
F: target-mips/kvm.c
F: target/mips/kvm.c

PPC
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: target-ppc/kvm.c
F: target/ppc/kvm.c

S390
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Cornelia Huck <cornelia.huck@de.ibm.com>
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: target-s390x/kvm.c
F: target-s390x/ioinst.[ch]
F: target-s390x/machine.c
F: target/s390x/kvm.c
F: target/s390x/ioinst.[ch]
F: target/s390x/machine.c
F: hw/intc/s390_flic.c
F: hw/intc/s390_flic_kvm.c
F: include/hw/s390x/s390_flic.h
@@ -301,7 +301,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
M: Marcelo Tosatti <mtosatti@redhat.com>
L: kvm@vger.kernel.org
S: Supported
F: target-i386/kvm.c
F: target/i386/kvm.c

Guest CPU Cores (Xen):
----------------------
Makefile: 9 changes
@@ -231,12 +231,10 @@ ALL_SUBDIRS=$(TARGET_DIRS) $(patsubst %,pc-bios/%, $(ROMS))

recurse-all: $(SUBDIR_RULES) $(ROMSUBDIR_RULES)

$(BUILD_DIR)/version.o: $(SRC_PATH)/version.rc config-host.h | $(BUILD_DIR)/version.lo
$(BUILD_DIR)/version.o: $(SRC_PATH)/version.rc config-host.h
	$(call quiet-command,$(WINDRES) -I$(BUILD_DIR) -o $@ $<,"RC","version.o")
$(BUILD_DIR)/version.lo: $(SRC_PATH)/version.rc config-host.h
	$(call quiet-command,$(WINDRES) -I$(BUILD_DIR) -o $@ $<,"RC","version.lo")

Makefile: $(version-obj-y) $(version-lobj-y)
Makefile: $(version-obj-y)

######################################################################
# Build libraries
@@ -358,10 +356,9 @@ clean:
	rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h
	rm -f qemu-options.def
	rm -f *.msi
	find . \( -name '*.l[oa]' -o -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f -exec rm {} +
	find . \( -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f -exec rm {} +
	rm -f $(filter-out %.tlb,$(TOOLS)) $(HELPERS-y) qemu-ga TAGS cscope.* *.pod *~ */*~
	rm -f fsdev/*.pod
	rm -rf .libs */.libs
	rm -f qemu-img-cmds.h
	rm -f ui/shader/*-vert.h ui/shader/*-frag.h
	@# May not be present in GENERATED_HEADERS
@@ -97,7 +97,6 @@ common-obj-y += disas/
######################################################################
# Resource file for Windows executables
version-obj-$(CONFIG_WIN32) += $(BUILD_DIR)/version.o
version-lobj-$(CONFIG_WIN32) += $(BUILD_DIR)/version.lo

######################################################################
# tracing
@@ -155,11 +154,11 @@ trace-events-y += hw/alpha/trace-events
trace-events-y += ui/trace-events
trace-events-y += audio/trace-events
trace-events-y += net/trace-events
trace-events-y += target-arm/trace-events
trace-events-y += target-i386/trace-events
trace-events-y += target-sparc/trace-events
trace-events-y += target-s390x/trace-events
trace-events-y += target-ppc/trace-events
trace-events-y += target/arm/trace-events
trace-events-y += target/i386/trace-events
trace-events-y += target/sparc/trace-events
trace-events-y += target/s390x/trace-events
trace-events-y += target/ppc/trace-events
trace-events-y += qom/trace-events
trace-events-y += linux-user/trace-events
trace-events-y += qapi/trace-events
@@ -11,7 +11,7 @@ $(call set-vpath, $(SRC_PATH):$(BUILD_DIR))
ifdef CONFIG_LINUX
QEMU_CFLAGS += -I../linux-headers
endif
QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H
QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target/$(TARGET_BASE_ARCH) -DNEED_CPU_H

QEMU_CFLAGS+=-I$(SRC_PATH)/include

@@ -76,6 +76,7 @@ $(QEMU_PROG)-simpletrace.stp: $(BUILD_DIR)/trace-events-all
else
stap:
endif
.PHONY: stap

all: $(PROGS) stap

@@ -92,7 +93,7 @@ obj-$(CONFIG_TCG_INTERPRETER) += tci.o
obj-y += tcg/tcg-common.o
obj-$(CONFIG_TCG_INTERPRETER) += disas/tci.o
obj-y += fpu/softfloat.o
obj-y += target-$(TARGET_BASE_ARCH)/
obj-y += target/$(TARGET_BASE_ARCH)/
obj-y += disas.o
obj-y += tcg-runtime.o
obj-$(call notempty,$(TARGET_XML_FILES)) += gdbstub-xml.o
aio-posix.c: 312 changes
@@ -18,6 +18,8 @@
|
||||
#include "block/block.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "qemu/sockets.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "trace.h"
|
||||
#ifdef CONFIG_EPOLL_CREATE1
|
||||
#include <sys/epoll.h>
|
||||
#endif
|
||||
@@ -27,6 +29,9 @@ struct AioHandler
|
||||
GPollFD pfd;
|
||||
IOHandler *io_read;
|
||||
IOHandler *io_write;
|
||||
AioPollFn *io_poll;
|
||||
IOHandler *io_poll_begin;
|
||||
IOHandler *io_poll_end;
|
||||
int deleted;
|
||||
void *opaque;
|
||||
bool is_external;
|
||||
@@ -200,6 +205,7 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
bool is_external,
|
||||
IOHandler *io_read,
|
||||
IOHandler *io_write,
|
||||
AioPollFn *io_poll,
|
||||
void *opaque)
|
||||
{
|
||||
AioHandler *node;
|
||||
@@ -209,7 +215,7 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
node = find_aio_handler(ctx, fd);
|
||||
|
||||
/* Are we deleting the fd handler? */
|
||||
if (!io_read && !io_write) {
|
||||
if (!io_read && !io_write && !io_poll) {
|
||||
if (node == NULL) {
|
||||
return;
|
||||
}
|
||||
@@ -228,6 +234,10 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
QLIST_REMOVE(node, node);
|
||||
deleted = true;
|
||||
}
|
||||
|
||||
if (!node->io_poll) {
|
||||
ctx->poll_disable_cnt--;
|
||||
}
|
||||
} else {
|
||||
if (node == NULL) {
|
||||
/* Alloc and insert if it's not already there */
|
||||
@@ -237,10 +247,16 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
|
||||
g_source_add_poll(&ctx->source, &node->pfd);
|
||||
is_new = true;
|
||||
|
||||
ctx->poll_disable_cnt += !io_poll;
|
||||
} else {
|
||||
ctx->poll_disable_cnt += !io_poll - !node->io_poll;
|
||||
}
|
||||
|
||||
/* Update handler with latest information */
|
||||
node->io_read = io_read;
|
||||
node->io_write = io_write;
|
||||
node->io_poll = io_poll;
|
||||
node->opaque = opaque;
|
||||
node->is_external = is_external;
|
||||
|
||||
@@ -250,22 +266,83 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
|
||||
aio_epoll_update(ctx, node, is_new);
|
||||
aio_notify(ctx);
|
||||
|
||||
if (deleted) {
|
||||
g_free(node);
|
||||
}
|
||||
}
|
||||
|
||||
void aio_set_fd_poll(AioContext *ctx, int fd,
|
||||
IOHandler *io_poll_begin,
|
||||
IOHandler *io_poll_end)
|
||||
{
|
||||
AioHandler *node = find_aio_handler(ctx, fd);
|
||||
|
||||
if (!node) {
|
||||
return;
|
||||
}
|
||||
|
||||
node->io_poll_begin = io_poll_begin;
|
||||
node->io_poll_end = io_poll_end;
|
||||
}
|
||||
|
||||
void aio_set_event_notifier(AioContext *ctx,
|
||||
EventNotifier *notifier,
|
||||
bool is_external,
|
||||
EventNotifierHandler *io_read)
|
||||
EventNotifierHandler *io_read,
|
||||
AioPollFn *io_poll)
|
||||
{
|
||||
aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
|
||||
is_external, (IOHandler *)io_read, NULL, notifier);
|
||||
aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
|
||||
(IOHandler *)io_read, NULL, io_poll, notifier);
|
||||
}
|
||||
|
||||
void aio_set_event_notifier_poll(AioContext *ctx,
|
||||
EventNotifier *notifier,
|
||||
EventNotifierHandler *io_poll_begin,
|
||||
EventNotifierHandler *io_poll_end)
|
||||
{
|
||||
aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
|
||||
(IOHandler *)io_poll_begin,
|
||||
(IOHandler *)io_poll_end);
|
||||
}
|
||||
|
||||
static void poll_set_started(AioContext *ctx, bool started)
|
||||
{
|
||||
AioHandler *node;
|
||||
|
||||
if (started == ctx->poll_started) {
|
||||
return;
|
||||
}
|
||||
|
||||
ctx->poll_started = started;
|
||||
|
||||
ctx->walking_handlers++;
|
||||
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
|
||||
IOHandler *fn;
|
||||
|
||||
if (node->deleted) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (started) {
|
||||
fn = node->io_poll_begin;
|
||||
} else {
|
||||
fn = node->io_poll_end;
|
||||
}
|
||||
|
||||
if (fn) {
|
||||
fn(node->opaque);
|
||||
}
|
||||
}
|
||||
ctx->walking_handlers--;
|
||||
}
|
||||
|
||||
|
||||
bool aio_prepare(AioContext *ctx)
|
||||
{
|
||||
/* Poll mode cannot be used with glib's event loop, disable it. */
|
||||
poll_set_started(ctx, false);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -290,9 +367,13 @@ bool aio_pending(AioContext *ctx)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool aio_dispatch(AioContext *ctx)
|
||||
/*
|
||||
* Note that dispatch_fds == false has the side-effect of post-poning the
|
||||
* freeing of deleted handlers.
|
||||
*/
|
||||
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
|
||||
{
|
||||
AioHandler *node;
|
||||
AioHandler *node = NULL;
|
||||
bool progress = false;
|
||||
|
||||
/*
|
||||
@@ -308,7 +389,9 @@ bool aio_dispatch(AioContext *ctx)
|
||||
* We have to walk very carefully in case aio_set_fd_handler is
|
||||
* called while we're walking.
|
||||
*/
|
||||
node = QLIST_FIRST(&ctx->aio_handlers);
|
||||
if (dispatch_fds) {
|
||||
node = QLIST_FIRST(&ctx->aio_handlers);
|
||||
}
|
||||
while (node) {
|
||||
AioHandler *tmp;
|
||||
int revents;
|
||||
@@ -400,12 +483,100 @@ static void add_pollfd(AioHandler *node)
|
||||
npfd++;
|
||||
}
|
||||
|
||||
static bool run_poll_handlers_once(AioContext *ctx)
|
||||
{
|
||||
bool progress = false;
|
||||
AioHandler *node;
|
||||
|
||||
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
|
||||
if (!node->deleted && node->io_poll &&
|
||||
node->io_poll(node->opaque)) {
|
||||
progress = true;
|
||||
}
|
||||
|
||||
/* Caller handles freeing deleted nodes. Don't do it here. */
|
||||
}
|
||||
|
||||
return progress;
|
||||
}
|
||||
|
||||
/* run_poll_handlers:
|
||||
* @ctx: the AioContext
|
||||
* @max_ns: maximum time to poll for, in nanoseconds
|
||||
*
|
||||
* Polls for a given time.
|
||||
*
|
||||
* Note that ctx->notify_me must be non-zero so this function can detect
|
||||
* aio_notify().
|
||||
*
|
||||
* Note that the caller must have incremented ctx->walking_handlers.
|
||||
*
|
||||
* Returns: true if progress was made, false otherwise
|
||||
*/
|
||||
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
|
||||
{
|
||||
bool progress;
|
||||
int64_t end_time;
|
||||
|
||||
assert(ctx->notify_me);
|
||||
assert(ctx->walking_handlers > 0);
|
||||
assert(ctx->poll_disable_cnt == 0);
|
||||
|
||||
trace_run_poll_handlers_begin(ctx, max_ns);
|
||||
|
||||
end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
|
||||
|
||||
do {
|
||||
progress = run_poll_handlers_once(ctx);
|
||||
} while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
|
||||
|
||||
trace_run_poll_handlers_end(ctx, progress);
|
||||
|
||||
return progress;
|
||||
}
|
||||
|
||||
/* try_poll_mode:
|
||||
* @ctx: the AioContext
|
||||
* @blocking: busy polling is only attempted when blocking is true
|
||||
*
|
||||
* ctx->notify_me must be non-zero so this function can detect aio_notify().
|
||||
*
|
||||
* Note that the caller must have incremented ctx->walking_handlers.
|
||||
*
|
||||
* Returns: true if progress was made, false otherwise
|
||||
*/
|
||||
static bool try_poll_mode(AioContext *ctx, bool blocking)
|
||||
{
|
||||
if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
|
||||
/* See qemu_soonest_timeout() uint64_t hack */
|
||||
int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
|
||||
(uint64_t)ctx->poll_ns);
|
||||
|
||||
if (max_ns) {
|
||||
poll_set_started(ctx, true);
|
||||
|
||||
if (run_poll_handlers(ctx, max_ns)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
poll_set_started(ctx, false);
|
||||
|
||||
/* Even if we don't run busy polling, try polling once in case it can make
|
||||
* progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
|
||||
*/
|
||||
return run_poll_handlers_once(ctx);
|
||||
}
|
||||
|
||||
bool aio_poll(AioContext *ctx, bool blocking)
|
||||
{
|
||||
AioHandler *node;
|
||||
int i, ret;
|
||||
int i;
|
||||
int ret = 0;
|
||||
bool progress;
|
||||
int64_t timeout;
|
||||
int64_t start = 0;
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
progress = false;
|
||||
@@ -423,41 +594,91 @@ bool aio_poll(AioContext *ctx, bool blocking)
|
||||
|
||||
ctx->walking_handlers++;
|
||||
|
||||
assert(npfd == 0);
|
||||
if (ctx->poll_max_ns) {
|
||||
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
}
|
||||
|
||||
/* fill pollfds */
|
||||
if (try_poll_mode(ctx, blocking)) {
|
||||
progress = true;
|
||||
} else {
|
||||
assert(npfd == 0);
|
||||
|
||||
if (!aio_epoll_enabled(ctx)) {
|
||||
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
|
||||
if (!node->deleted && node->pfd.events
|
||||
&& aio_node_check(ctx, node->is_external)) {
|
||||
add_pollfd(node);
|
||||
/* fill pollfds */
|
||||
|
||||
if (!aio_epoll_enabled(ctx)) {
|
||||
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
|
||||
if (!node->deleted && node->pfd.events
|
||||
&& aio_node_check(ctx, node->is_external)) {
|
||||
add_pollfd(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timeout = blocking ? aio_compute_timeout(ctx) : 0;
|
||||
|
||||
/* wait until next event */
|
||||
if (timeout) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
|
||||
AioHandler epoll_handler;
|
||||
|
||||
epoll_handler.pfd.fd = ctx->epollfd;
|
||||
epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
|
||||
npfd = 0;
|
||||
add_pollfd(&epoll_handler);
|
||||
ret = aio_epoll(ctx, pollfds, npfd, timeout);
|
||||
} else {
|
||||
ret = qemu_poll_ns(pollfds, npfd, timeout);
|
||||
}
|
||||
if (timeout) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
timeout = blocking ? aio_compute_timeout(ctx) : 0;
|
||||
|
||||
/* wait until next event */
|
||||
if (timeout) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
|
||||
AioHandler epoll_handler;
|
||||
|
||||
epoll_handler.pfd.fd = ctx->epollfd;
|
||||
epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
|
||||
npfd = 0;
|
||||
add_pollfd(&epoll_handler);
|
||||
ret = aio_epoll(ctx, pollfds, npfd, timeout);
|
||||
} else {
|
||||
ret = qemu_poll_ns(pollfds, npfd, timeout);
|
||||
}
|
||||
if (blocking) {
|
||||
atomic_sub(&ctx->notify_me, 2);
|
||||
}
|
||||
if (timeout) {
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
/* Adjust polling time */
|
||||
if (ctx->poll_max_ns) {
|
||||
int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
|
||||
|
||||
if (block_ns <= ctx->poll_ns) {
|
||||
/* This is the sweet spot, no adjustment needed */
|
||||
} else if (block_ns > ctx->poll_max_ns) {
|
||||
/* We'd have to poll for too long, poll less */
|
||||
int64_t old = ctx->poll_ns;
|
||||
|
||||
if (ctx->poll_shrink) {
|
||||
ctx->poll_ns /= ctx->poll_shrink;
|
||||
} else {
|
||||
ctx->poll_ns = 0;
|
||||
}
|
||||
|
||||
trace_poll_shrink(ctx, old, ctx->poll_ns);
|
||||
} else if (ctx->poll_ns < ctx->poll_max_ns &&
|
||||
block_ns < ctx->poll_max_ns) {
|
||||
/* There is room to grow, poll longer */
|
||||
int64_t old = ctx->poll_ns;
|
||||
int64_t grow = ctx->poll_grow;
|
||||
|
||||
if (grow == 0) {
|
||||
grow = 2;
|
||||
}
|
||||
|
||||
if (ctx->poll_ns) {
|
||||
ctx->poll_ns *= grow;
|
||||
} else {
|
||||
ctx->poll_ns = 4000; /* start polling at 4 microseconds */
|
||||
}
|
||||
|
||||
if (ctx->poll_ns > ctx->poll_max_ns) {
|
||||
ctx->poll_ns = ctx->poll_max_ns;
|
||||
}
|
||||
|
||||
trace_poll_grow(ctx, old, ctx->poll_ns);
|
||||
}
|
||||
}
|
||||
|
||||
aio_notify_accept(ctx);
|
||||
@@ -473,7 +694,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
|
||||
ctx->walking_handlers--;
|
||||
|
||||
/* Run dispatch even if there were no readable fds to run timers */
|
||||
if (aio_dispatch(ctx)) {
|
||||
if (aio_dispatch(ctx, ret > 0)) {
|
||||
progress = true;
|
||||
}
|
||||
|
||||
@@ -484,6 +705,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
|
||||
|
||||
void aio_context_setup(AioContext *ctx)
|
||||
{
|
||||
/* TODO remove this in final patch submission */
|
||||
if (getenv("QEMU_AIO_POLL_MAX_NS")) {
|
||||
fprintf(stderr, "The QEMU_AIO_POLL_MAX_NS environment variable has "
|
||||
"been replaced with -object iothread,poll-max-ns=NUM\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_EPOLL_CREATE1
|
||||
assert(!ctx->epollfd);
|
||||
ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
|
||||
@@ -495,3 +723,17 @@ void aio_context_setup(AioContext *ctx)
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
|
||||
int64_t grow, int64_t shrink, Error **errp)
|
||||
{
|
||||
/* No thread synchronization here, it doesn't matter if an incorrect value
|
||||
* is used once.
|
||||
*/
|
||||
ctx->poll_max_ns = max_ns;
|
||||
ctx->poll_ns = 0;
|
||||
ctx->poll_grow = grow;
|
||||
ctx->poll_shrink = shrink;
|
||||
|
||||
aio_notify(ctx);
|
||||
}
|
||||
|
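The self-tuning polling window adjusted at the end of aio_poll() above can be summarised in isolation. The sketch below mirrors the grow/shrink branches from the diff, with the AioContext fields reduced to plain parameters; it is a simplified illustration, not the function as it appears in the tree.

    /* Sketch of the poll-window adjustment performed after each blocking aio_poll(). */
    static int64_t adjust_poll_ns(int64_t poll_ns, int64_t block_ns,
                                  int64_t max_ns, int64_t grow, int64_t shrink)
    {
        if (block_ns <= poll_ns) {
            /* sweet spot: polling already covers the observed blocking time */
        } else if (block_ns > max_ns) {
            /* we'd have to poll for too long, poll less */
            poll_ns = shrink ? poll_ns / shrink : 0;
        } else if (poll_ns < max_ns && block_ns < max_ns) {
            /* there is room to grow, poll longer */
            poll_ns = poll_ns ? poll_ns * (grow ? grow : 2)
                              : 4000; /* start polling at 4 microseconds */
            poll_ns = MIN(poll_ns, max_ns);
        }
        return poll_ns;
    }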
aio-win32.c: 32 changes
@@ -20,6 +20,7 @@
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"

struct AioHandler {
    EventNotifier *e;
@@ -38,6 +39,7 @@ void aio_set_fd_handler(AioContext *ctx,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
@@ -100,10 +102,18 @@ void aio_set_fd_handler(AioContext *ctx,
    aio_notify(ctx);
}

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify)
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

@@ -150,6 +160,14 @@ void aio_set_event_notifier(AioContext *ctx,
    aio_notify(ctx);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
@@ -271,12 +289,14 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
    return progress;
}

bool aio_dispatch(AioContext *ctx)
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    if (dispatch_fds) {
        progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    }
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}
@@ -374,3 +394,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    error_setg(errp, "AioContext polling is not implemented on Windows");
}
async.c: 21 changes
@@ -251,7 +251,7 @@ aio_ctx_dispatch(GSource *source,
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    aio_dispatch(ctx, true);
    return true;
}

@@ -282,7 +282,7 @@ aio_ctx_finalize(GSource *source)
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
@@ -349,6 +349,15 @@ static void event_notifier_dummy_cb(EventNotifier *e)
{
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool event_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return atomic_read(&ctx->notified);
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
@@ -366,7 +375,8 @@ AioContext *aio_context_new(Error **errp)
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
                           event_notifier_dummy_cb,
                           event_notifier_poll);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
@@ -375,6 +385,11 @@ AioContext *aio_context_new(Error **errp)
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
@@ -27,12 +27,10 @@
|
||||
#include "sysemu/char.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "hw/usb.h"
|
||||
#include "ui/console.h"
|
||||
#include <brlapi.h>
|
||||
#include <brlapi_constants.h>
|
||||
#include <brlapi_keycodes.h>
|
||||
#ifdef CONFIG_SDL
|
||||
#include <SDL_syswm.h>
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
#define DPRINTF(fmt, ...) \
|
||||
@@ -227,12 +225,8 @@ static const uint8_t nabcc_translation[2][256] = {
|
||||
/* The guest OS has started discussing with us, finish initializing BrlAPI */
|
||||
static int baum_deferred_init(BaumDriverState *baum)
|
||||
{
|
||||
#if defined(CONFIG_SDL)
|
||||
#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
|
||||
SDL_SysWMinfo info;
|
||||
#endif
|
||||
#endif
|
||||
int tty;
|
||||
int tty = BRLAPI_TTY_DEFAULT;
|
||||
QemuConsole *con;
|
||||
|
||||
if (baum->deferred_init) {
|
||||
return 1;
|
||||
@@ -243,21 +237,12 @@ static int baum_deferred_init(BaumDriverState *baum)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_SDL)
|
||||
#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
|
||||
memset(&info, 0, sizeof(info));
|
||||
SDL_VERSION(&info.version);
|
||||
if (SDL_GetWMInfo(&info)) {
|
||||
tty = info.info.x11.wmwindow;
|
||||
} else {
|
||||
#endif
|
||||
#endif
|
||||
tty = BRLAPI_TTY_DEFAULT;
|
||||
#if defined(CONFIG_SDL)
|
||||
#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
|
||||
con = qemu_console_lookup_by_index(0);
|
||||
if (con && qemu_console_is_graphic(con)) {
|
||||
tty = qemu_console_get_window_id(con);
|
||||
if (tty == -1)
|
||||
tty = BRLAPI_TTY_DEFAULT;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
if (brlapi__enterTtyMode(baum->brlapi, tty, NULL) == -1) {
|
||||
brlapi_perror("baum: brlapi__enterTtyMode");
|
||||
|
@@ -192,19 +192,19 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
    switch (action) {
        case CURL_POLL_IN:
            aio_set_fd_handler(s->aio_context, fd, false,
                               curl_multi_read, NULL, state);
                               curl_multi_read, NULL, NULL, state);
            break;
        case CURL_POLL_OUT:
            aio_set_fd_handler(s->aio_context, fd, false,
                               NULL, curl_multi_do, state);
                               NULL, curl_multi_do, NULL, state);
            break;
        case CURL_POLL_INOUT:
            aio_set_fd_handler(s->aio_context, fd, false,
                               curl_multi_read, curl_multi_do, state);
                               curl_multi_read, curl_multi_do, NULL, state);
            break;
        case CURL_POLL_REMOVE:
            aio_set_fd_handler(s->aio_context, fd, false,
                               NULL, NULL, NULL);
                               NULL, NULL, NULL, NULL);
            break;
    }
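Every call site in the block drivers follows the same pattern seen above: aio_set_fd_handler() and aio_set_event_notifier() gain one extra AioPollFn argument, and existing callers simply pass NULL for it. A hedged sketch of registering a handler that does supply a poll callback; the MyState type and the my_* names are invented for illustration and do not appear in the series.

    typedef struct MyState { int fd; unsigned head, tail; } MyState;   /* hypothetical */

    /* AioPollFn: return true if progress can be made without blocking. */
    static bool my_poll(void *opaque)
    {
        MyState *s = opaque;
        return s->head != s->tail;
    }

    static void my_read(void *opaque)
    {
        /* consume completed work for MyState */
    }

    static void my_attach(AioContext *ctx, MyState *s)
    {
        aio_set_fd_handler(ctx, s->fd, false,   /* is_external */
                           my_read, NULL,       /* io_read, io_write */
                           my_poll,             /* io_poll (new argument) */
                           s);
    }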
@@ -362,6 +362,7 @@ iscsi_set_events(IscsiLun *iscsilun)
                           false,
                           (ev & POLLIN) ? iscsi_process_read : NULL,
                           (ev & POLLOUT) ? iscsi_process_write : NULL,
                           NULL,
                           iscsilun);
        iscsilun->events = ev;
    }
@@ -1526,7 +1527,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
    IscsiLun *iscsilun = bs->opaque;

    aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
                       false, NULL, NULL, NULL);
                       false, NULL, NULL, NULL, NULL);
    iscsilun->events = 0;

    if (iscsilun->nop_timer) {
@@ -255,6 +255,20 @@ static void qemu_laio_completion_cb(EventNotifier *e)
    }
}

static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
@@ -439,7 +453,7 @@ BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL);
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
}

@@ -448,7 +462,8 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb);
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}

LinuxAioState *laio_init(void)
@@ -145,7 +145,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
    aio_context = bdrv_get_aio_context(bs);

    aio_set_fd_handler(aio_context, s->sioc->fd, false,
                       nbd_reply_ready, nbd_restart_write, bs);
                       nbd_reply_ready, nbd_restart_write, NULL, bs);
    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
@@ -161,7 +161,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
        rc = nbd_send_request(s->ioc, request);
    }
    aio_set_fd_handler(aio_context, s->sioc->fd, false,
                       nbd_reply_ready, NULL, bs);
                       nbd_reply_ready, NULL, NULL, bs);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
@@ -366,14 +366,14 @@ void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    aio_set_fd_handler(bdrv_get_aio_context(bs),
                       nbd_get_client_session(bs)->sioc->fd,
                       false, NULL, NULL, NULL);
                       false, NULL, NULL, NULL, NULL);
}

void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sioc->fd,
                       false, nbd_reply_ready, NULL, bs);
                       false, nbd_reply_ready, NULL, NULL, bs);
}

void nbd_client_close(BlockDriverState *bs)
@@ -197,7 +197,8 @@ static void nfs_set_events(NFSClient *client)
        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
                           false,
                           (ev & POLLIN) ? nfs_process_read : NULL,
                           (ev & POLLOUT) ? nfs_process_write : NULL, client);
                           (ev & POLLOUT) ? nfs_process_write : NULL,
                           NULL, client);

    }
    client->events = ev;
@@ -395,7 +396,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
    NFSClient *client = bs->opaque;

    aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
                       false, NULL, NULL, NULL);
                       false, NULL, NULL, NULL, NULL);
    client->events = 0;
}

@@ -415,7 +416,7 @@ static void nfs_client_close(NFSClient *client)
            nfs_close(client->context, client->fh);
        }
        aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
                           false, NULL, NULL, NULL);
                           false, NULL, NULL, NULL, NULL);
        nfs_destroy_context(client->context);
    }
    memset(client, 0, sizeof(NFSClient));
@@ -664,7 +664,7 @@ static coroutine_fn void do_co_req(void *opaque)
|
||||
|
||||
co = qemu_coroutine_self();
|
||||
aio_set_fd_handler(srco->aio_context, sockfd, false,
|
||||
NULL, restart_co_req, co);
|
||||
NULL, restart_co_req, NULL, co);
|
||||
|
||||
ret = send_co_req(sockfd, hdr, data, wlen);
|
||||
if (ret < 0) {
|
||||
@@ -672,7 +672,7 @@ static coroutine_fn void do_co_req(void *opaque)
|
||||
}
|
||||
|
||||
aio_set_fd_handler(srco->aio_context, sockfd, false,
|
||||
restart_co_req, NULL, co);
|
||||
restart_co_req, NULL, NULL, co);
|
||||
|
||||
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
|
||||
if (ret != sizeof(*hdr)) {
|
||||
@@ -698,7 +698,7 @@ out:
|
||||
/* there is at most one request for this sockfd, so it is safe to
|
||||
* set each handler to NULL. */
|
||||
aio_set_fd_handler(srco->aio_context, sockfd, false,
|
||||
NULL, NULL, NULL);
|
||||
NULL, NULL, NULL, NULL);
|
||||
|
||||
srco->ret = ret;
|
||||
srco->finished = true;
|
||||
@@ -760,7 +760,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
|
||||
AIOReq *aio_req, *next;
|
||||
|
||||
aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
|
||||
NULL, NULL);
|
||||
NULL, NULL, NULL);
|
||||
close(s->fd);
|
||||
s->fd = -1;
|
||||
|
||||
@@ -964,7 +964,7 @@ static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
|
||||
}
|
||||
|
||||
aio_set_fd_handler(s->aio_context, fd, false,
|
||||
co_read_response, NULL, s);
|
||||
co_read_response, NULL, NULL, s);
|
||||
return fd;
|
||||
}
|
||||
|
||||
@@ -1226,7 +1226,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
|
||||
qemu_co_mutex_lock(&s->lock);
|
||||
s->co_send = qemu_coroutine_self();
|
||||
aio_set_fd_handler(s->aio_context, s->fd, false,
|
||||
co_read_response, co_write_request, s);
|
||||
co_read_response, co_write_request, NULL, s);
|
||||
socket_set_cork(s->fd, 1);
|
||||
|
||||
/* send a header */
|
||||
@@ -1245,7 +1245,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
|
||||
out:
|
||||
socket_set_cork(s->fd, 0);
|
||||
aio_set_fd_handler(s->aio_context, s->fd, false,
|
||||
co_read_response, NULL, s);
|
||||
co_read_response, NULL, NULL, s);
|
||||
s->co_send = NULL;
|
||||
qemu_co_mutex_unlock(&s->lock);
|
||||
}
|
||||
@@ -1396,7 +1396,7 @@ static void sd_detach_aio_context(BlockDriverState *bs)
|
||||
BDRVSheepdogState *s = bs->opaque;
|
||||
|
||||
aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
|
||||
NULL, NULL);
|
||||
NULL, NULL, NULL);
|
||||
}
|
||||
|
||||
static void sd_attach_aio_context(BlockDriverState *bs,
|
||||
@@ -1406,7 +1406,7 @@ static void sd_attach_aio_context(BlockDriverState *bs,
|
||||
|
||||
s->aio_context = new_context;
|
||||
aio_set_fd_handler(new_context, s->fd, false,
|
||||
co_read_response, NULL, s);
|
||||
co_read_response, NULL, NULL, s);
|
||||
}
|
||||
|
||||
/* TODO Convert to fine grained options */
|
||||
@@ -1520,7 +1520,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
|
||||
return 0;
|
||||
out:
|
||||
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
|
||||
false, NULL, NULL, NULL);
|
||||
false, NULL, NULL, NULL, NULL);
|
||||
if (s->fd >= 0) {
|
||||
closesocket(s->fd);
|
||||
}
|
||||
@@ -1559,7 +1559,7 @@ static void sd_reopen_commit(BDRVReopenState *state)
|
||||
|
||||
if (s->fd) {
|
||||
aio_set_fd_handler(s->aio_context, s->fd, false,
|
||||
NULL, NULL, NULL);
|
||||
NULL, NULL, NULL, NULL);
|
||||
closesocket(s->fd);
|
||||
}
|
||||
|
||||
@@ -1583,7 +1583,7 @@ static void sd_reopen_abort(BDRVReopenState *state)
|
||||
|
||||
if (re_s->fd) {
|
||||
aio_set_fd_handler(s->aio_context, re_s->fd, false,
|
||||
NULL, NULL, NULL);
|
||||
NULL, NULL, NULL, NULL);
|
||||
closesocket(re_s->fd);
|
||||
}
|
||||
|
||||
@@ -1972,7 +1972,7 @@ static void sd_close(BlockDriverState *bs)
|
||||
}
|
||||
|
||||
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
|
||||
false, NULL, NULL, NULL);
|
||||
false, NULL, NULL, NULL, NULL);
|
||||
closesocket(s->fd);
|
||||
g_free(s->host_spec);
|
||||
}
|
||||
|
@@ -911,7 +911,7 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
            rd_handler, wr_handler);

    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
                       false, rd_handler, wr_handler, co);
                       false, rd_handler, wr_handler, NULL, co);
}

static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
@@ -919,7 +919,7 @@ static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
{
    DPRINTF("s->sock=%d", s->sock);
    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
                       false, NULL, NULL, NULL);
                       false, NULL, NULL, NULL, NULL);
}

/* A non-blocking call returned EAGAIN, so yield, ensuring the
@@ -175,7 +175,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
                                  AioContext *old_context)
{
    aio_set_event_notifier(old_context, &aio->e, false, NULL);
    aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
    aio->is_aio_context_attached = false;
}

@@ -184,7 +184,7 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
{
    aio->is_aio_context_attached = true;
    aio_set_event_notifier(new_context, &aio->e, false,
                           win32_aio_completion_cb);
                           win32_aio_completion_cb, NULL);
}

QEMUWin32AIOState *win32_aio_init(void)
configure (vendored): 19 changes
@@ -28,8 +28,6 @@ TMPB="qemu-conf"
TMPC="${TMPDIR1}/${TMPB}.c"
TMPO="${TMPDIR1}/${TMPB}.o"
TMPCXX="${TMPDIR1}/${TMPB}.cxx"
TMPL="${TMPDIR1}/${TMPB}.lo"
TMPA="${TMPDIR1}/lib${TMPB}.la"
TMPE="${TMPDIR1}/${TMPB}.exe"
TMPMO="${TMPDIR1}/${TMPB}.mo"

@@ -313,6 +311,7 @@ gnutls_rnd=""
nettle=""
nettle_kdf="no"
gcrypt=""
gcrypt_hmac="no"
gcrypt_kdf="no"
vte=""
virglrenderer=""
@@ -2417,6 +2416,19 @@ EOF
  if compile_prog "$gcrypt_cflags" "$gcrypt_libs" ; then
    gcrypt_kdf=yes
  fi

  cat > $TMPC << EOF
#include <gcrypt.h>
int main(void) {
  gcry_mac_hd_t handle;
  gcry_mac_open(&handle, GCRY_MAC_HMAC_MD5,
                GCRY_MAC_FLAG_SECURE, NULL);
  return 0;
}
EOF
  if compile_prog "$gcrypt_cflags" "$gcrypt_libs" ; then
    gcrypt_hmac=yes
  fi
else
  if test "$gcrypt" = "yes"; then
    feature_not_found "gcrypt" "Install gcrypt devel"
@@ -5387,6 +5399,9 @@ if test "$gnutls_rnd" = "yes" ; then
fi
if test "$gcrypt" = "yes" ; then
  echo "CONFIG_GCRYPT=y" >> $config_host_mak
  if test "$gcrypt_hmac" = "yes" ; then
    echo "CONFIG_GCRYPT_HMAC=y" >> $config_host_mak
  fi
  if test "$gcrypt_kdf" = "yes" ; then
    echo "CONFIG_GCRYPT_KDF=y" >> $config_host_mak
  fi
@@ -3,6 +3,10 @@ crypto-obj-y += hash.o
crypto-obj-$(CONFIG_NETTLE) += hash-nettle.o
crypto-obj-$(if $(CONFIG_NETTLE),n,$(CONFIG_GCRYPT)) += hash-gcrypt.o
crypto-obj-$(if $(CONFIG_NETTLE),n,$(if $(CONFIG_GCRYPT),n,y)) += hash-glib.o
crypto-obj-y += hmac.o
crypto-obj-$(CONFIG_NETTLE) += hmac-nettle.o
crypto-obj-$(CONFIG_GCRYPT_HMAC) += hmac-gcrypt.o
crypto-obj-$(if $(CONFIG_NETTLE),n,$(if $(CONFIG_GCRYPT_HMAC),n,y)) += hmac-glib.o
crypto-obj-y += aes.o
crypto-obj-y += desrfb.o
crypto-obj-y += cipher.o
@@ -29,6 +29,7 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
{
    switch (alg) {
    case QCRYPTO_CIPHER_ALG_DES_RFB:
    case QCRYPTO_CIPHER_ALG_3DES:
    case QCRYPTO_CIPHER_ALG_AES_128:
    case QCRYPTO_CIPHER_ALG_AES_192:
    case QCRYPTO_CIPHER_ALG_AES_256:
@@ -99,6 +100,10 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
        gcryalg = GCRY_CIPHER_DES;
        break;

    case QCRYPTO_CIPHER_ALG_3DES:
        gcryalg = GCRY_CIPHER_3DES;
        break;

    case QCRYPTO_CIPHER_ALG_AES_128:
        gcryalg = GCRY_CIPHER_AES128;
        break;
@@ -200,6 +205,7 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
    case QCRYPTO_CIPHER_ALG_TWOFISH_256:
        ctx->blocksize = 16;
        break;
    case QCRYPTO_CIPHER_ALG_3DES:
    case QCRYPTO_CIPHER_ALG_CAST5_128:
        ctx->blocksize = 8;
        break;
@@ -78,6 +78,18 @@ static void des_decrypt_native(cipher_ctx_t ctx, cipher_length_t length,
|
||||
des_decrypt(ctx, length, dst, src);
|
||||
}
|
||||
|
||||
static void des3_encrypt_native(cipher_ctx_t ctx, cipher_length_t length,
|
||||
uint8_t *dst, const uint8_t *src)
|
||||
{
|
||||
des3_encrypt(ctx, length, dst, src);
|
||||
}
|
||||
|
||||
static void des3_decrypt_native(cipher_ctx_t ctx, cipher_length_t length,
|
||||
uint8_t *dst, const uint8_t *src)
|
||||
{
|
||||
des3_decrypt(ctx, length, dst, src);
|
||||
}
|
||||
|
||||
static void cast128_encrypt_native(cipher_ctx_t ctx, cipher_length_t length,
|
||||
uint8_t *dst, const uint8_t *src)
|
||||
{
|
||||
@@ -140,6 +152,18 @@ static void des_decrypt_wrapper(const void *ctx, size_t length,
|
||||
des_decrypt(ctx, length, dst, src);
|
||||
}
|
||||
|
||||
static void des3_encrypt_wrapper(const void *ctx, size_t length,
|
||||
uint8_t *dst, const uint8_t *src)
|
||||
{
|
||||
des3_encrypt(ctx, length, dst, src);
|
||||
}
|
||||
|
||||
static void des3_decrypt_wrapper(const void *ctx, size_t length,
|
||||
uint8_t *dst, const uint8_t *src)
|
||||
{
|
||||
des3_decrypt(ctx, length, dst, src);
|
||||
}
|
||||
|
||||
static void cast128_encrypt_wrapper(const void *ctx, size_t length,
|
||||
uint8_t *dst, const uint8_t *src)
|
||||
{
|
||||
@@ -197,6 +221,7 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg,
|
||||
{
|
||||
switch (alg) {
|
||||
case QCRYPTO_CIPHER_ALG_DES_RFB:
|
||||
case QCRYPTO_CIPHER_ALG_3DES:
|
||||
case QCRYPTO_CIPHER_ALG_AES_128:
|
||||
case QCRYPTO_CIPHER_ALG_AES_192:
|
||||
case QCRYPTO_CIPHER_ALG_AES_256:
|
||||
@@ -254,6 +279,7 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
|
||||
cipher->mode = mode;
|
||||
|
||||
ctx = g_new0(QCryptoCipherNettle, 1);
|
||||
cipher->opaque = ctx;
|
||||
|
||||
switch (alg) {
|
||||
case QCRYPTO_CIPHER_ALG_DES_RFB:
|
||||
@@ -270,6 +296,18 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
|
||||
ctx->blocksize = DES_BLOCK_SIZE;
|
||||
break;
|
||||
|
||||
case QCRYPTO_CIPHER_ALG_3DES:
|
||||
ctx->ctx = g_new0(struct des3_ctx, 1);
|
||||
des3_set_key(ctx->ctx, key);
|
||||
|
||||
ctx->alg_encrypt_native = des3_encrypt_native;
|
||||
ctx->alg_decrypt_native = des3_decrypt_native;
|
||||
ctx->alg_encrypt_wrapper = des3_encrypt_wrapper;
|
||||
ctx->alg_decrypt_wrapper = des3_decrypt_wrapper;
|
||||
|
||||
ctx->blocksize = DES3_BLOCK_SIZE;
|
||||
break;
|
||||
|
||||
case QCRYPTO_CIPHER_ALG_AES_128:
|
||||
case QCRYPTO_CIPHER_ALG_AES_192:
|
||||
case QCRYPTO_CIPHER_ALG_AES_256:
|
||||
@@ -384,13 +422,11 @@ QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg,
|
||||
}
|
||||
|
||||
ctx->iv = g_new0(uint8_t, ctx->blocksize);
|
||||
cipher->opaque = ctx;
|
||||
|
||||
return cipher;
|
||||
|
||||
error:
|
||||
g_free(cipher);
|
||||
g_free(ctx);
|
||||
qcrypto_cipher_free(cipher);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@@ -28,6 +28,7 @@ static size_t alg_key_len[QCRYPTO_CIPHER_ALG__MAX] = {
    [QCRYPTO_CIPHER_ALG_AES_192] = 24,
    [QCRYPTO_CIPHER_ALG_AES_256] = 32,
    [QCRYPTO_CIPHER_ALG_DES_RFB] = 8,
    [QCRYPTO_CIPHER_ALG_3DES] = 24,
    [QCRYPTO_CIPHER_ALG_CAST5_128] = 16,
    [QCRYPTO_CIPHER_ALG_SERPENT_128] = 16,
    [QCRYPTO_CIPHER_ALG_SERPENT_192] = 24,
@@ -42,6 +43,7 @@ static size_t alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = {
    [QCRYPTO_CIPHER_ALG_AES_192] = 16,
    [QCRYPTO_CIPHER_ALG_AES_256] = 16,
    [QCRYPTO_CIPHER_ALG_DES_RFB] = 8,
    [QCRYPTO_CIPHER_ALG_3DES] = 8,
    [QCRYPTO_CIPHER_ALG_CAST5_128] = 8,
    [QCRYPTO_CIPHER_ALG_SERPENT_128] = 16,
    [QCRYPTO_CIPHER_ALG_SERPENT_192] = 16,
@@ -107,8 +109,9 @@ qcrypto_cipher_validate_key_length(QCryptoCipherAlgorithm alg,
    }

    if (mode == QCRYPTO_CIPHER_MODE_XTS) {
        if (alg == QCRYPTO_CIPHER_ALG_DES_RFB) {
            error_setg(errp, "XTS mode not compatible with DES-RFB");
        if (alg == QCRYPTO_CIPHER_ALG_DES_RFB
            || alg == QCRYPTO_CIPHER_ALG_3DES) {
            error_setg(errp, "XTS mode not compatible with DES-RFB/3DES");
            return false;
        }
        if (nkey % 2) {
crypto/hmac-gcrypt.c (new file): 152 lines
@@ -0,0 +1,152 @@
|
||||
/*
|
||||
* QEMU Crypto hmac algorithms (based on libgcrypt)
|
||||
*
|
||||
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
|
||||
*
|
||||
* Authors:
|
||||
* Longpeng(Mike) <longpeng2@huawei.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* (at your option) any later version. See the COPYING file in the
|
||||
* top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "crypto/hmac.h"
|
||||
#include <gcrypt.h>
|
||||
|
||||
static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
|
||||
[QCRYPTO_HASH_ALG_MD5] = GCRY_MAC_HMAC_MD5,
|
||||
[QCRYPTO_HASH_ALG_SHA1] = GCRY_MAC_HMAC_SHA1,
|
||||
[QCRYPTO_HASH_ALG_SHA224] = GCRY_MAC_HMAC_SHA224,
|
||||
[QCRYPTO_HASH_ALG_SHA256] = GCRY_MAC_HMAC_SHA256,
|
||||
[QCRYPTO_HASH_ALG_SHA384] = GCRY_MAC_HMAC_SHA384,
|
||||
[QCRYPTO_HASH_ALG_SHA512] = GCRY_MAC_HMAC_SHA512,
|
||||
[QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MAC_HMAC_RMD160,
|
||||
};
|
||||
|
||||
typedef struct QCryptoHmacGcrypt QCryptoHmacGcrypt;
|
||||
struct QCryptoHmacGcrypt {
|
||||
gcry_mac_hd_t handle;
|
||||
};
|
||||
|
||||
bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
|
||||
{
|
||||
if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) &&
|
||||
qcrypto_hmac_alg_map[alg] != GCRY_MAC_NONE) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
|
||||
const uint8_t *key, size_t nkey,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoHmac *hmac;
|
||||
QCryptoHmacGcrypt *ctx;
|
||||
gcry_error_t err;
|
||||
|
||||
if (!qcrypto_hmac_supports(alg)) {
|
||||
error_setg(errp, "Unsupported hmac algorithm %s",
|
||||
QCryptoHashAlgorithm_lookup[alg]);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
hmac = g_new0(QCryptoHmac, 1);
|
||||
hmac->alg = alg;
|
||||
|
||||
ctx = g_new0(QCryptoHmacGcrypt, 1);
|
||||
|
||||
err = gcry_mac_open(&ctx->handle, qcrypto_hmac_alg_map[alg],
|
||||
GCRY_MAC_FLAG_SECURE, NULL);
|
||||
if (err != 0) {
|
||||
error_setg(errp, "Cannot initialize hmac: %s",
|
||||
gcry_strerror(err));
|
||||
goto error;
|
||||
}
|
||||
|
||||
err = gcry_mac_setkey(ctx->handle, (const void *)key, nkey);
|
||||
if (err != 0) {
|
||||
error_setg(errp, "Cannot set key: %s",
|
||||
gcry_strerror(err));
|
||||
goto error;
|
||||
}
|
||||
|
||||
hmac->opaque = ctx;
|
||||
return hmac;
|
||||
|
||||
error:
|
||||
g_free(ctx);
|
||||
g_free(hmac);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void qcrypto_hmac_free(QCryptoHmac *hmac)
|
||||
{
|
||||
QCryptoHmacGcrypt *ctx;
|
||||
|
||||
if (!hmac) {
|
||||
return;
|
||||
}
|
||||
|
||||
ctx = hmac->opaque;
|
||||
gcry_mac_close(ctx->handle);
|
||||
|
||||
g_free(ctx);
|
||||
g_free(hmac);
|
||||
}
|
||||
|
||||
int qcrypto_hmac_bytesv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoHmacGcrypt *ctx;
|
||||
gcry_error_t err;
|
||||
uint32_t ret;
|
||||
int i;
|
||||
|
||||
ctx = hmac->opaque;
|
||||
|
||||
for (i = 0; i < niov; i++) {
|
||||
gcry_mac_write(ctx->handle, iov[i].iov_base, iov[i].iov_len);
|
||||
}
|
||||
|
||||
ret = gcry_mac_get_algo_maclen(qcrypto_hmac_alg_map[hmac->alg]);
|
||||
if (ret <= 0) {
|
||||
error_setg(errp, "Unable to get hmac length: %s",
|
||||
gcry_strerror(ret));
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (*resultlen == 0) {
|
||||
*resultlen = ret;
|
||||
*result = g_new0(uint8_t, *resultlen);
|
||||
} else if (*resultlen != ret) {
|
||||
error_setg(errp, "Result buffer size %zu is smaller than hmac %d",
|
||||
*resultlen, ret);
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = gcry_mac_read(ctx->handle, *result, resultlen);
|
||||
if (err != 0) {
|
||||
error_setg(errp, "Cannot get result: %s",
|
||||
gcry_strerror(err));
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = gcry_mac_reset(ctx->handle);
|
||||
if (err != 0) {
|
||||
error_setg(errp, "Cannot reset hmac context: %s",
|
||||
gcry_strerror(err));
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
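A minimal caller of the HMAC API introduced by the new files, pieced together from the prototypes visible above (qcrypto_hmac_new, qcrypto_hmac_bytesv, qcrypto_hmac_free). Error handling is trimmed and the key and message values are placeholders; this is a usage sketch, not code from the series.

    #include "qemu/osdep.h"
    #include "crypto/hmac.h"

    static void hmac_demo(Error **errp)
    {
        const uint8_t key[] = "monkey monkey monkey monkey";
        struct iovec iov = {
            .iov_base = (char *)"hello world",
            .iov_len = 11,
        };
        uint8_t *result = NULL;
        size_t resultlen = 0;   /* 0 lets the backend allocate the buffer */

        QCryptoHmac *hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALG_SHA256,
                                             key, sizeof(key) - 1, errp);
        if (!hmac) {
            return;
        }
        if (qcrypto_hmac_bytesv(hmac, &iov, 1, &result, &resultlen, errp) == 0) {
            /* result now holds the SHA-256 HMAC; resultlen is its length */
            g_free(result);
        }
        qcrypto_hmac_free(hmac);
    }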
crypto/hmac-glib.c (new file): 166 lines
@@ -0,0 +1,166 @@
|
||||
/*
|
||||
* QEMU Crypto hmac algorithms (based on glib)
|
||||
*
|
||||
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
|
||||
*
|
||||
* Authors:
|
||||
* Longpeng(Mike) <longpeng2@huawei.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* (at your option) any later version. See the COPYING file in the
|
||||
* top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "crypto/hmac.h"
|
||||
|
||||
/* Support for HMAC Algos has been added in GLib 2.30 */
|
||||
#if GLIB_CHECK_VERSION(2, 30, 0)
|
||||
|
||||
static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
|
||||
[QCRYPTO_HASH_ALG_MD5] = G_CHECKSUM_MD5,
|
||||
[QCRYPTO_HASH_ALG_SHA1] = G_CHECKSUM_SHA1,
|
||||
[QCRYPTO_HASH_ALG_SHA256] = G_CHECKSUM_SHA256,
|
||||
/* Support for HMAC SHA-512 in GLib 2.42 */
|
||||
#if GLIB_CHECK_VERSION(2, 42, 0)
|
||||
[QCRYPTO_HASH_ALG_SHA512] = G_CHECKSUM_SHA512,
|
||||
#else
|
||||
[QCRYPTO_HASH_ALG_SHA512] = -1,
|
||||
#endif
|
||||
[QCRYPTO_HASH_ALG_SHA224] = -1,
|
||||
[QCRYPTO_HASH_ALG_SHA384] = -1,
|
||||
[QCRYPTO_HASH_ALG_RIPEMD160] = -1,
|
||||
};
|
||||
|
||||
typedef struct QCryptoHmacGlib QCryptoHmacGlib;
|
||||
struct QCryptoHmacGlib {
|
||||
GHmac *ghmac;
|
||||
};
|
||||
|
||||
bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
|
||||
{
|
||||
if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) &&
|
||||
qcrypto_hmac_alg_map[alg] != -1) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
|
||||
const uint8_t *key, size_t nkey,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoHmac *hmac;
|
||||
QCryptoHmacGlib *ctx;
|
||||
|
||||
if (!qcrypto_hmac_supports(alg)) {
|
||||
error_setg(errp, "Unsupported hmac algorithm %s",
|
||||
QCryptoHashAlgorithm_lookup[alg]);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
hmac = g_new0(QCryptoHmac, 1);
|
||||
hmac->alg = alg;
|
||||
|
||||
ctx = g_new0(QCryptoHmacGlib, 1);
|
||||
|
||||
ctx->ghmac = g_hmac_new(qcrypto_hmac_alg_map[alg],
|
||||
(const uint8_t *)key, nkey);
|
||||
if (!ctx->ghmac) {
|
||||
error_setg(errp, "Cannot initialize hmac and set key");
|
||||
goto error;
|
||||
}
|
||||
|
||||
hmac->opaque = ctx;
|
||||
return hmac;
|
||||
|
||||
error:
|
||||
g_free(ctx);
|
||||
g_free(hmac);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void qcrypto_hmac_free(QCryptoHmac *hmac)
|
||||
{
|
||||
QCryptoHmacGlib *ctx;
|
||||
|
||||
if (!hmac) {
|
||||
return;
|
||||
}
|
||||
|
||||
ctx = hmac->opaque;
|
||||
g_hmac_unref(ctx->ghmac);
|
||||
|
||||
g_free(ctx);
|
||||
g_free(hmac);
|
||||
}
|
||||
|
||||
int qcrypto_hmac_bytesv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoHmacGlib *ctx;
|
||||
int i, ret;
|
||||
|
||||
ctx = hmac->opaque;
|
||||
|
||||
for (i = 0; i < niov; i++) {
|
||||
g_hmac_update(ctx->ghmac, iov[i].iov_base, iov[i].iov_len);
|
||||
}
|
||||
|
||||
ret = g_checksum_type_get_length(qcrypto_hmac_alg_map[hmac->alg]);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Unable to get hmac length");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (*resultlen == 0) {
|
||||
*resultlen = ret;
|
||||
*result = g_new0(uint8_t, *resultlen);
|
||||
} else if (*resultlen != ret) {
|
||||
error_setg(errp, "Result buffer size %zu is smaller than hmac %d",
|
||||
*resultlen, ret);
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_hmac_get_digest(ctx->ghmac, *result, resultlen);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
|
||||
const uint8_t *key, size_t nkey,
|
||||
Error **errp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void qcrypto_hmac_free(QCryptoHmac *hmac)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
int qcrypto_hmac_bytesv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
#endif
|
175
crypto/hmac-nettle.c
Normal file
175
crypto/hmac-nettle.c
Normal file
@@ -0,0 +1,175 @@
|
||||
/*
|
||||
* QEMU Crypto hmac algorithms (based on nettle)
|
||||
*
|
||||
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
|
||||
*
|
||||
* Authors:
|
||||
* Longpeng(Mike) <longpeng2@huawei.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* (at your option) any later version. See the COPYING file in the
|
||||
* top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "crypto/hmac.h"
|
||||
#include <nettle/hmac.h>
|
||||
|
||||
typedef void (*qcrypto_nettle_hmac_setkey)(void *ctx,
|
||||
size_t key_length, const uint8_t *key);
|
||||
|
||||
typedef void (*qcrypto_nettle_hmac_update)(void *ctx,
|
||||
size_t length, const uint8_t *data);
|
||||
|
||||
typedef void (*qcrypto_nettle_hmac_digest)(void *ctx,
|
||||
size_t length, uint8_t *digest);
|
||||
|
||||
typedef struct QCryptoHmacNettle QCryptoHmacNettle;
|
||||
struct QCryptoHmacNettle {
|
||||
union qcrypto_nettle_hmac_ctx {
|
||||
struct hmac_md5_ctx md5_ctx;
|
||||
struct hmac_sha1_ctx sha1_ctx;
|
||||
struct hmac_sha256_ctx sha256_ctx; /* equals hmac_sha224_ctx */
|
||||
struct hmac_sha512_ctx sha512_ctx; /* equals hmac_sha384_ctx */
|
||||
struct hmac_ripemd160_ctx ripemd160_ctx;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct qcrypto_nettle_hmac_alg {
|
||||
qcrypto_nettle_hmac_setkey setkey;
|
||||
qcrypto_nettle_hmac_update update;
|
||||
qcrypto_nettle_hmac_digest digest;
|
||||
size_t len;
|
||||
} qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = {
|
||||
[QCRYPTO_HASH_ALG_MD5] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_md5_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_md5_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_md5_digest,
|
||||
.len = MD5_DIGEST_SIZE,
|
||||
},
|
||||
[QCRYPTO_HASH_ALG_SHA1] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha1_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_sha1_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_sha1_digest,
|
||||
.len = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
[QCRYPTO_HASH_ALG_SHA224] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha224_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_sha224_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_sha224_digest,
|
||||
.len = SHA224_DIGEST_SIZE,
|
||||
},
|
||||
[QCRYPTO_HASH_ALG_SHA256] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha256_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_sha256_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_sha256_digest,
|
||||
.len = SHA256_DIGEST_SIZE,
|
||||
},
|
||||
[QCRYPTO_HASH_ALG_SHA384] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha384_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_sha384_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_sha384_digest,
|
||||
.len = SHA384_DIGEST_SIZE,
|
||||
},
|
||||
[QCRYPTO_HASH_ALG_SHA512] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_sha512_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_sha512_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_sha512_digest,
|
||||
.len = SHA512_DIGEST_SIZE,
|
||||
},
|
||||
[QCRYPTO_HASH_ALG_RIPEMD160] = {
|
||||
.setkey = (qcrypto_nettle_hmac_setkey)hmac_ripemd160_set_key,
|
||||
.update = (qcrypto_nettle_hmac_update)hmac_ripemd160_update,
|
||||
.digest = (qcrypto_nettle_hmac_digest)hmac_ripemd160_digest,
|
||||
.len = RIPEMD160_DIGEST_SIZE,
|
||||
},
|
||||
};
|
||||
|
||||
bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg)
|
||||
{
|
||||
if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) &&
|
||||
qcrypto_hmac_alg_map[alg].setkey != NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
|
||||
const uint8_t *key, size_t nkey,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoHmac *hmac;
|
||||
QCryptoHmacNettle *ctx;
|
||||
|
||||
if (!qcrypto_hmac_supports(alg)) {
|
||||
error_setg(errp, "Unsupported hmac algorithm %s",
|
||||
QCryptoHashAlgorithm_lookup[alg]);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
hmac = g_new0(QCryptoHmac, 1);
|
||||
hmac->alg = alg;
|
||||
|
||||
ctx = g_new0(QCryptoHmacNettle, 1);
|
||||
|
||||
qcrypto_hmac_alg_map[alg].setkey(&ctx->u, nkey, key);
|
||||
|
||||
hmac->opaque = ctx;
|
||||
|
||||
return hmac;
|
||||
}
|
||||
|
||||
void qcrypto_hmac_free(QCryptoHmac *hmac)
|
||||
{
|
||||
QCryptoHmacNettle *ctx;
|
||||
|
||||
if (!hmac) {
|
||||
return;
|
||||
}
|
||||
|
||||
ctx = hmac->opaque;
|
||||
|
||||
g_free(ctx);
|
||||
g_free(hmac);
|
||||
}
|
||||
|
||||
int qcrypto_hmac_bytesv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp)
|
||||
{
|
||||
QCryptoHmacNettle *ctx;
|
||||
int i;
|
||||
|
||||
ctx = (QCryptoHmacNettle *)hmac->opaque;
|
||||
|
||||
for (i = 0; i < niov; ++i) {
|
||||
size_t len = iov[i].iov_len;
|
||||
uint8_t *base = iov[i].iov_base;
|
||||
while (len) {
|
||||
size_t shortlen = MIN(len, UINT_MAX);
|
||||
qcrypto_hmac_alg_map[hmac->alg].update(&ctx->u, len, base);
|
||||
len -= shortlen;
|
||||
base += len;
|
||||
}
|
||||
}
|
||||
|
||||
if (*resultlen == 0) {
|
||||
*resultlen = qcrypto_hmac_alg_map[hmac->alg].len;
|
||||
*result = g_new0(uint8_t, *resultlen);
|
||||
} else if (*resultlen != qcrypto_hmac_alg_map[hmac->alg].len) {
|
||||
error_setg(errp,
|
||||
"Result buffer size %zu is smaller than hash %zu",
|
||||
*resultlen, qcrypto_hmac_alg_map[hmac->alg].len);
|
||||
return -1;
|
||||
}
|
||||
|
||||
qcrypto_hmac_alg_map[hmac->alg].digest(&ctx->u, *resultlen, *result);
|
||||
|
||||
return 0;
|
||||
}
|
72
crypto/hmac.c
Normal file
72
crypto/hmac.c
Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* QEMU Crypto hmac algorithms
|
||||
*
|
||||
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* (at your option) any later version. See the COPYING file in the
|
||||
* top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "crypto/hmac.h"
|
||||
|
||||
static const char hex[] = "0123456789abcdef";
|
||||
|
||||
int qcrypto_hmac_bytes(QCryptoHmac *hmac,
|
||||
const char *buf,
|
||||
size_t len,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp)
|
||||
{
|
||||
struct iovec iov = {
|
||||
.iov_base = (char *)buf,
|
||||
.iov_len = len
|
||||
};
|
||||
|
||||
return qcrypto_hmac_bytesv(hmac, &iov, 1, result, resultlen, errp);
|
||||
}
|
||||
|
||||
int qcrypto_hmac_digestv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
char **digest,
|
||||
Error **errp)
|
||||
{
|
||||
uint8_t *result = NULL;
|
||||
size_t resultlen = 0;
|
||||
size_t i;
|
||||
|
||||
if (qcrypto_hmac_bytesv(hmac, iov, niov, &result, &resultlen, errp) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
*digest = g_new0(char, (resultlen * 2) + 1);
|
||||
|
||||
for (i = 0 ; i < resultlen ; i++) {
|
||||
(*digest)[(i * 2)] = hex[(result[i] >> 4) & 0xf];
|
||||
(*digest)[(i * 2) + 1] = hex[result[i] & 0xf];
|
||||
}
|
||||
|
||||
(*digest)[resultlen * 2] = '\0';
|
||||
|
||||
g_free(result);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qcrypto_hmac_digest(QCryptoHmac *hmac,
|
||||
const char *buf,
|
||||
size_t len,
|
||||
char **digest,
|
||||
Error **errp)
|
||||
{
|
||||
struct iovec iov = {
|
||||
.iov_base = (char *)buf,
|
||||
.iov_len = len
|
||||
};
|
||||
|
||||
return qcrypto_hmac_digestv(hmac, &iov, 1, digest, errp);
|
||||
}
|
166
crypto/hmac.h
Normal file
166
crypto/hmac.h
Normal file
@@ -0,0 +1,166 @@
|
||||
/*
|
||||
* QEMU Crypto hmac algorithms
|
||||
*
|
||||
* Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or
|
||||
* (at your option) any later version. See the COPYING file in the
|
||||
* top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef QCRYPTO_HMAC_H
|
||||
#define QCRYPTO_HMAC_H
|
||||
|
||||
#include "qapi-types.h"
|
||||
|
||||
typedef struct QCryptoHmac QCryptoHmac;
|
||||
struct QCryptoHmac {
|
||||
QCryptoHashAlgorithm alg;
|
||||
void *opaque;
|
||||
};
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_supports:
|
||||
* @alg: the hmac algorithm
|
||||
*
|
||||
* Determine if @alg hmac algorithm is supported by
|
||||
* the current configured build
|
||||
*
|
||||
* Returns:
|
||||
* true if the algorithm is supported, false otherwise
|
||||
*/
|
||||
bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg);
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_new:
|
||||
* @alg: the hmac algorithm
|
||||
* @key: the key bytes
|
||||
* @nkey: the length of @key
|
||||
* @errp: pointer to a NULL-initialized error object
|
||||
*
|
||||
* Creates a new hmac object with the algorithm @alg
|
||||
*
|
||||
* The @key parameter provides the bytes representing
|
||||
* the secret key to use. The @nkey parameter specifies
|
||||
* the length of @key in bytes
|
||||
*
|
||||
* Note: must use qcrypto_hmac_free() to release the
|
||||
* returned hmac object when no longer required
|
||||
*
|
||||
* Returns:
|
||||
* a new hmac object, or NULL on error
|
||||
*/
|
||||
QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg,
|
||||
const uint8_t *key, size_t nkey,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_free:
|
||||
* @hmac: the hmac object
|
||||
*
|
||||
* Release the memory associated with @hmac that was
|
||||
* previously allocated by qcrypto_hmac_new()
|
||||
*/
|
||||
void qcrypto_hmac_free(QCryptoHmac *hmac);
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_bytesv:
|
||||
* @hmac: the hmac object
|
||||
* @iov: the array of memory regions to hmac
|
||||
* @niov: the length of @iov
|
||||
* @result: pointer to hold output hmac
|
||||
* @resultlen: pointer to hold length of @result
|
||||
* @errp: pointer to a NULL-initialized error object
|
||||
*
|
||||
* Computes the hmac across all the memory regions
|
||||
* present in @iov. The @result pointer will be
|
||||
* filled with raw bytes representing the computed
|
||||
* hmac, which will have length @resultlen. The
|
||||
* memory pointer in @result must be released
|
||||
* with a call to g_free() when no longer required.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success, -1 on error
|
||||
*/
|
||||
int qcrypto_hmac_bytesv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_bytes:
|
||||
* @hmac: the hmac object
|
||||
* @buf: the memory region to hmac
|
||||
* @len: the length of @buf
|
||||
* @result: pointer to hold output hmac
|
||||
* @resultlen: pointer to hold length of @result
|
||||
* @errp: pointer to a NULL-initialized error object
|
||||
*
|
||||
* Computes the hmac across all the memory region
|
||||
* @buf of length @len. The @result pointer will be
|
||||
* filled with raw bytes representing the computed
|
||||
* hmac, which will have length @resultlen. The
|
||||
* memory pointer in @result must be released
|
||||
* with a call to g_free() when no longer required.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success, -1 on error
|
||||
*/
|
||||
int qcrypto_hmac_bytes(QCryptoHmac *hmac,
|
||||
const char *buf,
|
||||
size_t len,
|
||||
uint8_t **result,
|
||||
size_t *resultlen,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_digestv:
|
||||
* @hmac: the hmac object
|
||||
* @iov: the array of memory regions to hmac
|
||||
* @niov: the length of @iov
|
||||
* @digest: pointer to hold output hmac
|
||||
* @errp: pointer to a NULL-initialized error object
|
||||
*
|
||||
* Computes the hmac across all the memory regions
|
||||
* present in @iov. The @digest pointer will be
|
||||
* filled with the printable hex digest of the computed
|
||||
* hmac, which will be terminated by '\0'. The
|
||||
* memory pointer in @digest must be released
|
||||
* with a call to g_free() when no longer required.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success, -1 on error
|
||||
*/
|
||||
int qcrypto_hmac_digestv(QCryptoHmac *hmac,
|
||||
const struct iovec *iov,
|
||||
size_t niov,
|
||||
char **digest,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* qcrypto_hmac_digest:
|
||||
* @hmac: the hmac object
|
||||
* @buf: the memory region to hmac
|
||||
* @len: the length of @buf
|
||||
* @digest: pointer to hold output hmac
|
||||
* @errp: pointer to a NULL-initialized error object
|
||||
*
|
||||
* Computes the hmac across all the memory region
|
||||
* @buf of length @len. The @digest pointer will be
|
||||
* filled with the printable hex digest of the computed
|
||||
* hmac, which will be terminated by '\0'. The
|
||||
* memory pointer in @digest must be released
|
||||
* with a call to g_free() when no longer required.
|
||||
*
|
||||
* Returns: 0 on success, -1 on error
|
||||
*/
|
||||
int qcrypto_hmac_digest(QCryptoHmac *hmac,
|
||||
const char *buf,
|
||||
size_t len,
|
||||
char **digest,
|
||||
Error **errp);
|
||||
|
||||
#endif
|
@@ -21,9 +21,7 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "disas/bfd.h"
|
||||
//#include "sysdep.h"
|
||||
#include "target-cris/opcode-cris.h"
|
||||
//#include "libiberty.h"
|
||||
#include "target/cris/opcode-cris.h"
|
||||
|
||||
#define CONST_STRNEQ(STR1,STR2) (strncmp ((STR1), (STR2), sizeof (STR2) - 1) == 0)
|
||||
|
||||
|
@@ -4698,10 +4698,6 @@ get_field (const unsigned char *data, enum floatformat_byteorders order,
|
||||
return result;
|
||||
}
|
||||
|
||||
#ifndef min
|
||||
#define min(a, b) ((a) < (b) ? (a) : (b))
|
||||
#endif
|
||||
|
||||
/* Convert from FMT to a double.
|
||||
FROM is the address of the extended float.
|
||||
Store the double in *TO. */
|
||||
@@ -4733,7 +4729,7 @@ floatformat_to_double (const struct floatformat *fmt,
|
||||
nan = 0;
|
||||
while (mant_bits_left > 0)
|
||||
{
|
||||
mant_bits = min (mant_bits_left, 32);
|
||||
mant_bits = MIN(mant_bits_left, 32);
|
||||
|
||||
if (get_field (ufrom, fmt->byteorder, fmt->totalsize,
|
||||
mant_off, mant_bits) != 0)
|
||||
@@ -4793,7 +4789,7 @@ floatformat_to_double (const struct floatformat *fmt,
|
||||
|
||||
while (mant_bits_left > 0)
|
||||
{
|
||||
mant_bits = min (mant_bits_left, 32);
|
||||
mant_bits = MIN(mant_bits_left, 32);
|
||||
|
||||
mant = get_field (ufrom, fmt->byteorder, fmt->totalsize,
|
||||
mant_off, mant_bits);
|
||||
|
703
exec.c
703
exec.c
@@ -2938,6 +2938,31 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
|
||||
return true;
|
||||
}
|
||||
|
||||
static hwaddr
|
||||
address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
|
||||
MemoryRegion *mr, hwaddr base, hwaddr len,
|
||||
bool is_write)
|
||||
{
|
||||
hwaddr done = 0;
|
||||
hwaddr xlat;
|
||||
MemoryRegion *this_mr;
|
||||
|
||||
for (;;) {
|
||||
target_len -= len;
|
||||
addr += len;
|
||||
done += len;
|
||||
if (target_len == 0) {
|
||||
return done;
|
||||
}
|
||||
|
||||
len = target_len;
|
||||
this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
|
||||
if (this_mr != mr || xlat != base + done) {
|
||||
return done;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Map a physical memory region into a host virtual address.
|
||||
* May map a subset of the requested range, given by and returned in *plen.
|
||||
* May return NULL if resources needed to perform the mapping are exhausted.
|
||||
@@ -2951,9 +2976,8 @@ void *address_space_map(AddressSpace *as,
|
||||
bool is_write)
|
||||
{
|
||||
hwaddr len = *plen;
|
||||
hwaddr done = 0;
|
||||
hwaddr l, xlat, base;
|
||||
MemoryRegion *mr, *this_mr;
|
||||
hwaddr l, xlat;
|
||||
MemoryRegion *mr;
|
||||
void *ptr;
|
||||
|
||||
if (len == 0) {
|
||||
@@ -2987,26 +3011,10 @@ void *address_space_map(AddressSpace *as,
|
||||
return bounce.buffer;
|
||||
}
|
||||
|
||||
base = xlat;
|
||||
|
||||
for (;;) {
|
||||
len -= l;
|
||||
addr += l;
|
||||
done += l;
|
||||
if (len == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
l = len;
|
||||
this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
|
||||
if (this_mr != mr || xlat != base + done) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
memory_region_ref(mr);
|
||||
*plen = done;
|
||||
ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
|
||||
*plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
|
||||
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ptr;
|
||||
@@ -3058,597 +3066,92 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
|
||||
return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
#define ARG1_DECL AddressSpace *as
|
||||
#define ARG1 as
|
||||
#define SUFFIX
|
||||
#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
|
||||
#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
|
||||
#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
|
||||
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
|
||||
#define RCU_READ_LOCK(...) rcu_read_lock()
|
||||
#define RCU_READ_UNLOCK(...) rcu_read_unlock()
|
||||
#include "memory_ldst.inc.c"
|
||||
|
||||
int64_t address_space_cache_init(MemoryRegionCache *cache,
|
||||
AddressSpace *as,
|
||||
hwaddr addr,
|
||||
hwaddr len,
|
||||
bool is_write)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
hwaddr l, xlat;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 4;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
bool release_lock = false;
|
||||
void *ptr;
|
||||
|
||||
rcu_read_lock();
|
||||
mr = address_space_translate(as, addr, &addr1, &l, false);
|
||||
if (l < 4 || !memory_access_is_direct(mr, false)) {
|
||||
release_lock |= prepare_mmio_access(mr);
|
||||
assert(len > 0);
|
||||
|
||||
/* I/O case */
|
||||
r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
#else
|
||||
if (endian == DEVICE_BIG_ENDIAN) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
/* RAM case */
|
||||
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
|
||||
switch (endian) {
|
||||
case DEVICE_LITTLE_ENDIAN:
|
||||
val = ldl_le_p(ptr);
|
||||
break;
|
||||
case DEVICE_BIG_ENDIAN:
|
||||
val = ldl_be_p(ptr);
|
||||
break;
|
||||
default:
|
||||
val = ldl_p(ptr);
|
||||
break;
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
l = len;
|
||||
mr = address_space_translate(as, addr, &xlat, &l, is_write);
|
||||
if (!memory_access_is_direct(mr, is_write)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
|
||||
l = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
|
||||
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, &l);
|
||||
|
||||
cache->xlat = xlat;
|
||||
cache->is_write = is_write;
|
||||
cache->mr = mr;
|
||||
cache->ptr = ptr;
|
||||
cache->len = l;
|
||||
memory_region_ref(cache->mr);
|
||||
|
||||
return l;
|
||||
}
|
||||
|
||||
void address_space_cache_invalidate(MemoryRegionCache *cache,
|
||||
hwaddr addr,
|
||||
hwaddr access_len)
|
||||
{
|
||||
assert(cache->is_write);
|
||||
invalidate_and_set_dirty(cache->mr, addr + cache->xlat, access_len);
|
||||
}
|
||||
|
||||
void address_space_cache_destroy(MemoryRegionCache *cache)
|
||||
{
|
||||
if (!cache->mr) {
|
||||
return;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
|
||||
if (xen_enabled()) {
|
||||
xen_invalidate_map_cache_entry(cache->ptr);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return val;
|
||||
memory_region_unref(cache->mr);
|
||||
}
|
||||
|
||||
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
/* Called from RCU critical section. This function has the same
|
||||
* semantics as address_space_translate, but it only works on a
|
||||
* predefined range of a MemoryRegion that was mapped with
|
||||
* address_space_cache_init.
|
||||
*/
|
||||
static inline MemoryRegion *address_space_translate_cached(
|
||||
MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
|
||||
hwaddr *plen, bool is_write)
|
||||
{
|
||||
return address_space_ldl_internal(as, addr, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
assert(addr < cache->len && *plen <= cache->len - addr);
|
||||
*xlat = addr + cache->xlat;
|
||||
return cache->mr;
|
||||
}
|
||||
|
||||
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldl_internal(as, addr, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldl_internal(as, addr, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 8;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
bool release_lock = false;
|
||||
|
||||
rcu_read_lock();
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
false);
|
||||
if (l < 8 || !memory_access_is_direct(mr, false)) {
|
||||
release_lock |= prepare_mmio_access(mr);
|
||||
|
||||
/* I/O case */
|
||||
r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap64(val);
|
||||
}
|
||||
#else
|
||||
if (endian == DEVICE_BIG_ENDIAN) {
|
||||
val = bswap64(val);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
/* RAM case */
|
||||
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
|
||||
switch (endian) {
|
||||
case DEVICE_LITTLE_ENDIAN:
|
||||
val = ldq_le_p(ptr);
|
||||
break;
|
||||
case DEVICE_BIG_ENDIAN:
|
||||
val = ldq_be_p(ptr);
|
||||
break;
|
||||
default:
|
||||
val = ldq_p(ptr);
|
||||
break;
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return val;
|
||||
}
|
||||
|
||||
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldq_internal(as, addr, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldq_internal(as, addr, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldq_internal(as, addr, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
uint8_t val;
|
||||
MemTxResult r;
|
||||
|
||||
r = address_space_rw(as, addr, attrs, &val, 1, 0);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
|
||||
hwaddr addr,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 2;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
bool release_lock = false;
|
||||
|
||||
rcu_read_lock();
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
false);
|
||||
if (l < 2 || !memory_access_is_direct(mr, false)) {
|
||||
release_lock |= prepare_mmio_access(mr);
|
||||
|
||||
/* I/O case */
|
||||
r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
#else
|
||||
if (endian == DEVICE_BIG_ENDIAN) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
/* RAM case */
|
||||
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
|
||||
switch (endian) {
|
||||
case DEVICE_LITTLE_ENDIAN:
|
||||
val = lduw_le_p(ptr);
|
||||
break;
|
||||
case DEVICE_BIG_ENDIAN:
|
||||
val = lduw_be_p(ptr);
|
||||
break;
|
||||
default:
|
||||
val = lduw_p(ptr);
|
||||
break;
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_lduw_internal(as, addr, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_lduw_internal(as, addr, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_lduw_internal(as, addr, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned. The ram page is not masked as dirty
|
||||
and the code inside is not invalidated. It is useful if the dirty
|
||||
bits are used to track modified PTEs */
|
||||
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 4;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
uint8_t dirty_log_mask;
|
||||
bool release_lock = false;
|
||||
|
||||
rcu_read_lock();
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
true);
|
||||
if (l < 4 || !memory_access_is_direct(mr, true)) {
|
||||
release_lock |= prepare_mmio_access(mr);
|
||||
|
||||
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
|
||||
} else {
|
||||
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
|
||||
stl_p(ptr, val);
|
||||
|
||||
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
|
||||
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
|
||||
cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
|
||||
4, dirty_log_mask);
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline void address_space_stl_internal(AddressSpace *as,
|
||||
hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 4;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
bool release_lock = false;
|
||||
|
||||
rcu_read_lock();
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
true);
|
||||
if (l < 4 || !memory_access_is_direct(mr, true)) {
|
||||
release_lock |= prepare_mmio_access(mr);
|
||||
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
#else
|
||||
if (endian == DEVICE_BIG_ENDIAN) {
|
||||
val = bswap32(val);
|
||||
}
|
||||
#endif
|
||||
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
|
||||
} else {
|
||||
/* RAM case */
|
||||
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
|
||||
switch (endian) {
|
||||
case DEVICE_LITTLE_ENDIAN:
|
||||
stl_le_p(ptr, val);
|
||||
break;
|
||||
case DEVICE_BIG_ENDIAN:
|
||||
stl_be_p(ptr, val);
|
||||
break;
|
||||
default:
|
||||
stl_p(ptr, val);
|
||||
break;
|
||||
}
|
||||
invalidate_and_set_dirty(mr, addr1, 4);
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stl_internal(as, addr, val, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stl_internal(as, addr, val, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stl_internal(as, addr, val, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
uint8_t v = val;
|
||||
MemTxResult r;
|
||||
|
||||
r = address_space_rw(as, addr, attrs, &v, 1, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline void address_space_stw_internal(AddressSpace *as,
|
||||
hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 2;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
bool release_lock = false;
|
||||
|
||||
rcu_read_lock();
|
||||
mr = address_space_translate(as, addr, &addr1, &l, true);
|
||||
if (l < 2 || !memory_access_is_direct(mr, true)) {
|
||||
release_lock |= prepare_mmio_access(mr);
|
||||
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
#else
|
||||
if (endian == DEVICE_BIG_ENDIAN) {
|
||||
val = bswap16(val);
|
||||
}
|
||||
#endif
|
||||
r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
|
||||
} else {
|
||||
/* RAM case */
|
||||
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
|
||||
switch (endian) {
|
||||
case DEVICE_LITTLE_ENDIAN:
|
||||
stw_le_p(ptr, val);
|
||||
break;
|
||||
case DEVICE_BIG_ENDIAN:
|
||||
stw_be_p(ptr, val);
|
||||
break;
|
||||
default:
|
||||
stw_p(ptr, val);
|
||||
break;
|
||||
}
|
||||
invalidate_and_set_dirty(mr, addr1, 2);
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stw_internal(as, addr, val, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stw_internal(as, addr, val, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stw_internal(as, addr, val, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
MemTxResult r;
|
||||
val = tswap64(val);
|
||||
r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
MemTxResult r;
|
||||
val = cpu_to_le64(val);
|
||||
r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
MemTxResult r;
|
||||
val = cpu_to_be64(val);
|
||||
r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
|
||||
{
|
||||
address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
|
||||
{
|
||||
address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
|
||||
{
|
||||
address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
#define ARG1_DECL MemoryRegionCache *cache
|
||||
#define ARG1 cache
|
||||
#define SUFFIX _cached
|
||||
#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
|
||||
#define IS_DIRECT(mr, is_write) true
|
||||
#define MAP_RAM(mr, ofs) (cache->ptr + (ofs - cache->xlat))
|
||||
#define INVALIDATE(mr, ofs, len) ((void)0)
|
||||
#define RCU_READ_LOCK() ((void)0)
|
||||
#define RCU_READ_UNLOCK() ((void)0)
|
||||
#include "memory_ldst.inc.c"
|
||||
|
||||
/* virtual memory access for debug (includes writing to ROM) */
|
||||
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
|
||||
|
78
hw/9pfs/9p.c
78
hw/9pfs/9p.c
@@ -47,7 +47,7 @@ ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = virtio_pdu_vmarshal(pdu, offset, fmt, ap);
|
||||
ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
@@ -59,7 +59,7 @@ ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = virtio_pdu_vunmarshal(pdu, offset, fmt, ap);
|
||||
ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
@@ -67,7 +67,7 @@ ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
|
||||
|
||||
static void pdu_push_and_notify(V9fsPDU *pdu)
|
||||
{
|
||||
virtio_9p_push_and_notify(pdu);
|
||||
pdu->s->transport->push_and_notify(pdu);
|
||||
}
|
||||
|
||||
static int omode_to_uflags(int8_t mode)
|
||||
@@ -1633,14 +1633,43 @@ out_nofid:
|
||||
pdu_complete(pdu, err);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a QEMUIOVector for a sub-region of PDU iovecs
|
||||
*
|
||||
* @qiov: uninitialized QEMUIOVector
|
||||
* @skip: number of bytes to skip from beginning of PDU
|
||||
* @size: number of bytes to include
|
||||
* @is_write: true - write, false - read
|
||||
*
|
||||
* The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
|
||||
* with qemu_iovec_destroy().
|
||||
*/
|
||||
static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
|
||||
size_t skip, size_t size,
|
||||
bool is_write)
|
||||
{
|
||||
QEMUIOVector elem;
|
||||
struct iovec *iov;
|
||||
unsigned int niov;
|
||||
|
||||
if (is_write) {
|
||||
pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov);
|
||||
} else {
|
||||
pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size);
|
||||
}
|
||||
|
||||
qemu_iovec_init_external(&elem, iov, niov);
|
||||
qemu_iovec_init(qiov, niov);
|
||||
qemu_iovec_concat(qiov, &elem, skip, size);
|
||||
}
|
||||
|
||||
static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
|
||||
uint64_t off, uint32_t max_count)
|
||||
{
|
||||
ssize_t err;
|
||||
size_t offset = 7;
|
||||
uint64_t read_count;
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
VirtQueueElement *elem = v->elems[pdu->idx];
|
||||
QEMUIOVector qiov_full;
|
||||
|
||||
if (fidp->fs.xattr.len < off) {
|
||||
read_count = 0;
|
||||
@@ -1656,9 +1685,11 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
|
||||
}
|
||||
offset += err;
|
||||
|
||||
err = v9fs_pack(elem->in_sg, elem->in_num, offset,
|
||||
v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);
|
||||
err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
|
||||
((char *)fidp->fs.xattr.value) + off,
|
||||
read_count);
|
||||
qemu_iovec_destroy(&qiov_full);
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
@@ -1732,32 +1763,6 @@ static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a QEMUIOVector for a sub-region of PDU iovecs
|
||||
*
|
||||
* @qiov: uninitialized QEMUIOVector
|
||||
* @skip: number of bytes to skip from beginning of PDU
|
||||
* @size: number of bytes to include
|
||||
* @is_write: true - write, false - read
|
||||
*
|
||||
* The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
|
||||
* with qemu_iovec_destroy().
|
||||
*/
|
||||
static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
|
||||
size_t skip, size_t size,
|
||||
bool is_write)
|
||||
{
|
||||
QEMUIOVector elem;
|
||||
struct iovec *iov;
|
||||
unsigned int niov;
|
||||
|
||||
virtio_init_iov_from_pdu(pdu, &iov, &niov, is_write);
|
||||
|
||||
qemu_iovec_init_external(&elem, iov, niov);
|
||||
qemu_iovec_init(qiov, niov);
|
||||
qemu_iovec_concat(qiov, &elem, skip, size);
|
||||
}
|
||||
|
||||
static void coroutine_fn v9fs_read(void *opaque)
|
||||
{
|
||||
int32_t fid;
|
||||
@@ -3440,7 +3445,6 @@ void pdu_submit(V9fsPDU *pdu)
|
||||
/* Returns 0 on success, 1 on failure. */
|
||||
int v9fs_device_realize_common(V9fsState *s, Error **errp)
|
||||
{
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
int i, len;
|
||||
struct stat stat;
|
||||
FsDriverEntry *fse;
|
||||
@@ -3451,9 +3455,9 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
|
||||
QLIST_INIT(&s->free_list);
|
||||
QLIST_INIT(&s->active_list);
|
||||
for (i = 0; i < (MAX_REQ - 1); i++) {
|
||||
QLIST_INSERT_HEAD(&s->free_list, &v->pdus[i], next);
|
||||
v->pdus[i].s = s;
|
||||
v->pdus[i].idx = i;
|
||||
QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
|
||||
s->pdus[i].s = s;
|
||||
s->pdus[i].idx = i;
|
||||
}
|
||||
|
||||
v9fs_path_init(&path);
|
||||
@@ -3521,7 +3525,7 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
|
||||
rc = 0;
|
||||
out:
|
||||
if (rc) {
|
||||
if (s->ops->cleanup && s->ctx.private) {
|
||||
if (s->ops && s->ops->cleanup && s->ctx.private) {
|
||||
s->ops->cleanup(&s->ctx);
|
||||
}
|
||||
g_free(s->tag);
|
||||
|
26
hw/9pfs/9p.h
26
hw/9pfs/9p.h
@@ -99,8 +99,8 @@ enum p9_proto_version {
|
||||
V9FS_PROTO_2000L = 0x02,
|
||||
};
|
||||
|
||||
#define P9_NOTAG (u16)(~0)
|
||||
#define P9_NOFID (u32)(~0)
|
||||
#define P9_NOTAG UINT16_MAX
|
||||
#define P9_NOFID UINT32_MAX
|
||||
#define P9_MAXWELEM 16
|
||||
|
||||
#define FID_REFERENCED 0x1
|
||||
@@ -229,6 +229,8 @@ typedef struct V9fsState
|
||||
char *tag;
|
||||
enum p9_proto_version proto_version;
|
||||
int32_t msize;
|
||||
V9fsPDU pdus[MAX_REQ];
|
||||
const struct V9fsTransport *transport;
|
||||
/*
|
||||
* lock ensuring atomic path update
|
||||
* on rename.
|
||||
@@ -342,4 +344,24 @@ void pdu_free(V9fsPDU *pdu);
|
||||
void pdu_submit(V9fsPDU *pdu);
|
||||
void v9fs_reset(V9fsState *s);
|
||||
|
||||
struct V9fsTransport {
|
||||
ssize_t (*pdu_vmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
|
||||
va_list ap);
|
||||
ssize_t (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
|
||||
va_list ap);
|
||||
void (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
|
||||
unsigned int *pniov, size_t size);
|
||||
void (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
|
||||
unsigned int *pniov);
|
||||
void (*push_and_notify)(V9fsPDU *pdu);
|
||||
};
|
||||
|
||||
static inline int v9fs_register_transport(V9fsState *s,
|
||||
const struct V9fsTransport *t)
|
||||
{
|
||||
assert(!s->transport);
|
||||
s->transport = t;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -20,7 +20,9 @@
|
||||
#include "hw/virtio/virtio-access.h"
|
||||
#include "qemu/iov.h"
|
||||
|
||||
void virtio_9p_push_and_notify(V9fsPDU *pdu)
|
||||
static const struct V9fsTransport virtio_9p_transport;
|
||||
|
||||
static void virtio_9p_push_and_notify(V9fsPDU *pdu)
|
||||
{
|
||||
V9fsState *s = pdu->s;
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
@@ -126,6 +128,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
|
||||
v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
|
||||
virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
|
||||
v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
|
||||
v9fs_register_transport(s, &virtio_9p_transport);
|
||||
|
||||
out:
|
||||
return;
|
||||
@@ -148,8 +151,8 @@ static void virtio_9p_reset(VirtIODevice *vdev)
|
||||
v9fs_reset(&v->state);
|
||||
}
|
||||
|
||||
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
|
||||
const char *fmt, va_list ap)
|
||||
static ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
|
||||
const char *fmt, va_list ap)
|
||||
{
|
||||
V9fsState *s = pdu->s;
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
@@ -158,8 +161,8 @@ ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
|
||||
return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
|
||||
}
|
||||
|
||||
ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
|
||||
const char *fmt, va_list ap)
|
||||
static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
|
||||
const char *fmt, va_list ap)
|
||||
{
|
||||
V9fsState *s = pdu->s;
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
@@ -168,22 +171,37 @@ ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
|
||||
return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
|
||||
}
|
||||
|
||||
void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
|
||||
unsigned int *pniov, bool is_write)
|
||||
/* The size parameter is used by other transports. Do not drop it. */
|
||||
static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
|
||||
unsigned int *pniov, size_t size)
|
||||
{
|
||||
V9fsState *s = pdu->s;
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
VirtQueueElement *elem = v->elems[pdu->idx];
|
||||
|
||||
if (is_write) {
|
||||
*piov = elem->out_sg;
|
||||
*pniov = elem->out_num;
|
||||
} else {
|
||||
*piov = elem->in_sg;
|
||||
*pniov = elem->in_num;
|
||||
}
|
||||
*piov = elem->in_sg;
|
||||
*pniov = elem->in_num;
|
||||
}
|
||||
|
||||
static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
|
||||
unsigned int *pniov)
|
||||
{
|
||||
V9fsState *s = pdu->s;
|
||||
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
|
||||
VirtQueueElement *elem = v->elems[pdu->idx];
|
||||
|
||||
*piov = elem->out_sg;
|
||||
*pniov = elem->out_num;
|
||||
}
|
||||
|
||||
static const struct V9fsTransport virtio_9p_transport = {
|
||||
.pdu_vmarshal = virtio_pdu_vmarshal,
|
||||
.pdu_vunmarshal = virtio_pdu_vunmarshal,
|
||||
.init_in_iov_from_pdu = virtio_init_in_iov_from_pdu,
|
||||
.init_out_iov_from_pdu = virtio_init_out_iov_from_pdu,
|
||||
.push_and_notify = virtio_9p_push_and_notify,
|
||||
};
|
||||
|
||||
/* virtio-9p device */
|
||||
|
||||
static const VMStateDescription vmstate_virtio_9p = {
|
||||
|
@@ -10,20 +10,10 @@ typedef struct V9fsVirtioState
|
||||
VirtIODevice parent_obj;
|
||||
VirtQueue *vq;
|
||||
size_t config_size;
|
||||
V9fsPDU pdus[MAX_REQ];
|
||||
VirtQueueElement *elems[MAX_REQ];
|
||||
V9fsState state;
|
||||
} V9fsVirtioState;
|
||||
|
||||
void virtio_9p_push_and_notify(V9fsPDU *pdu);
|
||||
|
||||
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
|
||||
const char *fmt, va_list ap);
|
||||
ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
|
||||
const char *fmt, va_list ap);
|
||||
void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
|
||||
unsigned int *pniov, bool is_write);
|
||||
|
||||
#define TYPE_VIRTIO_9P "virtio-9p-device"
|
||||
#define VIRTIO_9P(obj) \
|
||||
OBJECT_CHECK(V9fsVirtioState, (obj), TYPE_VIRTIO_9P)
|
||||
|
@@ -3,7 +3,7 @@
|
||||
#ifndef HW_ALPHA_SYS_H
|
||||
#define HW_ALPHA_SYS_H
|
||||
|
||||
#include "target-alpha/cpu-qom.h"
|
||||
#include "target/alpha/cpu-qom.h"
|
||||
#include "hw/pci/pci.h"
|
||||
#include "hw/pci/pci_host.h"
|
||||
#include "hw/ide.h"
|
||||
|
@@ -34,13 +34,18 @@ typedef struct AspeedBoardState {
|
||||
typedef struct AspeedBoardConfig {
|
||||
const char *soc_name;
|
||||
uint32_t hw_strap1;
|
||||
const char *fmc_model;
|
||||
const char *spi_model;
|
||||
uint32_t num_cs;
|
||||
} AspeedBoardConfig;
|
||||
|
||||
enum {
|
||||
PALMETTO_BMC,
|
||||
AST2500_EVB,
|
||||
ROMULUS_BMC,
|
||||
};
|
||||
|
||||
/* Palmetto hardware value: 0x120CE416 */
|
||||
#define PALMETTO_BMC_HW_STRAP1 ( \
|
||||
SCU_AST2400_HW_STRAP_DRAM_SIZE(DRAM_SIZE_256MB) | \
|
||||
SCU_AST2400_HW_STRAP_DRAM_CONFIG(2 /* DDR3 with CL=6, CWL=5 */) | \
|
||||
@@ -54,6 +59,7 @@ enum {
|
||||
SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \
|
||||
SCU_AST2400_HW_STRAP_BOOT_MODE(AST2400_SPI_BOOT))
|
||||
|
||||
/* AST2500 evb hardware value: 0xF100C2E6 */
|
||||
#define AST2500_EVB_HW_STRAP1 (( \
|
||||
AST2500_HW_STRAP1_DEFAULTS | \
|
||||
SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \
|
||||
@@ -64,9 +70,38 @@ enum {
|
||||
SCU_HW_STRAP_MAC0_RGMII) & \
|
||||
~SCU_HW_STRAP_2ND_BOOT_WDT)
|
||||
|
||||
/* Romulus hardware value: 0xF10AD206 */
|
||||
#define ROMULUS_BMC_HW_STRAP1 ( \
|
||||
AST2500_HW_STRAP1_DEFAULTS | \
|
||||
SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \
|
||||
SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \
|
||||
SCU_AST2500_HW_STRAP_UART_DEBUG | \
|
||||
SCU_AST2500_HW_STRAP_DDR4_ENABLE | \
|
||||
SCU_AST2500_HW_STRAP_ACPI_ENABLE | \
|
||||
SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER))
|
||||
|
||||
static const AspeedBoardConfig aspeed_boards[] = {
|
||||
[PALMETTO_BMC] = { "ast2400-a0", PALMETTO_BMC_HW_STRAP1 },
|
||||
[AST2500_EVB] = { "ast2500-a1", AST2500_EVB_HW_STRAP1 },
|
||||
[PALMETTO_BMC] = {
|
||||
.soc_name = "ast2400-a1",
|
||||
.hw_strap1 = PALMETTO_BMC_HW_STRAP1,
|
||||
.fmc_model = "n25q256a",
|
||||
.spi_model = "mx25l25635e",
|
||||
.num_cs = 1,
|
||||
},
|
||||
[AST2500_EVB] = {
|
||||
.soc_name = "ast2500-a1",
|
||||
.hw_strap1 = AST2500_EVB_HW_STRAP1,
|
||||
.fmc_model = "n25q256a",
|
||||
.spi_model = "mx25l25635e",
|
||||
.num_cs = 1,
|
||||
},
|
||||
[ROMULUS_BMC] = {
|
||||
.soc_name = "ast2500-a1",
|
||||
.hw_strap1 = ROMULUS_BMC_HW_STRAP1,
|
||||
.fmc_model = "n25q256a",
|
||||
.spi_model = "mx66l1g45g",
|
||||
.num_cs = 2,
|
||||
},
|
||||
};
|
||||
|
||||
static void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
|
||||
@@ -112,6 +147,8 @@ static void aspeed_board_init(MachineState *machine,
|
||||
&error_abort);
|
||||
object_property_set_int(OBJECT(&bmc->soc), cfg->hw_strap1, "hw-strap1",
|
||||
&error_abort);
|
||||
object_property_set_int(OBJECT(&bmc->soc), cfg->num_cs, "num-cs",
|
||||
&error_abort);
|
||||
object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
|
||||
&error_abort);
|
||||
|
||||
@@ -128,8 +165,8 @@ static void aspeed_board_init(MachineState *machine,
|
||||
object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
|
||||
&error_abort);
|
||||
|
||||
aspeed_board_init_flashes(&bmc->soc.fmc, "n25q256a", &error_abort);
|
||||
aspeed_board_init_flashes(&bmc->soc.spi[0], "mx25l25635e", &error_abort);
|
||||
aspeed_board_init_flashes(&bmc->soc.fmc, cfg->fmc_model, &error_abort);
|
||||
aspeed_board_init_flashes(&bmc->soc.spi[0], cfg->spi_model, &error_abort);
|
||||
|
||||
aspeed_board_binfo.kernel_filename = machine->kernel_filename;
|
||||
aspeed_board_binfo.initrd_filename = machine->initrd_filename;
|
||||
@@ -188,10 +225,35 @@ static const TypeInfo ast2500_evb_type = {
|
||||
.class_init = ast2500_evb_class_init,
|
||||
};
|
||||
|
||||
static void romulus_bmc_init(MachineState *machine)
|
||||
{
|
||||
aspeed_board_init(machine, &aspeed_boards[ROMULUS_BMC]);
|
||||
}
|
||||
|
||||
static void romulus_bmc_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
MachineClass *mc = MACHINE_CLASS(oc);
|
||||
|
||||
mc->desc = "OpenPOWER Romulus BMC (ARM1176)";
|
||||
mc->init = romulus_bmc_init;
|
||||
mc->max_cpus = 1;
|
||||
mc->no_sdcard = 1;
|
||||
mc->no_floppy = 1;
|
||||
mc->no_cdrom = 1;
|
||||
mc->no_parallel = 1;
|
||||
}
|
||||
|
||||
static const TypeInfo romulus_bmc_type = {
|
||||
.name = MACHINE_TYPE_NAME("romulus-bmc"),
|
||||
.parent = TYPE_MACHINE,
|
||||
.class_init = romulus_bmc_class_init,
|
||||
};
|
||||
|
||||
static void aspeed_machine_init(void)
|
||||
{
|
||||
type_register_static(&palmetto_bmc_type);
|
||||
type_register_static(&ast2500_evb_type);
|
||||
type_register_static(&romulus_bmc_type);
|
||||
}
|
||||
|
||||
type_init(aspeed_machine_init)
|
||||
|
@@ -29,6 +29,7 @@
|
||||
#define ASPEED_SOC_VIC_BASE 0x1E6C0000
|
||||
#define ASPEED_SOC_SDMC_BASE 0x1E6E0000
|
||||
#define ASPEED_SOC_SCU_BASE 0x1E6E2000
|
||||
#define ASPEED_SOC_SRAM_BASE 0x1E720000
|
||||
#define ASPEED_SOC_TIMER_BASE 0x1E782000
|
||||
#define ASPEED_SOC_I2C_BASE 0x1E78A000
|
||||
|
||||
@@ -47,15 +48,47 @@ static const char *aspeed_soc_ast2500_typenames[] = {
|
||||
"aspeed.smc.ast2500-spi1", "aspeed.smc.ast2500-spi2" };
|
||||
|
||||
static const AspeedSoCInfo aspeed_socs[] = {
|
||||
{ "ast2400-a0", "arm926", AST2400_A0_SILICON_REV, AST2400_SDRAM_BASE,
|
||||
1, aspeed_soc_ast2400_spi_bases,
|
||||
"aspeed.smc.fmc", aspeed_soc_ast2400_typenames },
|
||||
{ "ast2400", "arm926", AST2400_A0_SILICON_REV, AST2400_SDRAM_BASE,
|
||||
1, aspeed_soc_ast2400_spi_bases,
|
||||
"aspeed.smc.fmc", aspeed_soc_ast2400_typenames },
|
||||
{ "ast2500-a1", "arm1176", AST2500_A1_SILICON_REV, AST2500_SDRAM_BASE,
|
||||
2, aspeed_soc_ast2500_spi_bases,
|
||||
"aspeed.smc.ast2500-fmc", aspeed_soc_ast2500_typenames },
|
||||
{
|
||||
.name = "ast2400-a0",
|
||||
.cpu_model = "arm926",
|
||||
.silicon_rev = AST2400_A0_SILICON_REV,
|
||||
.sdram_base = AST2400_SDRAM_BASE,
|
||||
.sram_size = 0x8000,
|
||||
.spis_num = 1,
|
||||
.spi_bases = aspeed_soc_ast2400_spi_bases,
|
||||
.fmc_typename = "aspeed.smc.fmc",
|
||||
.spi_typename = aspeed_soc_ast2400_typenames,
|
||||
}, {
|
||||
.name = "ast2400-a1",
|
||||
.cpu_model = "arm926",
|
||||
.silicon_rev = AST2400_A1_SILICON_REV,
|
||||
.sdram_base = AST2400_SDRAM_BASE,
|
||||
.sram_size = 0x8000,
|
||||
.spis_num = 1,
|
||||
.spi_bases = aspeed_soc_ast2400_spi_bases,
|
||||
.fmc_typename = "aspeed.smc.fmc",
|
||||
.spi_typename = aspeed_soc_ast2400_typenames,
|
||||
}, {
|
||||
.name = "ast2400",
|
||||
.cpu_model = "arm926",
|
||||
.silicon_rev = AST2400_A0_SILICON_REV,
|
||||
.sdram_base = AST2400_SDRAM_BASE,
|
||||
.sram_size = 0x8000,
|
||||
.spis_num = 1,
|
||||
.spi_bases = aspeed_soc_ast2400_spi_bases,
|
||||
.fmc_typename = "aspeed.smc.fmc",
|
||||
.spi_typename = aspeed_soc_ast2400_typenames,
|
||||
}, {
|
||||
.name = "ast2500-a1",
|
||||
.cpu_model = "arm1176",
|
||||
.silicon_rev = AST2500_A1_SILICON_REV,
|
||||
.sdram_base = AST2500_SDRAM_BASE,
|
||||
.sram_size = 0x9000,
|
||||
.spis_num = 2,
|
||||
.spi_bases = aspeed_soc_ast2500_spi_bases,
|
||||
.fmc_typename = "aspeed.smc.ast2500-fmc",
|
||||
.spi_typename = aspeed_soc_ast2500_typenames,
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -87,9 +120,13 @@ static void aspeed_soc_init(Object *obj)
|
||||
{
|
||||
AspeedSoCState *s = ASPEED_SOC(obj);
|
||||
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
|
||||
char *cpu_typename;
|
||||
int i;
|
||||
|
||||
s->cpu = cpu_arm_init(sc->info->cpu_model);
|
||||
cpu_typename = g_strdup_printf("%s-" TYPE_ARM_CPU, sc->info->cpu_model);
|
||||
object_initialize(&s->cpu, sizeof(s->cpu), cpu_typename);
|
||||
object_property_add_child(obj, "cpu", OBJECT(&s->cpu), NULL);
|
||||
g_free(cpu_typename);
|
||||
|
||||
object_initialize(&s->vic, sizeof(s->vic), TYPE_ASPEED_VIC);
|
||||
object_property_add_child(obj, "vic", OBJECT(&s->vic), NULL);
|
||||
@@ -116,11 +153,13 @@ static void aspeed_soc_init(Object *obj)
|
||||
object_initialize(&s->fmc, sizeof(s->fmc), sc->info->fmc_typename);
|
||||
object_property_add_child(obj, "fmc", OBJECT(&s->fmc), NULL);
|
||||
qdev_set_parent_bus(DEVICE(&s->fmc), sysbus_get_default());
|
||||
object_property_add_alias(obj, "num-cs", OBJECT(&s->fmc), "num-cs",
|
||||
&error_abort);
|
||||
|
||||
for (i = 0; i < sc->info->spis_num; i++) {
|
||||
object_initialize(&s->spi[i], sizeof(s->spi[i]),
|
||||
sc->info->spi_typename[i]);
|
||||
object_property_add_child(obj, "spi", OBJECT(&s->spi[i]), NULL);
|
||||
object_property_add_child(obj, "spi[*]", OBJECT(&s->spi[i]), NULL);
|
||||
qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
|
||||
}
|
||||
|
||||
@@ -146,6 +185,24 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
|
||||
memory_region_add_subregion_overlap(get_system_memory(),
|
||||
ASPEED_SOC_IOMEM_BASE, &s->iomem, -1);
|
||||
|
||||
/* CPU */
|
||||
object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
return;
|
||||
}
|
||||
|
||||
/* SRAM */
|
||||
memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram",
|
||||
sc->info->sram_size, &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
return;
|
||||
}
|
||||
vmstate_register_ram_global(&s->sram);
|
||||
memory_region_add_subregion(get_system_memory(), ASPEED_SOC_SRAM_BASE,
|
||||
&s->sram);
|
||||
|
||||
/* VIC */
|
||||
object_property_set_bool(OBJECT(&s->vic), true, "realized", &err);
|
||||
if (err) {
|
||||
@@ -154,9 +211,9 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
|
||||
}
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, ASPEED_SOC_VIC_BASE);
|
||||
sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0,
|
||||
qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));
|
||||
qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ));
|
||||
sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1,
|
||||
qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ));
|
||||
qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ));
|
||||
|
||||
/* Timer */
|
||||
object_property_set_bool(OBJECT(&s->timerctrl), true, "realized", &err);
|
||||
@@ -195,10 +252,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
|
||||
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
|
||||
qdev_get_gpio_in(DEVICE(&s->vic), 12));
|
||||
|
||||
/* FMC */
|
||||
object_property_set_int(OBJECT(&s->fmc), 1, "num-cs", &err);
|
||||
object_property_set_bool(OBJECT(&s->fmc), true, "realized", &local_err);
|
||||
error_propagate(&err, local_err);
|
||||
/* FMC, The number of CS is set at the board level */
|
||||
object_property_set_bool(OBJECT(&s->fmc), true, "realized", &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
return;
|
||||
@@ -240,12 +295,6 @@ static void aspeed_soc_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
sc->info = (AspeedSoCInfo *) data;
|
||||
dc->realize = aspeed_soc_realize;
|
||||
|
||||
/*
|
||||
* Reason: creates an ARM CPU, thus use after free(), see
|
||||
* arm_cpu_class_init()
|
||||
*/
|
||||
dc->cannot_destroy_with_object_finalize_yet = true;
|
||||
}
|
||||
|
||||
static const TypeInfo aspeed_soc_type_info = {
|
||||
|
@@ -1449,17 +1449,10 @@ static const VMStateDescription vmstate_pxa2xx_i2c = {
|
||||
}
|
||||
};
|
||||
|
||||
static int pxa2xx_i2c_slave_init(I2CSlave *i2c)
|
||||
{
|
||||
/* Nothing to do. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pxa2xx_i2c_slave_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
|
||||
|
||||
k->init = pxa2xx_i2c_slave_init;
|
||||
k->event = pxa2xx_i2c_event;
|
||||
k->recv = pxa2xx_i2c_rx;
|
||||
k->send = pxa2xx_i2c_tx;
|
||||
@@ -2070,7 +2063,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space,
|
||||
}
|
||||
if (!revision)
|
||||
revision = "pxa270";
|
||||
|
||||
|
||||
s->cpu = cpu_arm_init(revision);
|
||||
if (s->cpu == NULL) {
|
||||
fprintf(stderr, "Unable to find CPU definition\n");
|
||||
|
@@ -2,7 +2,7 @@
|
||||
#define STRONGARM_H
|
||||
|
||||
#include "exec/memory.h"
|
||||
#include "target-arm/cpu-qom.h"
|
||||
#include "target/arm/cpu-qom.h"
|
||||
|
||||
#define SA_CS0 0x00000000
|
||||
#define SA_CS1 0x08000000
|
||||
|
@@ -202,12 +202,6 @@ static int tosa_dac_recv(I2CSlave *s)
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int tosa_dac_init(I2CSlave *i2c)
|
||||
{
|
||||
/* Nothing to do. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tosa_tg_init(PXA2xxState *cpu)
|
||||
{
|
||||
I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]);
|
||||
@@ -275,7 +269,6 @@ static void tosa_dac_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
|
||||
|
||||
k->init = tosa_dac_init;
|
||||
k->event = tosa_dac_event;
|
||||
k->recv = tosa_dac_recv;
|
||||
k->send = tosa_dac_send;
|
||||
|
@@ -33,7 +33,7 @@
|
||||
#include "qemu/bitmap.h"
|
||||
#include "trace.h"
|
||||
#include "qom/cpu.h"
|
||||
#include "target-arm/cpu.h"
|
||||
#include "target/arm/cpu.h"
|
||||
#include "hw/acpi/acpi-defs.h"
|
||||
#include "hw/acpi/acpi.h"
|
||||
#include "hw/nvram/fw_cfg.h"
|
||||
|
@@ -1525,7 +1525,7 @@ static void machvirt_machine_init(void)
|
||||
}
|
||||
type_init(machvirt_machine_init);
|
||||
|
||||
static void virt_2_8_instance_init(Object *obj)
|
||||
static void virt_2_9_instance_init(Object *obj)
|
||||
{
|
||||
VirtMachineState *vms = VIRT_MACHINE(obj);
|
||||
|
||||
@@ -1558,10 +1558,25 @@ static void virt_2_8_instance_init(Object *obj)
|
||||
"Valid values are 2, 3 and host", NULL);
|
||||
}
|
||||
|
||||
static void virt_machine_2_8_options(MachineClass *mc)
|
||||
static void virt_machine_2_9_options(MachineClass *mc)
|
||||
{
|
||||
}
|
||||
DEFINE_VIRT_MACHINE_AS_LATEST(2, 8)
|
||||
DEFINE_VIRT_MACHINE_AS_LATEST(2, 9)
|
||||
|
||||
#define VIRT_COMPAT_2_8 \
|
||||
HW_COMPAT_2_8
|
||||
|
||||
static void virt_2_8_instance_init(Object *obj)
|
||||
{
|
||||
virt_2_9_instance_init(obj);
|
||||
}
|
||||
|
||||
static void virt_machine_2_8_options(MachineClass *mc)
|
||||
{
|
||||
virt_machine_2_9_options(mc);
|
||||
SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_8);
|
||||
}
|
||||
DEFINE_VIRT_MACHINE(2, 8)
|
||||
|
||||
#define VIRT_COMPAT_2_7 \
|
||||
HW_COMPAT_2_7
|
||||
|
@@ -263,12 +263,6 @@ static int aer915_recv(I2CSlave *slave)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int aer915_init(I2CSlave *i2c)
|
||||
{
|
||||
/* Nothing to do. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static VMStateDescription vmstate_aer915_state = {
|
||||
.name = "aer915",
|
||||
.version_id = 1,
|
||||
@@ -285,7 +279,6 @@ static void aer915_class_init(ObjectClass *klass, void *data)
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
|
||||
|
||||
k->init = aer915_init;
|
||||
k->event = aer915_event;
|
||||
k->recv = aer915_recv;
|
||||
k->send = aer915_send;
|
||||
|
@@ -203,6 +203,7 @@ static const FlashPartInfo known_devices[] = {
|
||||
{ INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) },
|
||||
{ INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) },
|
||||
{ INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) },
|
||||
{ INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K) },
|
||||
|
||||
/* Micron */
|
||||
{ INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) },
|
||||
|
@@ -707,6 +707,19 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
|
||||
int num_devices;
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (pfl->sector_len == 0) {
|
||||
error_setg(errp, "attribute \"sector-length\" not specified or zero.");
|
||||
return;
|
||||
}
|
||||
if (pfl->nb_blocs == 0) {
|
||||
error_setg(errp, "attribute \"num-blocks\" not specified or zero.");
|
||||
return;
|
||||
}
|
||||
if (pfl->name == NULL) {
|
||||
error_setg(errp, "attribute \"name\" not specified.");
|
||||
return;
|
||||
}
|
||||
|
||||
total_len = pfl->sector_len * pfl->nb_blocs;
|
||||
|
||||
/* These are only used to expose the parameters of each device
|
||||
|
@@ -600,6 +600,19 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp)
|
||||
int ret;
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (pfl->sector_len == 0) {
|
||||
error_setg(errp, "attribute \"sector-length\" not specified or zero.");
|
||||
return;
|
||||
}
|
||||
if (pfl->nb_blocs == 0) {
|
||||
error_setg(errp, "attribute \"num-blocks\" not specified or zero.");
|
||||
return;
|
||||
}
|
||||
if (pfl->name == NULL) {
|
||||
error_setg(errp, "attribute \"name\" not specified.");
|
||||
return;
|
||||
}
|
||||
|
||||
chip_len = pfl->sector_len * pfl->nb_blocs;
|
||||
/* XXX: to be fixed */
|
||||
#if 0
|
||||
|
@@ -588,13 +588,19 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
 
     blk_io_plug(s->blk);
 
-    while ((req = virtio_blk_get_request(s, vq))) {
-        if (virtio_blk_handle_request(req, &mrb)) {
-            virtqueue_detach_element(req->vq, &req->elem, 0);
-            virtio_blk_free_request(req);
-            break;
+    do {
+        virtio_queue_set_notification(vq, 0);
+
+        while ((req = virtio_blk_get_request(s, vq))) {
+            if (virtio_blk_handle_request(req, &mrb)) {
+                virtqueue_detach_element(req->vq, &req->elem, 0);
+                virtio_blk_free_request(req);
+                break;
+            }
         }
-    }
 
+        virtio_queue_set_notification(vq, 1);
+    } while (!virtio_queue_empty(vq));
+
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s->blk, &mrb);
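The hunk above is an instance of the kick-suppression pattern: turn guest notifications off while draining the ring, turn them back on, then check the ring once more so a request that arrived in the window is not missed. A minimal sketch of the shape, with process_queued_requests standing in for the request loop (the helper name is illustrative, not part of the patch):

    do {
        virtio_queue_set_notification(vq, 0);   /* suppress further guest kicks */
        process_queued_requests(s, vq);         /* hypothetical drain helper */
        virtio_queue_set_notification(vq, 1);   /* re-arm notifications */
    } while (!virtio_queue_empty(vq));          /* close the re-arm race */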
@@ -138,9 +138,10 @@ static void fifo_trigger_update(void *opaque)
 {
     CadenceUARTState *s = opaque;
 
-    s->r[R_CISR] |= UART_INTR_TIMEOUT;
-
-    uart_update_status(s);
+    if (s->r[R_RTOR]) {
+        s->r[R_CISR] |= UART_INTR_TIMEOUT;
+        uart_update_status(s);
+    }
 }
 
 static void uart_rx_reset(CadenceUARTState *s)
@@ -502,6 +503,13 @@ static int cadence_uart_post_load(void *opaque, int version_id)
 {
     CadenceUARTState *s = opaque;
 
+    /* Ensure these two aren't invalid numbers */
+    if (s->r[R_BRGR] < 1 || s->r[R_BRGR] & ~0xFFFF ||
+        s->r[R_BDIV] <= 3 || s->r[R_BDIV] & ~0xFF) {
+        /* Value is invalid, abort */
+        return 1;
+    }
+
     uart_parameters_setup(s);
     uart_update_status(s);
     return 0;
@@ -291,8 +291,11 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
|
||||
return;
|
||||
}
|
||||
|
||||
virgl_renderer_resource_attach_iov(att_rb.resource_id,
|
||||
res_iovs, att_rb.nr_entries);
|
||||
ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
|
||||
res_iovs, att_rb.nr_entries);
|
||||
|
||||
if (ret != 0)
|
||||
virtio_gpu_cleanup_mapping_iov(res_iovs, att_rb.nr_entries);
|
||||
}
|
||||
|
||||
static void virgl_resource_detach_backing(VirtIOGPU *g,
|
||||
@@ -371,8 +374,12 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
|
||||
|
||||
virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
|
||||
&max_size);
|
||||
resp = g_malloc(sizeof(*resp) + max_size);
|
||||
if (!max_size) {
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
|
||||
return;
|
||||
}
|
||||
|
||||
resp = g_malloc(sizeof(*resp) + max_size);
|
||||
resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
|
||||
virgl_renderer_fill_caps(gc.capset_id,
|
||||
gc.capset_version,
|
||||
|
@@ -28,6 +28,8 @@
|
||||
static struct virtio_gpu_simple_resource*
|
||||
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
|
||||
|
||||
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);
|
||||
|
||||
#ifdef CONFIG_VIRGL
|
||||
#include <virglrenderer.h>
|
||||
#define VIRGL(_g, _virgl, _simple, ...) \
|
||||
@@ -338,10 +340,14 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
|
||||
return;
|
||||
}
|
||||
res->image = pixman_image_create_bits(pformat,
|
||||
c2d.width,
|
||||
c2d.height,
|
||||
NULL, 0);
|
||||
|
||||
res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height;
|
||||
if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
|
||||
res->image = pixman_image_create_bits(pformat,
|
||||
c2d.width,
|
||||
c2d.height,
|
||||
NULL, 0);
|
||||
}
|
||||
|
||||
if (!res->image) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
@@ -353,13 +359,16 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
|
||||
}
|
||||
|
||||
QTAILQ_INSERT_HEAD(&g->reslist, res, next);
|
||||
g->hostmem += res->hostmem;
|
||||
}
|
||||
|
||||
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
|
||||
struct virtio_gpu_simple_resource *res)
|
||||
{
|
||||
pixman_image_unref(res->image);
|
||||
virtio_gpu_cleanup_mapping(res);
|
||||
QTAILQ_REMOVE(&g->reslist, res, next);
|
||||
g->hostmem -= res->hostmem;
|
||||
g_free(res);
|
||||
}
|
||||
|
||||
@@ -705,6 +714,11 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
|
||||
return;
|
||||
}
|
||||
|
||||
if (res->iov) {
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
|
||||
return;
|
||||
}
|
||||
|
||||
ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
|
||||
if (ret != 0) {
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
|
||||
@@ -1241,6 +1255,8 @@ static const VMStateDescription vmstate_virtio_gpu = {
|
||||
|
||||
static Property virtio_gpu_properties[] = {
|
||||
DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
|
||||
DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
|
||||
256 * 1024 * 1024),
|
||||
#ifdef CONFIG_VIRGL
|
||||
DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
|
||||
VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
|
||||
|
@@ -260,7 +260,11 @@ static int i2c_slave_qdev_init(DeviceState *dev)
     I2CSlave *s = I2C_SLAVE(dev);
     I2CSlaveClass *sc = I2C_SLAVE_GET_CLASS(s);
 
-    return sc->init(s);
+    if (sc->init) {
+        return sc->init(s);
+    }
+
+    return 0;
 }
 
 DeviceState *i2c_create_slave(I2CBus *bus, const char *name, uint8_t addr)
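With the NULL check above, the init hook becomes optional, which is why the later hunks simply delete the empty init callbacks of pxa2xx, tosa, aer915 and ds1338. A minimal sketch of a slave class that registers only what it needs (the mydev_* names are placeholders, not from this series):

    static void mydev_class_init(ObjectClass *klass, void *data)
    {
        I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);

        /* No k->init: the core now falls back to "return 0" when it is NULL. */
        k->event = mydev_event;
        k->recv = mydev_recv;
        k->send = mydev_send;
    }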
@@ -29,7 +29,7 @@
|
||||
#include "hw/pci/pci.h"
|
||||
#include "qom/cpu.h"
|
||||
#include "hw/i386/pc.h"
|
||||
#include "target-i386/cpu.h"
|
||||
#include "target/i386/cpu.h"
|
||||
#include "hw/timer/hpet.h"
|
||||
#include "hw/acpi/acpi-defs.h"
|
||||
#include "hw/acpi/acpi.h"
|
||||
|
@@ -15,7 +15,7 @@
|
||||
#include "hw/i386/apic_internal.h"
|
||||
#include "hw/pci/msi.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "target-i386/kvm_i386.h"
|
||||
#include "target/i386/kvm_i386.h"
|
||||
|
||||
static inline void kvm_apic_set_reg(struct kvm_lapic_state *kapic,
|
||||
int reg_id, uint32_t val)
|
||||
|
@@ -36,6 +36,13 @@ typedef struct KVMClockState {
|
||||
|
||||
uint64_t clock;
|
||||
bool clock_valid;
|
||||
|
||||
/* whether machine type supports reliable KVM_GET_CLOCK */
|
||||
bool mach_use_reliable_get_clock;
|
||||
|
||||
/* whether the 'clock' value was obtained in a host with
|
||||
* reliable KVM_GET_CLOCK */
|
||||
bool clock_is_reliable;
|
||||
} KVMClockState;
|
||||
|
||||
struct pvclock_vcpu_time_info {
|
||||
@@ -81,6 +88,60 @@ static uint64_t kvmclock_current_nsec(KVMClockState *s)
     return nsec + time.system_time;
 }
 
+static void kvm_update_clock(KVMClockState *s)
+{
+    struct kvm_clock_data data;
+    int ret;
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
+        abort();
+    }
+    s->clock = data.clock;
+
+    /* If kvm_has_adjust_clock_stable() is false, KVM_GET_CLOCK returns
+     * essentially CLOCK_MONOTONIC plus a guest-specific adjustment. This
+     * can drift from the TSC-based value that is computed by the guest,
+     * so we need to go through kvmclock_current_nsec(). If
+     * kvm_has_adjust_clock_stable() is true, and the flags contain
+     * KVM_CLOCK_TSC_STABLE, then KVM_GET_CLOCK returns a TSC-based value
+     * and kvmclock_current_nsec() is not necessary.
+     *
+     * Here, however, we need not check KVM_CLOCK_TSC_STABLE. This is because:
+     *
+     * - if the host has disabled the kvmclock master clock, the guest already
+     *   has protection against time going backwards. This "safety net" is only
+     *   absent when kvmclock is stable;
+     *
+     * - therefore, we can replace a check like
+     *
+     *       if last KVM_GET_CLOCK was not reliable then
+     *           read from memory
+     *
+     *   with
+     *
+     *       if last KVM_GET_CLOCK was not reliable && masterclock is enabled
+     *           read from memory
+     *
+     *   However:
+     *
+     *   - if kvm_has_adjust_clock_stable() returns false, the left side is
+     *     always true (KVM_GET_CLOCK is never reliable), and the right side is
+     *     unknown (because we don't have data.flags). We must assume it's true
+     *     and read from memory.
+     *
+     *   - if kvm_has_adjust_clock_stable() returns true, the result of the &&
+     *     is always false (masterclock is enabled iff KVM_GET_CLOCK is reliable)
+     *
+     *   So we can just use this instead:
+     *
+     *       if !kvm_has_adjust_clock_stable() then
+     *           read from memory
+     */
+    s->clock_is_reliable = kvm_has_adjust_clock_stable();
+}
|
||||
|
||||
static void kvmclock_vm_state_change(void *opaque, int running,
|
||||
RunState state)
|
||||
{
|
||||
@@ -91,15 +152,21 @@ static void kvmclock_vm_state_change(void *opaque, int running,
|
||||
|
||||
if (running) {
|
||||
struct kvm_clock_data data = {};
|
||||
uint64_t time_at_migration = kvmclock_current_nsec(s);
|
||||
|
||||
/*
|
||||
* If the host where s->clock was read did not support reliable
|
||||
* KVM_GET_CLOCK, read kvmclock value from memory.
|
||||
*/
|
||||
if (!s->clock_is_reliable) {
|
||||
uint64_t pvclock_via_mem = kvmclock_current_nsec(s);
|
||||
/* We can't rely on the saved clock value, just discard it */
|
||||
if (pvclock_via_mem) {
|
||||
s->clock = pvclock_via_mem;
|
||||
}
|
||||
}
|
||||
|
||||
s->clock_valid = false;
|
||||
|
||||
/* We can't rely on the migrated clock value, just discard it */
|
||||
if (time_at_migration) {
|
||||
s->clock = time_at_migration;
|
||||
}
|
||||
|
||||
data.clock = s->clock;
|
||||
ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
|
||||
if (ret < 0) {
|
||||
@@ -120,8 +187,6 @@ static void kvmclock_vm_state_change(void *opaque, int running,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
struct kvm_clock_data data;
|
||||
int ret;
|
||||
|
||||
if (s->clock_valid) {
|
||||
return;
|
||||
@@ -129,13 +194,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
|
||||
|
||||
kvm_synchronize_all_tsc();
|
||||
|
||||
ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
|
||||
abort();
|
||||
}
|
||||
s->clock = data.clock;
|
||||
|
||||
kvm_update_clock(s);
|
||||
/*
|
||||
* If the VM is stopped, declare the clock state valid to
|
||||
* avoid re-reading it on next vmsave (which would return
|
||||
@@ -149,25 +208,78 @@ static void kvmclock_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
KVMClockState *s = KVM_CLOCK(dev);
|
||||
|
||||
kvm_update_clock(s);
|
||||
|
||||
qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s);
|
||||
}
|
||||
|
||||
static bool kvmclock_clock_is_reliable_needed(void *opaque)
|
||||
{
|
||||
KVMClockState *s = opaque;
|
||||
|
||||
return s->mach_use_reliable_get_clock;
|
||||
}
|
||||
|
||||
static const VMStateDescription kvmclock_reliable_get_clock = {
|
||||
.name = "kvmclock/clock_is_reliable",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = kvmclock_clock_is_reliable_needed,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_BOOL(clock_is_reliable, KVMClockState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
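The .needed callback ties the subsection to the x-mach-use-reliable-get-clock property, so the extra field is only migrated when the machine type allows it. A hedged sketch of how a machine-compat entry could switch the property off for older machine types; the actual HW_COMPAT change is not part of this excerpt and the entry below is illustrative only:

    /* Illustrative compat property, assumed rather than shown here: */
    {
        .driver   = "kvmclock",
        .property = "x-mach-use-reliable-get-clock",
        .value    = "off",
    },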
|
||||
|
||||
/*
|
||||
* When migrating, read the clock just before migration,
|
||||
* so that the guest clock counts during the events
|
||||
* between:
|
||||
*
|
||||
* * vm_stop()
|
||||
* *
|
||||
* * pre_save()
|
||||
*
|
||||
* This reduces kvmclock difference on migration from 5s
|
||||
* to 0.1s (when max_downtime == 5s), because sending the
|
||||
* final pages of memory (which happens between vm_stop()
|
||||
* and pre_save()) takes max_downtime.
|
||||
*/
|
||||
static void kvmclock_pre_save(void *opaque)
|
||||
{
|
||||
KVMClockState *s = opaque;
|
||||
|
||||
kvm_update_clock(s);
|
||||
}
|
||||
|
||||
static const VMStateDescription kvmclock_vmsd = {
|
||||
.name = "kvmclock",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.pre_save = kvmclock_pre_save,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT64(clock, KVMClockState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
.subsections = (const VMStateDescription * []) {
|
||||
&kvmclock_reliable_get_clock,
|
||||
NULL
|
||||
}
|
||||
};
|
||||
|
||||
static Property kvmclock_properties[] = {
|
||||
DEFINE_PROP_BOOL("x-mach-use-reliable-get-clock", KVMClockState,
|
||||
mach_use_reliable_get_clock, true),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static void kvmclock_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
dc->realize = kvmclock_realize;
|
||||
dc->vmsd = &kvmclock_vmsd;
|
||||
dc->props = kvmclock_properties;
|
||||
}
|
||||
|
||||
static const TypeInfo kvmclock_info = {
|
||||
|
@@ -109,7 +109,7 @@ static uint32_t mb_add_cmdline(MultibootState *s, const char *cmdline)
     hwaddr p = s->offset_cmdlines;
     char *b = (char *)s->mb_buf + p;
 
-    get_opt_value(b, strlen(cmdline) + 1, cmdline);
+    memcpy(b, cmdline, strlen(cmdline) + 1);
     s->offset_cmdlines += strlen(b) + 1;
     return s->mb_buf_phys + p;
 }
@@ -287,7 +287,8 @@ int load_multiboot(FWCfgState *fw_cfg,
|
||||
mbs.offset_bootloader = mbs.offset_cmdlines + cmdline_len;
|
||||
|
||||
if (initrd_filename) {
|
||||
char *next_initrd, not_last;
|
||||
const char *next_initrd;
|
||||
char not_last, tmpbuf[strlen(initrd_filename) + 1];
|
||||
|
||||
mbs.offset_mods = mbs.mb_buf_size;
|
||||
|
||||
@@ -296,25 +297,24 @@ int load_multiboot(FWCfgState *fw_cfg,
|
||||
int mb_mod_length;
|
||||
uint32_t offs = mbs.mb_buf_size;
|
||||
|
||||
next_initrd = (char *)get_opt_value(NULL, 0, initrd_filename);
|
||||
next_initrd = get_opt_value(tmpbuf, sizeof(tmpbuf), initrd_filename);
|
||||
not_last = *next_initrd;
|
||||
*next_initrd = '\0';
|
||||
/* if a space comes after the module filename, treat everything
|
||||
after that as parameters */
|
||||
hwaddr c = mb_add_cmdline(&mbs, initrd_filename);
|
||||
if ((next_space = strchr(initrd_filename, ' ')))
|
||||
hwaddr c = mb_add_cmdline(&mbs, tmpbuf);
|
||||
if ((next_space = strchr(tmpbuf, ' ')))
|
||||
*next_space = '\0';
|
||||
mb_debug("multiboot loading module: %s\n", initrd_filename);
|
||||
mb_mod_length = get_image_size(initrd_filename);
|
||||
mb_debug("multiboot loading module: %s\n", tmpbuf);
|
||||
mb_mod_length = get_image_size(tmpbuf);
|
||||
if (mb_mod_length < 0) {
|
||||
fprintf(stderr, "Failed to open file '%s'\n", initrd_filename);
|
||||
fprintf(stderr, "Failed to open file '%s'\n", tmpbuf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_mod_length + mbs.mb_buf_size);
|
||||
mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size);
|
||||
|
||||
load_image(initrd_filename, (unsigned char *)mbs.mb_buf + offs);
|
||||
load_image(tmpbuf, (unsigned char *)mbs.mb_buf + offs);
|
||||
mb_add_mod(&mbs, mbs.mb_buf_phys + offs,
|
||||
mbs.mb_buf_phys + offs + mb_mod_length, c);
|
||||
|
||||
|
68
hw/i386/pc.c
@@ -400,13 +400,13 @@ static void pc_cmos_init_late(void *opaque)
|
||||
int i, trans;
|
||||
|
||||
val = 0;
|
||||
if (ide_get_geometry(arg->idebus[0], 0,
|
||||
&cylinders, &heads, §ors) >= 0) {
|
||||
if (arg->idebus[0] && ide_get_geometry(arg->idebus[0], 0,
|
||||
&cylinders, &heads, §ors) >= 0) {
|
||||
cmos_init_hd(s, 0x19, 0x1b, cylinders, heads, sectors);
|
||||
val |= 0xf0;
|
||||
}
|
||||
if (ide_get_geometry(arg->idebus[0], 1,
|
||||
&cylinders, &heads, §ors) >= 0) {
|
||||
if (arg->idebus[0] && ide_get_geometry(arg->idebus[0], 1,
|
||||
&cylinders, &heads, §ors) >= 0) {
|
||||
cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors);
|
||||
val |= 0x0f;
|
||||
}
|
||||
@@ -418,7 +418,8 @@ static void pc_cmos_init_late(void *opaque)
|
||||
geometry. It is always such that: 1 <= sects <= 63, 1
|
||||
<= heads <= 16, 1 <= cylinders <= 16383. The BIOS
|
||||
geometry can be different if a translation is done. */
|
||||
if (ide_get_geometry(arg->idebus[i / 2], i % 2,
|
||||
if (arg->idebus[i / 2] &&
|
||||
ide_get_geometry(arg->idebus[i / 2], i % 2,
|
||||
&cylinders, &heads, §ors) >= 0) {
|
||||
trans = ide_get_bios_chs_trans(arg->idebus[i / 2], i % 2) - 1;
|
||||
assert((trans & ~3) == 0);
|
||||
@@ -1535,6 +1536,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
|
||||
ISADevice **rtc_state,
|
||||
bool create_fdctrl,
|
||||
bool no_vmport,
|
||||
bool has_pit,
|
||||
uint32_t hpet_irqs)
|
||||
{
|
||||
int i;
|
||||
@@ -1588,7 +1590,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
|
||||
|
||||
qemu_register_boot_set(pc_boot_set, *rtc_state);
|
||||
|
||||
if (!xen_enabled()) {
|
||||
if (!xen_enabled() && has_pit) {
|
||||
if (kvm_pit_in_kernel()) {
|
||||
pit = kvm_pit_init(isa_bus, 0x40);
|
||||
} else {
|
||||
@@ -2158,6 +2160,48 @@ static void pc_machine_set_nvdimm(Object *obj, bool value, Error **errp)
|
||||
pcms->acpi_nvdimm_state.is_enabled = value;
|
||||
}
|
||||
|
||||
static bool pc_machine_get_smbus(Object *obj, Error **errp)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
|
||||
return pcms->smbus;
|
||||
}
|
||||
|
||||
static void pc_machine_set_smbus(Object *obj, bool value, Error **errp)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
|
||||
pcms->smbus = value;
|
||||
}
|
||||
|
||||
static bool pc_machine_get_sata(Object *obj, Error **errp)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
|
||||
return pcms->sata;
|
||||
}
|
||||
|
||||
static void pc_machine_set_sata(Object *obj, bool value, Error **errp)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
|
||||
pcms->sata = value;
|
||||
}
|
||||
|
||||
static bool pc_machine_get_pit(Object *obj, Error **errp)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
|
||||
return pcms->pit;
|
||||
}
|
||||
|
||||
static void pc_machine_set_pit(Object *obj, bool value, Error **errp)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
|
||||
pcms->pit = value;
|
||||
}
|
||||
|
||||
static void pc_machine_initfn(Object *obj)
|
||||
{
|
||||
PCMachineState *pcms = PC_MACHINE(obj);
|
||||
@@ -2169,6 +2213,9 @@ static void pc_machine_initfn(Object *obj)
|
||||
pcms->acpi_nvdimm_state.is_enabled = false;
|
||||
/* acpi build is enabled by default if machine supports it */
|
||||
pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build;
|
||||
pcms->smbus = true;
|
||||
pcms->sata = true;
|
||||
pcms->pit = true;
|
||||
}
|
||||
|
||||
static void pc_machine_reset(void)
|
||||
@@ -2329,6 +2376,15 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
object_class_property_add_bool(oc, PC_MACHINE_NVDIMM,
|
||||
pc_machine_get_nvdimm, pc_machine_set_nvdimm, &error_abort);
|
||||
|
||||
object_class_property_add_bool(oc, PC_MACHINE_SMBUS,
|
||||
pc_machine_get_smbus, pc_machine_set_smbus, &error_abort);
|
||||
|
||||
object_class_property_add_bool(oc, PC_MACHINE_SATA,
|
||||
pc_machine_get_sata, pc_machine_set_sata, &error_abort);
|
||||
|
||||
object_class_property_add_bool(oc, PC_MACHINE_PIT,
|
||||
pc_machine_get_pit, pc_machine_set_pit, &error_abort);
|
||||
}
|
||||
|
||||
static const TypeInfo pc_machine_info = {
|
||||
|
@@ -235,7 +235,7 @@ static void pc_init1(MachineState *machine,
|
||||
|
||||
/* init basic PC hardware */
|
||||
pc_basic_device_init(isa_bus, pcms->gsi, &rtc_state, true,
|
||||
(pcms->vmport != ON_OFF_AUTO_ON), 0x4);
|
||||
(pcms->vmport != ON_OFF_AUTO_ON), pcms->pit, 0x4);
|
||||
|
||||
pc_nic_init(isa_bus, pci_bus);
|
||||
|
||||
|
@@ -227,32 +227,39 @@ static void pc_q35_init(MachineState *machine)
|
||||
|
||||
/* init basic PC hardware */
|
||||
pc_basic_device_init(isa_bus, pcms->gsi, &rtc_state, !mc->no_floppy,
|
||||
(pcms->vmport != ON_OFF_AUTO_ON), 0xff0104);
|
||||
(pcms->vmport != ON_OFF_AUTO_ON), pcms->pit,
|
||||
0xff0104);
|
||||
|
||||
/* connect pm stuff to lpc */
|
||||
ich9_lpc_pm_init(lpc, pc_machine_is_smm_enabled(pcms));
|
||||
|
||||
/* ahci and SATA device, for q35 1 ahci controller is built-in */
|
||||
ahci = pci_create_simple_multifunction(host_bus,
|
||||
PCI_DEVFN(ICH9_SATA1_DEV,
|
||||
ICH9_SATA1_FUNC),
|
||||
true, "ich9-ahci");
|
||||
idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0");
|
||||
idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1");
|
||||
g_assert(MAX_SATA_PORTS == ICH_AHCI(ahci)->ahci.ports);
|
||||
ide_drive_get(hd, ICH_AHCI(ahci)->ahci.ports);
|
||||
ahci_ide_create_devs(ahci, hd);
|
||||
if (pcms->sata) {
|
||||
/* ahci and SATA device, for q35 1 ahci controller is built-in */
|
||||
ahci = pci_create_simple_multifunction(host_bus,
|
||||
PCI_DEVFN(ICH9_SATA1_DEV,
|
||||
ICH9_SATA1_FUNC),
|
||||
true, "ich9-ahci");
|
||||
idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0");
|
||||
idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1");
|
||||
g_assert(MAX_SATA_PORTS == ICH_AHCI(ahci)->ahci.ports);
|
||||
ide_drive_get(hd, ICH_AHCI(ahci)->ahci.ports);
|
||||
ahci_ide_create_devs(ahci, hd);
|
||||
} else {
|
||||
idebus[0] = idebus[1] = NULL;
|
||||
}
|
||||
|
||||
if (machine_usb(machine)) {
|
||||
/* Should we create 6 UHCI according to ich9 spec? */
|
||||
ehci_create_ich9_with_companions(host_bus, 0x1d);
|
||||
}
|
||||
|
||||
/* TODO: Populate SPD eeprom data. */
|
||||
smbus_eeprom_init(ich9_smb_init(host_bus,
|
||||
PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC),
|
||||
0xb100),
|
||||
8, NULL, 0);
|
||||
if (pcms->smbus) {
|
||||
/* TODO: Populate SPD eeprom data. */
|
||||
smbus_eeprom_init(ich9_smb_init(host_bus,
|
||||
PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC),
|
||||
0xb100),
|
||||
8, NULL, 0);
|
||||
}
|
||||
|
||||
pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state);
|
||||
|
||||
|
@@ -252,6 +252,9 @@ static const uint16_t qcode_to_keycode_set1[Q_KEY_CODE__MAX] = {
|
||||
[Q_KEY_CODE_ASTERISK] = 0x37,
|
||||
[Q_KEY_CODE_LESS] = 0x56,
|
||||
[Q_KEY_CODE_RO] = 0x73,
|
||||
[Q_KEY_CODE_HIRAGANA] = 0x70,
|
||||
[Q_KEY_CODE_HENKAN] = 0x79,
|
||||
[Q_KEY_CODE_YEN] = 0x7d,
|
||||
[Q_KEY_CODE_KP_COMMA] = 0x7e,
|
||||
};
|
||||
|
||||
@@ -394,6 +397,9 @@ static const uint16_t qcode_to_keycode_set2[Q_KEY_CODE__MAX] = {
|
||||
[Q_KEY_CODE_LESS] = 0x61,
|
||||
[Q_KEY_CODE_SYSRQ] = 0x7f,
|
||||
[Q_KEY_CODE_RO] = 0x51,
|
||||
[Q_KEY_CODE_HIRAGANA] = 0x13,
|
||||
[Q_KEY_CODE_HENKAN] = 0x64,
|
||||
[Q_KEY_CODE_YEN] = 0x6a,
|
||||
[Q_KEY_CODE_KP_COMMA] = 0x6d,
|
||||
};
|
||||
|
||||
@@ -504,6 +510,10 @@ static const uint16_t qcode_to_keycode_set3[Q_KEY_CODE__MAX] = {
|
||||
[Q_KEY_CODE_COMMA] = 0x41,
|
||||
[Q_KEY_CODE_DOT] = 0x49,
|
||||
[Q_KEY_CODE_SLASH] = 0x4a,
|
||||
|
||||
[Q_KEY_CODE_HIRAGANA] = 0x87,
|
||||
[Q_KEY_CODE_HENKAN] = 0x86,
|
||||
[Q_KEY_CODE_YEN] = 0x5d,
|
||||
};
|
||||
|
||||
static uint8_t translate_table[256] = {
|
||||
|
@@ -54,6 +54,7 @@ static uint32_t gicd_int_pending(GICv3State *s, int irq)
|
||||
* + the PENDING latch is set OR it is level triggered and the input is 1
|
||||
* + its ENABLE bit is set
|
||||
* + the GICD enable bit for its group is set
|
||||
* + its ACTIVE bit is not set (otherwise it would be Active+Pending)
|
||||
* Conveniently we can bulk-calculate this with bitwise operations.
|
||||
*/
|
||||
uint32_t pend, grpmask;
|
||||
@@ -63,9 +64,11 @@ static uint32_t gicd_int_pending(GICv3State *s, int irq)
|
||||
uint32_t group = *gic_bmp_ptr32(s->group, irq);
|
||||
uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
|
||||
uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
|
||||
uint32_t active = *gic_bmp_ptr32(s->active, irq);
|
||||
|
||||
pend = pending | (~edge_trigger & level);
|
||||
pend &= enable;
|
||||
pend &= ~active;
|
||||
|
||||
if (s->gicd_ctlr & GICD_CTLR_DS) {
|
||||
grpmod = 0;
|
||||
@@ -96,12 +99,14 @@ static uint32_t gicr_int_pending(GICv3CPUState *cs)
|
||||
* + the PENDING latch is set OR it is level triggered and the input is 1
|
||||
* + its ENABLE bit is set
|
||||
* + the GICD enable bit for its group is set
|
||||
* + its ACTIVE bit is not set (otherwise it would be Active+Pending)
|
||||
* Conveniently we can bulk-calculate this with bitwise operations.
|
||||
*/
|
||||
uint32_t pend, grpmask, grpmod;
|
||||
|
||||
pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
|
||||
pend &= cs->gicr_ienabler0;
|
||||
pend &= ~cs->gicr_iactiver0;
|
||||
|
||||
if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
|
||||
grpmod = 0;
|
||||
|
@@ -204,7 +204,8 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
         /* The CPU mp-affinity property is in MPIDR register format; squash
          * the affinity bytes into 32 bits as the GICR_TYPER has them.
          */
-        cpu_affid = (cpu_affid & 0xFF00000000ULL >> 8) | (cpu_affid & 0xFFFFFF);
+        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
+                    (cpu_affid & 0xFFFFFF);
         s->cpu[i].gicr_typer = (cpu_affid << 32) |
                                (1 << 24) |
                                (i << 8) |
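The change above is a C precedence fix: >> binds tighter than &, so the old expression masked with 0xFF000000 instead of moving the Aff3 byte down. A worked example with an arbitrary affinity value (not taken from the patch):

    uint64_t aff = 0x000000AB00CCDDEEULL;      /* Aff3 = 0xAB, Aff2..0 = 0xCC, 0xDD, 0xEE */

    /* old: parsed as aff & (0xFF00000000ULL >> 8), i.e. aff & 0xFF000000 */
    uint64_t broken = (aff & 0xFF00000000ULL >> 8) | (aff & 0xFFFFFF);    /* 0x00CCDDEE, Aff3 lost */

    /* new: parenthesised as intended */
    uint64_t fixed = ((aff & 0xFF00000000ULL) >> 8) | (aff & 0xFFFFFF);   /* 0xABCCDDEE */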
@@ -1118,35 +1118,35 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_bpr[GICV3_G0]),
|
||||
.readfn = icc_bpr_read,
|
||||
.writefn = icc_bpr_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][0]),
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][1]),
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][2]),
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][3]),
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
/* All the ICC_AP1R*_EL1 registers are banked */
|
||||
@@ -1275,7 +1275,7 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_igrpen[GICV3_G0]),
|
||||
.readfn = icc_igrpen_read,
|
||||
.writefn = icc_igrpen_write,
|
||||
},
|
||||
/* This register is banked */
|
||||
@@ -1299,7 +1299,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
|
||||
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL3_RW,
|
||||
.fieldoffset = offsetof(GICv3CPUState, icc_ctlr_el3),
|
||||
.readfn = icc_ctlr_el3_read,
|
||||
.writefn = icc_ctlr_el3_write,
|
||||
},
|
||||
|
@@ -30,7 +30,7 @@
|
||||
#include "hw/i386/ioapic_internal.h"
|
||||
#include "include/hw/pci/msi.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "target-i386/cpu.h"
|
||||
#include "target/i386/cpu.h"
|
||||
#include "hw/i386/apic-msidef.h"
|
||||
#include "hw/i386/x86-iommu.h"
|
||||
|
||||
|
@@ -86,7 +86,7 @@
|
||||
#define BMC_DEV_ID TO_REG(0x1A4)
|
||||
|
||||
#define PROT_KEY_UNLOCK 0x1688A8A8
|
||||
#define SCU_IO_REGION_SIZE 0x20000
|
||||
#define SCU_IO_REGION_SIZE 0x1000
|
||||
|
||||
static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = {
|
||||
[SYS_RST_CTRL] = 0xFFCFFEDCU,
|
||||
@@ -231,6 +231,7 @@ static void aspeed_scu_reset(DeviceState *dev)
|
||||
|
||||
switch (s->silicon_rev) {
|
||||
case AST2400_A0_SILICON_REV:
|
||||
case AST2400_A1_SILICON_REV:
|
||||
reset = ast2400_a0_resets;
|
||||
break;
|
||||
case AST2500_A0_SILICON_REV:
|
||||
@@ -249,6 +250,7 @@ static void aspeed_scu_reset(DeviceState *dev)
|
||||
|
||||
static uint32_t aspeed_silicon_revs[] = {
|
||||
AST2400_A0_SILICON_REV,
|
||||
AST2400_A1_SILICON_REV,
|
||||
AST2500_A0_SILICON_REV,
|
||||
AST2500_A1_SILICON_REV,
|
||||
};
|
||||
|
@@ -119,6 +119,7 @@ static void aspeed_sdmc_write(void *opaque, hwaddr addr, uint64_t data,
|
||||
/* Make sure readonly bits are kept */
|
||||
switch (s->silicon_rev) {
|
||||
case AST2400_A0_SILICON_REV:
|
||||
case AST2400_A1_SILICON_REV:
|
||||
data &= ~ASPEED_SDMC_READONLY_MASK;
|
||||
break;
|
||||
case AST2500_A0_SILICON_REV:
|
||||
@@ -193,6 +194,7 @@ static void aspeed_sdmc_reset(DeviceState *dev)
|
||||
/* Set ram size bit and defaults values */
|
||||
switch (s->silicon_rev) {
|
||||
case AST2400_A0_SILICON_REV:
|
||||
case AST2400_A1_SILICON_REV:
|
||||
s->regs[R_CONF] |=
|
||||
ASPEED_SDMC_VGA_COMPAT |
|
||||
ASPEED_SDMC_DRAM_SIZE(s->ram_bits);
|
||||
@@ -224,6 +226,7 @@ static void aspeed_sdmc_realize(DeviceState *dev, Error **errp)
|
||||
|
||||
switch (s->silicon_rev) {
|
||||
case AST2400_A0_SILICON_REV:
|
||||
case AST2400_A1_SILICON_REV:
|
||||
s->ram_bits = ast2400_rambits(s);
|
||||
break;
|
||||
case AST2500_A0_SILICON_REV:
|
||||
|
@@ -17,7 +17,7 @@
|
||||
#include "hw/qdev.h"
|
||||
#include "hw/isa/isa.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "target-i386/hyperv.h"
|
||||
#include "target/i386/hyperv.h"
|
||||
#include "kvm_i386.h"
|
||||
|
||||
#define HV_TEST_DEV_MAX_SINT_ROUTES 64
|
||||
|
@@ -9,7 +9,7 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "target-ppc/cpu.h"
|
||||
#include "target/ppc/cpu.h"
|
||||
|
||||
#include "hw/ppc/fdt.h"
|
||||
|
||||
|
@@ -22,7 +22,7 @@
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "sysemu/numa.h"
|
||||
#include "hw/hw.h"
|
||||
#include "target-ppc/cpu.h"
|
||||
#include "target/ppc/cpu.h"
|
||||
#include "qemu/log.h"
|
||||
#include "hw/ppc/fdt.h"
|
||||
#include "hw/ppc/ppc.h"
|
||||
|
@@ -20,7 +20,7 @@
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/log.h"
|
||||
#include "target-ppc/cpu.h"
|
||||
#include "target/ppc/cpu.h"
|
||||
#include "hw/ppc/ppc.h"
|
||||
#include "hw/ppc/pnv.h"
|
||||
#include "hw/ppc/pnv_core.h"
|
||||
|
@@ -19,7 +19,7 @@
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "target-ppc/cpu.h"
|
||||
#include "target/ppc/cpu.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/log.h"
|
||||
|
||||
|
@@ -21,7 +21,7 @@
|
||||
#include "hw/hw.h"
|
||||
#include "qemu/log.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "target-ppc/cpu.h"
|
||||
#include "target/ppc/cpu.h"
|
||||
#include "hw/sysbus.h"
|
||||
|
||||
#include "hw/ppc/fdt.h"
|
||||
|
@@ -8,14 +8,14 @@
|
||||
*/
|
||||
#include "hw/cpu/core.h"
|
||||
#include "hw/ppc/spapr_cpu_core.h"
|
||||
#include "target-ppc/cpu.h"
|
||||
#include "target/ppc/cpu.h"
|
||||
#include "hw/ppc/spapr.h"
|
||||
#include "hw/boards.h"
|
||||
#include "qapi/error.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "target-ppc/kvm_ppc.h"
|
||||
#include "target/ppc/kvm_ppc.h"
|
||||
#include "hw/ppc/ppc.h"
|
||||
#include "target-ppc/mmu-hash64.h"
|
||||
#include "target/ppc/mmu-hash64.h"
|
||||
#include "sysemu/numa.h"
|
||||
|
||||
static void spapr_cpu_reset(void *opaque)
|
||||
|
@@ -2157,6 +2157,13 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
|
||||
DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
|
||||
(command & 0xe) == 0xe ? "And Verify " : "",
|
||||
r->req.cmd.lba, len);
|
||||
case VERIFY_10:
|
||||
case VERIFY_12:
|
||||
case VERIFY_16:
|
||||
/* We get here only for BYTCHK == 0x01 and only for scsi-block.
|
||||
* As far as DMA is concerned, we can treat it the same as a write;
|
||||
* scsi_block_do_sgio will send VERIFY commands.
|
||||
*/
|
||||
if (r->req.cmd.buf[1] & 0xe0) {
|
||||
goto illegal_request;
|
||||
}
|
||||
@@ -2712,7 +2719,7 @@ static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
|
||||
case WRITE_VERIFY_16:
|
||||
/* MMC writing cannot be done via DMA helpers, because it sometimes
|
||||
* involves writing beyond the maximum LBA or to negative LBA (lead-in).
|
||||
* We might use scsi_disk_dma_reqops as long as no writing commands are
|
||||
* We might use scsi_block_dma_reqops as long as no writing commands are
|
||||
* seen, but performance usually isn't paramount on optical media. So,
|
||||
* just make scsi-block operate the same as scsi-generic for them.
|
||||
*/
|
||||
|
@@ -420,6 +420,20 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
|
||||
}
|
||||
}
|
||||
|
||||
static inline void virtio_scsi_acquire(VirtIOSCSI *s)
|
||||
{
|
||||
if (s->ctx) {
|
||||
aio_context_acquire(s->ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void virtio_scsi_release(VirtIOSCSI *s)
|
||||
{
|
||||
if (s->ctx) {
|
||||
aio_context_release(s->ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
|
||||
{
|
||||
VirtIOSCSIReq *req;
|
||||
@@ -578,26 +592,32 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
|
||||
void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
|
||||
{
|
||||
VirtIOSCSIReq *req, *next;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
|
||||
|
||||
while ((req = virtio_scsi_pop_req(s, vq))) {
|
||||
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
|
||||
if (!ret) {
|
||||
QTAILQ_INSERT_TAIL(&reqs, req, next);
|
||||
} else if (ret == -EINVAL) {
|
||||
/* The device is broken and shouldn't process any request */
|
||||
while (!QTAILQ_EMPTY(&reqs)) {
|
||||
req = QTAILQ_FIRST(&reqs);
|
||||
QTAILQ_REMOVE(&reqs, req, next);
|
||||
blk_io_unplug(req->sreq->dev->conf.blk);
|
||||
scsi_req_unref(req->sreq);
|
||||
virtqueue_detach_element(req->vq, &req->elem, 0);
|
||||
virtio_scsi_free_req(req);
|
||||
do {
|
||||
virtio_queue_set_notification(vq, 0);
|
||||
|
||||
while ((req = virtio_scsi_pop_req(s, vq))) {
|
||||
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
|
||||
if (!ret) {
|
||||
QTAILQ_INSERT_TAIL(&reqs, req, next);
|
||||
} else if (ret == -EINVAL) {
|
||||
/* The device is broken and shouldn't process any request */
|
||||
while (!QTAILQ_EMPTY(&reqs)) {
|
||||
req = QTAILQ_FIRST(&reqs);
|
||||
QTAILQ_REMOVE(&reqs, req, next);
|
||||
blk_io_unplug(req->sreq->dev->conf.blk);
|
||||
scsi_req_unref(req->sreq);
|
||||
virtqueue_detach_element(req->vq, &req->elem, 0);
|
||||
virtio_scsi_free_req(req);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
virtio_queue_set_notification(vq, 1);
|
||||
} while (ret != -EINVAL && !virtio_queue_empty(vq));
|
||||
|
||||
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
|
||||
virtio_scsi_handle_cmd_req_submit(s, req);
|
||||
@@ -691,10 +711,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
|
||||
return;
|
||||
}
|
||||
|
||||
if (s->dataplane_started) {
|
||||
assert(s->ctx);
|
||||
aio_context_acquire(s->ctx);
|
||||
}
|
||||
virtio_scsi_acquire(s);
|
||||
|
||||
req = virtio_scsi_pop_req(s, vs->event_vq);
|
||||
if (!req) {
|
||||
@@ -730,9 +747,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
|
||||
}
|
||||
virtio_scsi_complete_req(req);
|
||||
out:
|
||||
if (s->dataplane_started) {
|
||||
aio_context_release(s->ctx);
|
||||
}
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
|
||||
void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
|
||||
@@ -778,9 +793,9 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
||||
if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
|
||||
return;
|
||||
}
|
||||
aio_context_acquire(s->ctx);
|
||||
virtio_scsi_acquire(s);
|
||||
blk_set_aio_context(sd->conf.blk, s->ctx);
|
||||
aio_context_release(s->ctx);
|
||||
virtio_scsi_release(s);
|
||||
|
||||
}
|
||||
|
||||
|
@@ -25,7 +25,7 @@
|
||||
Shix 2.0 board by Alexis Polti, described at
|
||||
https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20
|
||||
|
||||
More information in target-sh4/README.sh4
|
||||
More information in target/sh4/README.sh4
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
|
@@ -253,7 +253,8 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
"%s: Tried to change CS0 start address to 0x%"
|
||||
HWADDR_PRIx "\n", s->ctrl->name, seg.addr);
|
||||
return;
|
||||
seg.addr = s->ctrl->flash_window_base;
|
||||
new = aspeed_smc_segment_to_reg(&seg);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -267,8 +268,10 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
|
||||
s->ctrl->segments[cs].size) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
"%s: Tried to change CS%d end address to 0x%"
|
||||
HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr);
|
||||
return;
|
||||
HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr + seg.size);
|
||||
seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size -
|
||||
seg.addr;
|
||||
new = aspeed_smc_segment_to_reg(&seg);
|
||||
}
|
||||
|
||||
/* Keep the segment in the overall flash window */
|
||||
@@ -281,16 +284,14 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
     }
 
     /* Check start address vs. alignment */
-    if (seg.addr % seg.size) {
+    if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) {
         qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not "
                       "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                       s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
     }
 
-    /* And segments should not overlap */
-    if (aspeed_smc_flash_overlap(s, &seg, cs)) {
-        return;
-    }
+    /* And segments should not overlap (in the specs) */
+    aspeed_smc_flash_overlap(s, &seg, cs);
 
     /* All should be fine now to move the region */
     memory_region_transaction_begin();
@@ -198,11 +198,6 @@ static int ds1338_send(I2CSlave *i2c, uint8_t data)
     return 0;
 }
 
-static int ds1338_init(I2CSlave *i2c)
-{
-    return 0;
-}
-
 static void ds1338_reset(DeviceState *dev)
 {
     DS1338State *s = DS1338(dev);
@@ -220,7 +215,6 @@ static void ds1338_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
     I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
 
-    k->init = ds1338_init;
     k->event = ds1338_event;
     k->recv = ds1338_recv;
     k->send = ds1338_send;
@@ -87,8 +87,8 @@ struct VirtQueue
     /* Last used index value we have signalled on */
     bool signalled_used_valid;
 
-    /* Notification enabled? */
-    bool notification;
+    /* Nested host->guest notification disabled counter */
+    unsigned int notification_disabled;
 
     uint16_t queue_index;
 
@@ -201,7 +201,7 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 {
     hwaddr pa;
-    if (!vq->notification) {
+    if (vq->notification_disabled) {
         return;
     }
     pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
@@ -210,7 +210,13 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
 {
-    vq->notification = enable;
+    if (enable) {
+        assert(vq->notification_disabled > 0);
+        vq->notification_disabled--;
+    } else {
+        vq->notification_disabled++;
+    }
 
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
@@ -959,7 +965,7 @@ void virtio_reset(void *opaque)
         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
         vdev->vq[i].signalled_used = 0;
         vdev->vq[i].signalled_used_valid = false;
-        vdev->vq[i].notification = true;
+        vdev->vq[i].notification_disabled = 0;
         vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
         vdev->vq[i].inuse = 0;
     }
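The boolean turns into a counter because notifications are now disabled from more than one place at a time (the queue handlers and the aio poll begin/end hooks added elsewhere in this series), so disable and enable calls must nest and stay balanced. An illustrative pairing, not code from the patch:

    virtio_queue_set_notification(vq, 0);    /* first user: counter 0 -> 1 */
    virtio_queue_set_notification(vq, 0);    /* second user: counter 1 -> 2 */
    /* ... process or busy-poll the ring ... */
    virtio_queue_set_notification(vq, 1);    /* counter 2 -> 1, still suppressed */
    virtio_queue_set_notification(vq, 1);    /* counter 1 -> 0, notifications re-armed */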
@@ -1770,7 +1776,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
|
||||
vdev->vq[i].vring.desc = qemu_get_be64(f);
|
||||
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
|
||||
vdev->vq[i].signalled_used_valid = false;
|
||||
vdev->vq[i].notification = true;
|
||||
vdev->vq[i].notification_disabled = 0;
|
||||
|
||||
if (vdev->vq[i].vring.desc) {
|
||||
/* XXX virtio-1 devices */
|
||||
@@ -2047,15 +2053,47 @@ static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
|
||||
}
|
||||
}
|
||||
|
||||
static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
|
||||
{
|
||||
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
|
||||
|
||||
virtio_queue_set_notification(vq, 0);
|
||||
}
|
||||
|
||||
static bool virtio_queue_host_notifier_aio_poll(void *opaque)
|
||||
{
|
||||
EventNotifier *n = opaque;
|
||||
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
|
||||
|
||||
if (virtio_queue_empty(vq)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtio_queue_notify_aio_vq(vq);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
|
||||
{
|
||||
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
|
||||
|
||||
/* Caller polls once more after this to catch requests that race with us */
|
||||
virtio_queue_set_notification(vq, 1);
|
||||
}
|
||||
|
||||
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
|
||||
VirtIOHandleOutput handle_output)
|
||||
{
|
||||
if (handle_output) {
|
||||
vq->handle_aio_output = handle_output;
|
||||
aio_set_event_notifier(ctx, &vq->host_notifier, true,
|
||||
virtio_queue_host_notifier_aio_read);
|
||||
virtio_queue_host_notifier_aio_read,
|
||||
virtio_queue_host_notifier_aio_poll);
|
||||
aio_set_event_notifier_poll(ctx, &vq->host_notifier,
|
||||
virtio_queue_host_notifier_aio_poll_begin,
|
||||
virtio_queue_host_notifier_aio_poll_end);
|
||||
} else {
|
||||
aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
|
||||
aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
|
||||
/* Test and clear notifier before after disabling event,
|
||||
* in case poll callback didn't have time to run. */
|
||||
virtio_queue_host_notifier_aio_read(&vq->host_notifier);
|
||||
|
@@ -428,6 +428,14 @@ static void i6300esb_realize(PCIDevice *dev, Error **errp)
|
||||
/* qemu_register_coalesced_mmio (addr, 0x10); ? */
|
||||
}
|
||||
|
||||
static void i6300esb_exit(PCIDevice *dev)
|
||||
{
|
||||
I6300State *d = WATCHDOG_I6300ESB_DEVICE(dev);
|
||||
|
||||
timer_del(d->timer);
|
||||
timer_free(d->timer);
|
||||
}
|
||||
|
||||
static WatchdogTimerModel model = {
|
||||
.wdt_name = "i6300esb",
|
||||
.wdt_description = "Intel 6300ESB",
|
||||
@@ -441,6 +449,7 @@ static void i6300esb_class_init(ObjectClass *klass, void *data)
|
||||
k->config_read = i6300esb_config_read;
|
||||
k->config_write = i6300esb_config_write;
|
||||
k->realize = i6300esb_realize;
|
||||
k->exit = i6300esb_exit;
|
||||
k->vendor_id = PCI_VENDOR_ID_INTEL;
|
||||
k->device_id = PCI_DEVICE_ID_INTEL_ESB_9;
|
||||
k->class_id = PCI_CLASS_SYSTEM_OTHER;
|
||||
|
@@ -44,6 +44,7 @@ void qemu_aio_ref(void *p);
|
||||
|
||||
typedef struct AioHandler AioHandler;
|
||||
typedef void QEMUBHFunc(void *opaque);
|
||||
typedef bool AioPollFn(void *opaque);
|
||||
typedef void IOHandler(void *opaque);
|
||||
|
||||
struct ThreadPool;
|
||||
@@ -130,6 +131,18 @@ struct AioContext {
|
||||
|
||||
int external_disable_cnt;
|
||||
|
||||
/* Number of AioHandlers without .io_poll() */
|
||||
int poll_disable_cnt;
|
||||
|
||||
/* Polling mode parameters */
|
||||
int64_t poll_ns; /* current polling time in nanoseconds */
|
||||
int64_t poll_max_ns; /* maximum polling time in nanoseconds */
|
||||
int64_t poll_grow; /* polling time growth factor */
|
||||
int64_t poll_shrink; /* polling time shrink factor */
|
||||
|
||||
/* Are we in polling mode or monitoring file descriptors? */
|
||||
bool poll_started;
|
||||
|
||||
/* epoll(7) state used when built with CONFIG_EPOLL */
|
||||
int epollfd;
|
||||
bool epoll_enabled;
|
||||
@@ -195,8 +208,8 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
|
||||
* aio_notify: Force processing of pending events.
|
||||
*
|
||||
* Similar to signaling a condition variable, aio_notify forces
|
||||
* aio_wait to exit, so that the next call will re-examine pending events.
|
||||
* The caller of aio_notify will usually call aio_wait again very soon,
|
||||
* aio_poll to exit, so that the next call will re-examine pending events.
|
||||
* The caller of aio_notify will usually call aio_poll again very soon,
|
||||
* or go through another iteration of the GLib main loop. Hence, aio_notify
|
||||
* also has the side effect of recalculating the sets of file descriptors
|
||||
* that the main loop waits for.
|
||||
@@ -295,8 +308,12 @@ bool aio_pending(AioContext *ctx);
|
||||
/* Dispatch any pending callbacks from the GSource attached to the AioContext.
|
||||
*
|
||||
* This is used internally in the implementation of the GSource.
|
||||
*
|
||||
* @dispatch_fds: true to process fds, false to skip them
|
||||
* (can be used as an optimization by callers that know there
|
||||
* are no fds ready)
|
||||
*/
|
||||
bool aio_dispatch(AioContext *ctx);
|
||||
bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
|
||||
|
||||
/* Progress in completing AIO work to occur. This can issue new pending
|
||||
* aio as a result of executing I/O completion or bh callbacks.
|
||||
@@ -325,8 +342,17 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
bool is_external,
|
||||
IOHandler *io_read,
|
||||
IOHandler *io_write,
|
||||
AioPollFn *io_poll,
|
||||
void *opaque);
|
||||
|
||||
/* Set polling begin/end callbacks for a file descriptor that has already been
|
||||
* registered with aio_set_fd_handler. Do nothing if the file descriptor is
|
||||
* not registered.
|
||||
*/
|
||||
void aio_set_fd_poll(AioContext *ctx, int fd,
|
||||
IOHandler *io_poll_begin,
|
||||
IOHandler *io_poll_end);
|
||||
|
||||
/* Register an event notifier and associated callbacks. Behaves very similarly
|
||||
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
|
||||
* will be invoked when using aio_poll().
|
||||
@@ -337,7 +363,17 @@ void aio_set_fd_handler(AioContext *ctx,
|
||||
void aio_set_event_notifier(AioContext *ctx,
|
||||
EventNotifier *notifier,
|
||||
bool is_external,
|
||||
EventNotifierHandler *io_read);
|
||||
EventNotifierHandler *io_read,
|
||||
AioPollFn *io_poll);
|
||||
|
||||
/* Set polling begin/end callbacks for an event notifier that has already been
|
||||
* registered with aio_set_event_notifier. Do nothing if the event notifier is
|
||||
* not registered.
|
||||
*/
|
||||
void aio_set_event_notifier_poll(AioContext *ctx,
|
||||
EventNotifier *notifier,
|
||||
EventNotifierHandler *io_poll_begin,
|
||||
EventNotifierHandler *io_poll_end);
|
||||
|
||||
/* Return a GSource that lets the main loop poll the file descriptors attached
|
||||
* to this AioContext.
|
||||
@@ -474,4 +510,17 @@ static inline bool aio_context_in_iothread(AioContext *ctx)
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

#endif
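A minimal sketch of driving the new knob, assuming the QEMU build environment; the values below are purely illustrative, not the defaults chosen by this series.

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qapi/error.h"

    static void tune_polling(AioContext *ctx)
    {
        Error *local_err = NULL;

        /* Busy-poll for at most 32 microseconds, with a growth factor of 2
         * and a shrink factor of 10 for the self-tuning window.  Passing
         * max_ns == 0 disables polling entirely, as documented above. */
        aio_context_set_poll_params(ctx, 32 * 1000, 2, 10, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }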
@@ -186,6 +186,29 @@ void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result);

uint32_t lduw_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_phys_cached(MemoryRegionCache *cache, hwaddr addr);
void stl_phys_notdirty_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);

uint32_t address_space_lduw_cached(MemoryRegionCache *cache, hwaddr addr,
                                   MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_cached(MemoryRegionCache *cache, hwaddr addr,
                                  MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_cached(MemoryRegionCache *cache, hwaddr addr,
                                  MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_notdirty_cached(MemoryRegionCache *cache, hwaddr addr,
                                       uint32_t val, MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                              MemTxAttrs attrs, MemTxResult *result);
#endif

/* page related stuff */
@@ -94,21 +94,6 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
 */
void qemu_flush_coalesced_mmio_buffer(void);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);

void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len);
void cpu_flush_icache_range(hwaddr start, int len);
@@ -1404,6 +1404,140 @@ void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);

struct MemoryRegionCache {
    hwaddr xlat;
    void *ptr;
    hwaddr len;
    MemoryRegion *mr;
    bool is_write;
};
/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len.  On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */
uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
                                   MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs, MemTxResult *result);

uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
/* address_space_translate: translate an address range into an address space
 * into a MemoryRegion and an address range into that section.  Should be
 * called from an RCU critical section, to avoid that the last reference
@@ -1529,6 +1663,38 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    memcpy(buf, cache->ptr + addr, len);
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    memcpy(cache->ptr + addr, buf, len);
}

#endif

#endif
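Tying the cache API above together, here is a hedged sketch of a read-modify-write on a guest-physical counter. The address space, address, and helper name are placeholders; the fallback path simply uses the uncached accessors that this header also declares.

    #include "qemu/osdep.h"
    #include "exec/memory.h"

    static void bump_guest_counter(AddressSpace *as, hwaddr counter_pa)
    {
        MemoryRegionCache cache;
        int64_t mapped;
        uint32_t val;

        /* Map 4 bytes; is_write must be true for read-modify-write. */
        mapped = address_space_cache_init(&cache, as, counter_pa, 4, true);
        if (mapped < 4) {
            /* Failure or short mapping: fall back to uncached accessors. */
            stl_le_phys(as, counter_pa, ldl_le_phys(as, counter_pa) + 1);
            return;
        }

        /* Addresses are relative to counter_pa, hence offset 0 here. */
        val = ldl_le_phys_cached(&cache, 0);
        stl_le_phys_cached(&cache, 0, val + 1);

        /* Complete the write (dirty tracking etc.), then drop the mapping. */
        address_space_cache_invalidate(&cache, 0, 4);
        address_space_cache_destroy(&cache);
    }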
@@ -12,7 +12,7 @@
#define HW_ARM_H

#include "exec/memory.h"
#include "target-arm/cpu-qom.h"
#include "target/arm/cpu-qom.h"
#include "hw/irq.h"
#include "qemu/notify.h"

@@ -27,8 +27,9 @@ typedef struct AspeedSoCState {
    DeviceState parent;

    /*< public >*/
    ARMCPU *cpu;
    ARMCPU cpu;
    MemoryRegion iomem;
    MemoryRegion sram;
    AspeedVICState vic;
    AspeedTimerCtrlState timerctrl;
    AspeedI2CState i2c;
@@ -46,6 +47,7 @@ typedef struct AspeedSoCInfo {
    const char *cpu_model;
    uint32_t silicon_rev;
    hwaddr sdram_base;
    uint64_t sram_size;
    int spis_num;
    const hwaddr *spi_bases;
    const char *fmc_typename;

@@ -27,7 +27,7 @@

#include "qemu-common.h"
#include "exec/memory.h"
#include "target-arm/cpu-qom.h"
#include "target/arm/cpu-qom.h"

#define EXYNOS4210_NCPUS 2

@@ -20,7 +20,7 @@
#include "exec/memory.h"
# define hw_omap_h "omap.h"
#include "hw/irq.h"
#include "target-arm/cpu-qom.h"
#include "target/arm/cpu-qom.h"

# define OMAP_EMIFS_BASE 0x00000000
# define OMAP2_Q0_BASE 0x00000000

@@ -11,7 +11,7 @@
#define PXA_H

#include "exec/memory.h"
#include "target-arm/cpu-qom.h"
#include "target/arm/cpu-qom.h"

/* Interrupt numbers */
# define PXA2XX_PIC_SSP3 0
@@ -1,6 +1,9 @@
#ifndef HW_COMPAT_H
#define HW_COMPAT_H

#define HW_COMPAT_2_8 \
    /* empty */

#define HW_COMPAT_2_7 \
    {\
        .driver = "virtio-pci",\
@@ -63,6 +63,9 @@ struct PCMachineState {
    AcpiNVDIMMState acpi_nvdimm_state;

    bool acpi_build_enabled;
    bool smbus;
    bool sata;
    bool pit;

    /* RAM information (sizes, addresses, configuration): */
    ram_addr_t below_4g_mem_size, above_4g_mem_size;
@@ -88,6 +91,9 @@ struct PCMachineState {
#define PC_MACHINE_VMPORT       "vmport"
#define PC_MACHINE_SMM          "smm"
#define PC_MACHINE_NVDIMM       "nvdimm"
#define PC_MACHINE_SMBUS        "smbus"
#define PC_MACHINE_SATA         "sata"
#define PC_MACHINE_PIT          "pit"

/**
 * PCMachineClass:
@@ -260,6 +266,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
                          ISADevice **rtc_state,
                          bool create_fdctrl,
                          bool no_vmport,
                          bool has_pit,
                          uint32_t hpet_irqs);
void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd);
void pc_cmos_init(PCMachineState *pcms,
@@ -371,6 +378,11 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);

#define PC_COMPAT_2_7 \
    HW_COMPAT_2_7 \
    {\
        .driver = "kvmclock",\
        .property = "x-mach-use-reliable-get-clock",\
        .value = "off",\
    },\
    {\
        .driver = TYPE_X86_CPU,\
        .property = "l3-cache",\
@@ -2,7 +2,7 @@
#define HW_MCF_H
/* Motorola ColdFire device prototypes.  */

#include "target-m68k/cpu-qom.h"
#include "target/m68k/cpu-qom.h"

struct MemoryRegion;

@@ -1,7 +1,7 @@
#ifndef HW_MIPS_CPUDEVS_H
#define HW_MIPS_CPUDEVS_H

#include "target-mips/cpu-qom.h"
#include "target/mips/cpu-qom.h"

/* Definitions for MIPS CPU internal devices.  */

@@ -32,6 +32,7 @@ typedef struct AspeedSCUState {
} AspeedSCUState;

#define AST2400_A0_SILICON_REV 0x02000303U
#define AST2400_A1_SILICON_REV 0x02010303U
#define AST2500_A0_SILICON_REV 0x04000303U
#define AST2500_A1_SILICON_REV 0x04010303U

@@ -11,7 +11,7 @@
#define PPC_FDT_H

#include "qemu/error-report.h"
#include "target-ppc/cpu-qom.h"
#include "target/ppc/cpu-qom.h"

#define _FDT(exp) \
    do { \

@@ -1,7 +1,7 @@
#ifndef HW_PPC_H
#define HW_PPC_H

#include "target-ppc/cpu-qom.h"
#include "target/ppc/cpu-qom.h"

void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level);

@@ -11,7 +11,7 @@

#include "hw/qdev.h"
#include "hw/cpu/core.h"
#include "target-ppc/cpu-qom.h"
#include "target/ppc/cpu-qom.h"

#define TYPE_SPAPR_CPU_CORE "spapr-cpu-core"
#define SPAPR_CPU_CORE(obj) \

@@ -3,7 +3,7 @@
/* Definitions for SH board emulation.  */

#include "hw/sh4/sh_intc.h"
#include "target-sh4/cpu-qom.h"
#include "target/sh4/cpu-qom.h"

#define A7ADDR(x) ((x) & 0x1fffffff)
#define P4ADDR(x) ((x) | 0xe0000000)
@@ -38,6 +38,7 @@ struct virtio_gpu_simple_resource {
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    uint64_t hostmem;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

@@ -68,6 +69,7 @@ enum virtio_gpu_conf_flags {
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))

struct virtio_gpu_conf {
    uint64_t max_hostmem;
    uint32_t max_outputs;
    uint32_t flags;
};
@@ -103,6 +105,7 @@ typedef struct VirtIOGPU {
    struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];

    struct virtio_gpu_conf conf;
    uint64_t hostmem;
    int enabled_output_bitmask;
    struct virtio_gpu_config virtio_config;
Some files were not shown because too many files have changed in this diff.