Compare commits: qdev-array...multifd-fi (98 commits)
| SHA1 |
|---|
| f184a8eab6 |
| 91db7ee50d |
| a0113c848d |
| 516c84dc0b |
| 0ef4032931 |
| b6a360fd5f |
| 5df51b4c38 |
| e56fda417e |
| 7743485a6d |
| 413a3437be |
| fd85347499 |
| 8e076e5aad |
| 394517d692 |
| 249667082c |
| 73b490eb10 |
| 88da08030c |
| f99794cafe |
| 354899a1aa |
| c518c0ff19 |
| 8b5b1db19a |
| 8d33878e6a |
| 4d19dbe93a |
| 5251f982e4 |
| 25bc8fe44b |
| bd9e24883b |
| d1b5e3e7ca |
| 05037743e5 |
| 49ec5915d2 |
| 6e9ae36d7a |
| 350ec7fac4 |
| 694d818a8c |
| 11fc708273 |
| ba1cba6b3e |
| c797e6ea59 |
| dd2b7382d7 |
| 42872af9f2 |
| 33abbe30c9 |
| 74c0934b60 |
| 34a5cb6d84 |
| 0dfae4f948 |
| e2faabee78 |
| cf9b5790db |
| 9c673a41ee |
| 6d44474b3b |
| 52105c6458 |
| 9f7c4f60cc |
| 3b06e4058d |
| 315088134f |
| d12a91e0ba |
| e72629e514 |
| 816644b121 |
| 04c0a003dd |
| 0b2af475e9 |
| f88131d931 |
| ec6f3fc3ef |
| e274d2a777 |
| 8066102df1 |
| fa71b4f84f |
| 451d993d58 |
| 17fe594c59 |
| 2a23f0f118 |
| e722e5a112 |
| 576fc9376d |
| e5d487c972 |
| f6e8d1ef05 |
| 0034d0395e |
| 4c7ae73caf |
| 52c773ce89 |
| 4d044472ab |
| fc58891d04 |
| 1d675e59ea |
| f78ea7ddb0 |
| 05fa22770a |
| c86a59fd34 |
| 616425d452 |
| c96c116e10 |
| 4409a6d855 |
| f9a19bd8d2 |
| 2e990d81d9 |
| 00ac955b06 |
| d229996b40 |
| 7c7e1f6017 |
| 42c31682ba |
| 261c1281e8 |
| 4940da2096 |
| 8011b508cf |
| 0ab3565840 |
| 34aee9c946 |
| 5c24c3e2f3 |
| c375f05ef5 |
| 6d133eef98 |
| 569205e4e9 |
| 364eff6885 |
| d90014fc33 |
| fe73674af1 |
| 6ab4f1c9e2 |
| 881d1073d0 |
| 69680740ea |
MAINTAINERS (18 changes)
```diff
@@ -131,6 +131,17 @@ K: ^Subject:.*(?i)mips
 F: docs/system/target-mips.rst
 F: configs/targets/mips*
+
+X86 general architecture support
+M: Paolo Bonzini <pbonzini@redhat.com>
+S: Maintained
+F: configs/devices/i386-softmmu/default.mak
+F: configs/targets/i386-softmmu.mak
+F: configs/targets/x86_64-softmmu.mak
+F: docs/system/target-i386*
+F: target/i386/*.[ch]
+F: target/i386/Kconfig
+F: target/i386/meson.build
 
 Guest CPU cores (TCG)
 ---------------------
 Overall TCG CPUs
@@ -657,6 +668,7 @@ F: include/hw/dma/pl080.h
 F: hw/dma/pl330.c
 F: hw/gpio/pl061.c
 F: hw/input/pl050.c
+F: include/hw/input/pl050.h
 F: hw/intc/pl190.c
 F: hw/sd/pl181.c
 F: hw/ssi/pl022.c
@@ -927,6 +939,7 @@ F: hw/*/pxa2xx*
 F: hw/display/tc6393xb.c
 F: hw/gpio/max7310.c
 F: hw/gpio/zaurus.c
 F: hw/input/ads7846.c
 F: hw/misc/mst_fpga.c
 F: hw/adc/max111x.c
 F: include/hw/adc/max111x.h
@@ -979,7 +992,9 @@ M: Peter Maydell <peter.maydell@linaro.org>
 L: qemu-arm@nongnu.org
 S: Maintained
 F: hw/*/stellaris*
 F: hw/display/ssd03*
 F: include/hw/input/gamepad.h
 F: include/hw/timer/stellaris-gptm.h
 F: docs/system/arm/stellaris.rst
 
 STM32VLDISCOVERY
@@ -994,6 +1009,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
 L: qemu-arm@nongnu.org
 S: Maintained
 F: hw/arm/vexpress.c
 F: hw/display/sii9022.c
 F: docs/system/arm/vexpress.rst
 
 Versatile PB
@@ -2241,7 +2257,7 @@ M: Stefan Hajnoczi <stefanha@redhat.com>
 S: Supported
 F: hw/virtio/vhost-user-fs*
 F: include/hw/virtio/vhost-user-fs.h
-L: virtio-fs@redhat.com
+L: virtio-fs@lists.linux.dev
 
 virtio-input
 M: Gerd Hoffmann <kraxel@redhat.com>
```
accel/tcg/cpu-exec.c

```diff
@@ -721,7 +721,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
         /* Execute just one insn to trigger exception pending in the log */
         cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
-                              | CF_LAST_IO | CF_NOIRQ | 1;
+                              | CF_NOIRQ | 1;
     }
 #endif
     return false;
```
accel/tcg/cputlb.c

```diff
@@ -1479,7 +1479,8 @@ int probe_access_full(CPUArchState *env, vaddr addr, int size,
 
     /* Handle clean RAM pages. */
     if (unlikely(flags & TLB_NOTDIRTY)) {
-        notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
+        int dirtysize = size == 0 ? 1 : size;
+        notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
         flags &= ~TLB_NOTDIRTY;
     }
 
@@ -1502,7 +1503,8 @@ int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
 
     /* Handle clean RAM pages. */
     if (unlikely(flags & TLB_NOTDIRTY)) {
-        notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
+        int dirtysize = size == 0 ? 1 : size;
+        notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
         flags &= ~TLB_NOTDIRTY;
     }
 
@@ -1524,7 +1526,8 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size,
 
     /* Handle clean RAM pages. */
     if (unlikely(flags & TLB_NOTDIRTY)) {
-        notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+        int dirtysize = size == 0 ? 1 : size;
+        notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
         flags &= ~TLB_NOTDIRTY;
     }
 
@@ -1560,7 +1563,7 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
 
         /* Handle clean RAM pages. */
         if (flags & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+            notdirty_write(env_cpu(env), addr, size, full, retaddr);
         }
     }
```
accel/tcg/tb-maint.c

```diff
@@ -1083,8 +1083,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     if (current_tb_modified) {
         /* Force execution of one insn next time. */
         CPUState *cpu = current_cpu;
-        cpu->cflags_next_tb =
-            1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
+        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
         return true;
     }
     return false;
@@ -1154,8 +1153,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
     if (current_tb_modified) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time. */
-        current_cpu->cflags_next_tb =
-            1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
+        current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
         mmap_unlock();
         cpu_loop_exit_noexc(current_cpu);
     }
```
accel/tcg/translate-all.c

```diff
@@ -304,7 +304,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
     if (phys_pc == -1) {
         /* Generate a one-shot TB with 1 insn in it */
-        cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
+        cflags = (cflags & ~CF_COUNT_MASK) | 1;
     }
 
     max_insns = cflags & CF_COUNT_MASK;
@@ -632,7 +632,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
      * operations only (which execute after completion) so we don't
      * double instrument the instruction.
      */
-    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
+    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
 
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
```
accel/tcg/translator.c

```diff
@@ -89,7 +89,7 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
      * each translation block. The cost is minimal, plus it would be
      * very easy to forget doing it in the translator.
      */
-    set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO));
+    set_can_do_io(db, db->max_insns == 1);
 
     return icount_start_insn;
 }
@@ -151,13 +151,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     ops->tb_start(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
 
-    if (cflags & CF_MEMI_ONLY) {
-        /* We should only see CF_MEMI_ONLY for io_recompile. */
-        assert(cflags & CF_LAST_IO);
-        plugin_enabled = plugin_gen_tb_start(cpu, db, true);
-    } else {
-        plugin_enabled = plugin_gen_tb_start(cpu, db, false);
-    }
+    plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
     db->plugin_enabled = plugin_enabled;
 
     while (true) {
@@ -169,11 +163,13 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
             plugin_gen_insn_start(cpu, db);
         }
 
-        /* Disassemble one instruction.  The translate_insn hook should
-           update db->pc_next and db->is_jmp to indicate what should be
-           done next -- either exiting this loop or locate the start of
-           the next instruction.  */
-        if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
+        /*
+         * Disassemble one instruction.  The translate_insn hook should
+         * update db->pc_next and db->is_jmp to indicate what should be
+         * done next -- either exiting this loop or locate the start of
+         * the next instruction.
+         */
+        if (db->num_insns == db->max_insns) {
             /* Accept I/O on the last instruction. */
             set_can_do_io(db, true);
         }
```
block/snapshot.c

```diff
@@ -629,7 +629,6 @@ int bdrv_all_goto_snapshot(const char *name,
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
         AioContext *ctx = bdrv_get_aio_context(bs);
-        int ret = 0;
         bool all_snapshots_includes_bs;
 
         aio_context_acquire(ctx);
@@ -637,9 +636,8 @@ int bdrv_all_goto_snapshot(const char *name,
         all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs);
         bdrv_graph_rdunlock_main_loop();
 
-        if (devices || all_snapshots_includes_bs) {
-            ret = bdrv_snapshot_goto(bs, name, errp);
-        }
+        ret = (devices || all_snapshots_includes_bs) ?
+              bdrv_snapshot_goto(bs, name, errp) : 0;
         aio_context_release(ctx);
         if (ret < 0) {
             bdrv_graph_rdlock_main_loop();
```
docs/devel/migration.rst

```diff
@@ -572,6 +572,27 @@ Others (especially either older devices or system devices which for
 some reason don't have a bus concept) make use of the ``instance id``
 for otherwise identically named devices.
 
+Fixed-ram format
+----------------
+
+When the ``fixed-ram`` capability is enabled, a slightly different
+stream format is used for the RAM section. Instead of having a
+sequential stream of pages that follow the RAMBlock headers, the dirty
+pages for a RAMBlock follow its header. This ensures that each RAM
+page has a fixed offset in the resulting migration file.
+
+The ``fixed-ram`` capability must be enabled in both source and
+destination with:
+
+    ``migrate_set_capability fixed-ram on``
+
+Since pages are written to their relative offsets and out of order
+(due to the memory dirtying patterns), streaming channels such as
+sockets are not supported. A seekable channel such as a file is
+required. This can be verified in the QIOChannel by the presence of
+the QIO_CHANNEL_FEATURE_SEEKABLE. In more practical terms, this
+migration format requires the ``file:`` URI when migrating.
+
 Return path
 -----------
```
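For orientation, a minimal HMP sequence exercising the capability documented above might look as follows; the ``fixed-ram`` capability name and the ``file:`` URI come from the section text, while the target path is an illustrative placeholder:

```
(qemu) migrate_set_capability fixed-ram on
(qemu) migrate file:/tmp/vm-state.img
```

On the destination side, the same capability would be set before running ``migrate_incoming`` with the same ``file:`` URI (with the VM started using ``-incoming defer``).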
docs/devel/tcg-icount.rst

```diff
@@ -62,12 +62,6 @@ To deal with this case, when an I/O access is made we:
 - re-compile a single [1]_ instruction block for the current PC
 - exit the cpu loop and execute the re-compiled block
 
-The new block is created with the CF_LAST_IO compile flag which
-ensures the final instruction translation starts with a call to
-gen_io_start() so we don't enter a perpetual loop constantly
-recompiling a single instruction block. For translators using the
-common translator_loop this is done automatically.
-
 .. [1] sometimes two instructions if dealing with delay slots
 
 Other I/O operations
```
docs/devel/testing.rst

```diff
@@ -668,11 +668,11 @@ suppressing it. More information on the file format can be found here:
 
 https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions
 
-tests/tsan/blacklist.tsan - Has TSan warnings we wish to disable
+tests/tsan/ignore.tsan - Has TSan warnings we wish to disable
 at compile time for test or debug.
 Add flags to configure to enable:
 
-"--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/blacklist.tsan"
+"--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/ignore.tsan"
 
 More information on the file format can be found here under "Blacklist Format":
```
docs/sphinx/qapidoc.py

```diff
@@ -515,7 +515,7 @@ class QAPIDocDirective(Directive):
         except QAPIError as err:
             # Launder QAPI parse errors into Sphinx extension errors
             # so they are displayed nicely to the user
-            raise ExtensionError(str(err))
+            raise ExtensionError(str(err)) from err
 
     def do_parse(self, rstlist, node):
         """Parse rST source lines and add them to the specified node
```
dump/dump.c

```diff
@@ -96,6 +96,10 @@ uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
 
 static int dump_cleanup(DumpState *s)
 {
+    if (s->dump_info.arch_cleanup_fn) {
+        s->dump_info.arch_cleanup_fn(s);
+    }
+
     guest_phys_blocks_free(&s->guest_phys_blocks);
     memory_mapping_list_free(&s->list);
     close(s->fd);
```
host/include/generic/host/atomic128-cas.h.inc and atomic128-ldst.h.inc

```diff
@@ -28,7 +28,7 @@ atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
 static inline Int128 ATTRIBUTE_ATOMIC128_OPT
 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
 {
-    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16);
     Int128Alias r, c, n;
 
     c.s = cmp;
@@ -58,7 +58,7 @@ atomic16_read_rw(Int128 *ptr)
 static inline void ATTRIBUTE_ATOMIC128_OPT
 atomic16_set(Int128 *ptr, Int128 val)
 {
-    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16);
     __int128_t old;
     Int128Alias new;
```
hw/arm/virt.c

```diff
@@ -576,7 +576,8 @@ static void fdt_add_gic_node(VirtMachineState *vms)
 
         if (vms->virt) {
             qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
-                                   GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
+                                   GIC_FDT_IRQ_TYPE_PPI,
+                                   INTID_TO_PPI(ARCH_GIC_MAINT_IRQ),
                                    GIC_FDT_IRQ_FLAGS_LEVEL_HI);
         }
     } else {
@@ -600,7 +601,8 @@ static void fdt_add_gic_node(VirtMachineState *vms)
                                      2, vms->memmap[VIRT_GIC_VCPU].base,
                                      2, vms->memmap[VIRT_GIC_VCPU].size);
         qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
-                               GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
+                               GIC_FDT_IRQ_TYPE_PPI,
+                               INTID_TO_PPI(ARCH_GIC_MAINT_IRQ),
                                GIC_FDT_IRQ_FLAGS_LEVEL_HI);
     }
 }
```
hw/audio/es1370.c

```diff
@@ -670,8 +670,13 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel,
     cnt += (transferred + d->leftover) >> 2;
 
     if (s->sctl & loop_sel) {
-        /* Bah, how stupid is that having a 0 represent true value?
-           i just spent few hours on this shit */
+        /*
+         * loop_sel tells us which bit in the SCTL register to look at
+         * (either P1_LOOP_SEL, P2_LOOP_SEL or R1_LOOP_SEL). The sense
+         * of these bits is 0 for loop mode (set interrupt and keep recording
+         * when the sample count reaches zero) or 1 for stop mode (set
+         * interrupt and stop recording).
+         */
         AUD_log ("es1370: warning", "non looping mode\n");
     } else {
         d->frame_cnt = size;
```
hw/core/machine.c

```diff
@@ -35,7 +35,8 @@
 GlobalProperty hw_compat_8_1[] = {
     { TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" },
     { "ramfb", "x-migrate", "off" },
-    { "vfio-pci-nohotplug", "x-ramfb-migrate", "off" }
+    { "vfio-pci-nohotplug", "x-ramfb-migrate", "off" },
+    { "igb", "x-pcie-flr-init", "off" },
 };
 const size_t hw_compat_8_1_len = G_N_ELEMENTS(hw_compat_8_1);
```
hw/display/vmware_vga.c

```diff
@@ -336,8 +336,8 @@ static inline bool vmsvga_verify_rect(DisplaySurface *surface,
         return false;
     }
     if (h > SVGA_MAX_HEIGHT) {
-        trace_vmware_verify_rect_greater_than_bound(name, "y", SVGA_MAX_HEIGHT,
-                                                    y);
+        trace_vmware_verify_rect_greater_than_bound(name, "h", SVGA_MAX_HEIGHT,
+                                                    h);
         return false;
     }
     if (y + h > surface_height(surface)) {
```
hw/hppa/machine.c

```diff
@@ -34,9 +34,10 @@
 #include "net/net.h"
 #include "qemu/log.h"
 
-#define MIN_SEABIOS_HPPA_VERSION 10 /* require at least this fw version */
+#define MIN_SEABIOS_HPPA_VERSION 12 /* require at least this fw version */
 
-#define HPA_POWER_BUTTON (FIRMWARE_END - 0x10)
+/* Power button address at &PAGE0->pad[4] */
+#define HPA_POWER_BUTTON (0x40 + 4 * sizeof(uint32_t))
 
 #define enable_lasi_lan() 0
 
```
hw/mips/Kconfig

```diff
@@ -46,6 +46,7 @@ config LOONGSON3V
     select PCI_EXPRESS_GENERIC_BRIDGE
     select MSI_NONBROKEN
     select FW_CFG_MIPS
+    select UNIMP
 
 config MIPS_CPS
     bool
```
hw/net/igb.c (15 changes)
```diff
@@ -78,6 +78,7 @@ struct IGBState {
     uint32_t ioaddr;
 
     IGBCore core;
+    bool has_flr;
 };
 
 #define IGB_CAP_SRIOV_OFFSET (0x160)
@@ -101,6 +102,9 @@ static void igb_write_config(PCIDevice *dev, uint32_t addr,
 
     trace_igb_write_config(addr, val, len);
     pci_default_write_config(dev, addr, val, len);
+    if (s->has_flr) {
+        pcie_cap_flr_write_config(dev, addr, val, len);
+    }
 
     if (range_covers_byte(addr, len, PCI_COMMAND) &&
         (dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
@@ -122,6 +126,12 @@ igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
     igb_core_write(&s->core, addr, val, size);
 }
 
+void igb_vf_reset(void *opaque, uint16_t vfn)
+{
+    IGBState *s = opaque;
+    igb_core_vf_reset(&s->core, vfn);
+}
+
 static bool
 igb_io_get_reg_index(IGBState *s, uint32_t *idx)
 {
@@ -427,6 +437,10 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
     }
 
     /* PCIe extended capabilities (in order) */
+    if (s->has_flr) {
+        pcie_cap_flr_init(pci_dev);
+    }
+
     if (pcie_aer_init(pci_dev, 1, 0x100, 0x40, errp) < 0) {
         hw_error("Failed to initialize AER capability");
     }
@@ -582,6 +596,7 @@ static const VMStateDescription igb_vmstate = {
 
 static Property igb_properties[] = {
     DEFINE_NIC_PROPERTIES(IGBState, conf),
+    DEFINE_PROP_BOOL("x-pcie-flr-init", IGBState, has_flr, true),
    DEFINE_PROP_END_OF_LIST(),
 };
```
hw/net/igb_common.h

```diff
@@ -152,5 +152,6 @@ enum {
 
 uint64_t igb_mmio_read(void *opaque, hwaddr addr, unsigned size);
 void igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size);
+void igb_vf_reset(void *opaque, uint16_t vfn);
 
 #endif
```
hw/net/igb_core.c

```diff
@@ -2477,11 +2477,13 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
     }
 }
 
-static void igb_vf_reset(IGBCore *core, uint16_t vfn)
+void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
 {
     uint16_t qn0 = vfn;
     uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;
 
+    trace_igb_core_vf_reset(vfn);
+
     /* disable Rx and Tx for the VF*/
     core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
     core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
@@ -2560,7 +2562,7 @@ static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
 
     if (val & E1000_CTRL_RST) {
         vfn = (index - PVTCTRL0) / 0x40;
-        igb_vf_reset(core, vfn);
+        igb_core_vf_reset(core, vfn);
     }
 }
```
hw/net/igb_core.h

```diff
@@ -130,6 +130,9 @@ igb_core_set_link_status(IGBCore *core);
 void
 igb_core_pci_uninit(IGBCore *core);
 
+void
+igb_core_vf_reset(IGBCore *core, uint16_t vfn);
+
 bool
 igb_can_receive(IGBCore *core);
```
hw/net/igbvf.c

```diff
@@ -204,6 +204,10 @@ static void igbvf_write_config(PCIDevice *dev, uint32_t addr, uint32_t val,
 {
     trace_igbvf_write_config(addr, val, len);
     pci_default_write_config(dev, addr, val, len);
+    if (object_property_get_bool(OBJECT(pcie_sriov_get_pf(dev)),
+                                 "x-pcie-flr-init", &error_abort)) {
+        pcie_cap_flr_write_config(dev, addr, val, len);
+    }
 }
 
 static uint64_t igbvf_mmio_read(void *opaque, hwaddr addr, unsigned size)
@@ -266,6 +270,11 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
         hw_error("Failed to initialize PCIe capability");
     }
 
+    if (object_property_get_bool(OBJECT(pcie_sriov_get_pf(dev)),
+                                 "x-pcie-flr-init", &error_abort)) {
+        pcie_cap_flr_init(dev);
+    }
+
     if (pcie_aer_init(dev, 1, 0x100, 0x40, errp) < 0) {
         hw_error("Failed to initialize AER capability");
     }
@@ -273,6 +282,13 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
     pcie_ari_init(dev, 0x150);
 }
 
+static void igbvf_qdev_reset_hold(Object *obj)
+{
+    PCIDevice *vf = PCI_DEVICE(obj);
+
+    igb_vf_reset(pcie_sriov_get_pf(vf), pcie_sriov_vf_number(vf));
+}
+
 static void igbvf_pci_uninit(PCIDevice *dev)
 {
     IgbVfState *s = IGBVF(dev);
@@ -287,6 +303,7 @@ static void igbvf_class_init(ObjectClass *class, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(class);
     PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
+    ResettableClass *rc = RESETTABLE_CLASS(class);
 
     c->realize = igbvf_pci_realize;
     c->exit = igbvf_pci_uninit;
@@ -295,6 +312,8 @@ static void igbvf_class_init(ObjectClass *class, void *data)
     c->revision = 1;
     c->class_id = PCI_CLASS_NETWORK_ETHERNET;
 
+    rc->phases.hold = igbvf_qdev_reset_hold;
+
     dc->desc = "Intel 82576 Virtual Function";
     dc->user_creatable = false;
```
hw/net/trace-events

```diff
@@ -274,6 +274,7 @@ igb_core_mdic_read(uint32_t addr, uint32_t data) "MDIC READ: PHY[%u] = 0x%x"
 igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED"
 igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x"
 igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED"
+igb_core_vf_reset(uint16_t vfn) "VF%d"
 
 igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d"
```
hw/pci-host/astro.c

```diff
@@ -32,6 +32,7 @@
 #include "hw/pci-host/astro.h"
 #include "hw/hppa/hppa_hardware.h"
 #include "migration/vmstate.h"
+#include "target/hppa/cpu.h"
 #include "trace.h"
 #include "qom/object.h"
 
@@ -268,22 +269,6 @@ static const MemoryRegionOps elroy_config_addr_ops = {
 };
 
 
-/*
- * A subroutine of astro_translate_iommu that builds an IOMMUTLBEntry using the
- * given translated address and mask.
- */
-static bool make_iommu_tlbe(hwaddr addr, hwaddr taddr, hwaddr mask,
-                            IOMMUTLBEntry *ret)
-{
-    hwaddr tce_mask = ~((1ull << 12) - 1);
-    ret->target_as = &address_space_memory;
-    ret->iova = addr & tce_mask;
-    ret->translated_addr = taddr & tce_mask;
-    ret->addr_mask = ~tce_mask;
-    ret->perm = IOMMU_RW;
-    return true;
-}
-
 /* Handle PCI-to-system address translation. */
 static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
                                            hwaddr addr,
@@ -291,53 +276,59 @@ static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
                                            int iommu_idx)
 {
     AstroState *s = container_of(iommu, AstroState, iommu);
-    IOMMUTLBEntry ret = {
-        .target_as = &address_space_memory,
-        .iova = addr,
-        .translated_addr = 0,
-        .addr_mask = ~(hwaddr)0,
-        .perm = IOMMU_NONE,
-    };
-    hwaddr pdir_ptr, index, a, ibase;
+    hwaddr pdir_ptr, index, ibase;
     hwaddr addr_mask = 0xfff; /* 4k translation */
     uint64_t entry;
 
 #define IOVP_SHIFT 12   /* equals PAGE_SHIFT */
 #define PDIR_INDEX(iovp) ((iovp) >> IOVP_SHIFT)
 #define IOVP_MASK PAGE_MASK
 #define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
 
+    addr &= ~addr_mask;
+
+    /*
+     * Default translation: "32-bit PCI Addressing on 40-bit Runway".
+     * For addresses in the 32-bit memory address range ... and then
+     * language which not-coincidentally matches the PSW.W=0 mapping.
+     */
+    if (addr <= UINT32_MAX) {
+        entry = hppa_abs_to_phys_pa2_w0(addr);
+    } else {
+        entry = addr;
+    }
+
     /* "range enable" flag cleared? */
     if ((s->tlb_ibase & 1) == 0) {
-        make_iommu_tlbe(addr, addr, addr_mask, &ret);
-        return ret;
+        goto skip;
     }
 
-    a = addr;
     ibase = s->tlb_ibase & ~1ULL;
-    if ((a & s->tlb_imask) != ibase) {
+    if ((addr & s->tlb_imask) != ibase) {
         /* do not translate this one! */
-        make_iommu_tlbe(addr, addr, addr_mask, &ret);
-        return ret;
+        goto skip;
     }
-    index = PDIR_INDEX(a);
 
+    index = PDIR_INDEX(addr);
     pdir_ptr = s->tlb_pdir_base + index * sizeof(entry);
     entry = ldq_le_phys(&address_space_memory, pdir_ptr);
 
     if (!(entry & SBA_PDIR_VALID_BIT)) { /* I/O PDIR entry valid ? */
-        g_assert_not_reached();
-        goto failure;
+        /* failure */
+        return (IOMMUTLBEntry) { .perm = IOMMU_NONE };
     }
 
     entry &= ~SBA_PDIR_VALID_BIT;
     entry >>= IOVP_SHIFT;
     entry <<= 12;
     entry |= addr & 0xfff;
-    make_iommu_tlbe(addr, entry, addr_mask, &ret);
-    goto success;
 
- failure:
-    ret = (IOMMUTLBEntry) { .perm = IOMMU_NONE };
- success:
-    return ret;
+ skip:
+    return (IOMMUTLBEntry) {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = entry,
+        .addr_mask = addr_mask,
+        .perm = IOMMU_RW,
+    };
 }
 
 static AddressSpace *elroy_pcihost_set_iommu(PCIBus *bus, void *opaque,
```
hw/pci-host/meson.build

```diff
@@ -29,7 +29,7 @@ pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c'))
 pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c'))
 
 # HPPA devices
-pci_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
+specific_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
 pci_ss.add(when: 'CONFIG_DINO', if_true: files('dino.c'))
 
 system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss)
```
hw/s390x/s390-pci-vfio.c

```diff
@@ -66,6 +66,10 @@ S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
 
     assert(vpdev);
 
+    if (!vpdev->vbasedev.group) {
+        return NULL;
+    }
+
     id = vpdev->vbasedev.group->container->fd;
 
     if (!s390_pci_update_dma_avail(id, &avail)) {
@@ -132,7 +136,7 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev,
      * to the guest based upon the vfio DMA limit.
      */
     vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS;
-    if (vfio_size < (cap->end_dma - cap->start_dma + 1)) {
+    if (vfio_size > 0 && vfio_size < cap->end_dma - cap->start_dma + 1) {
         pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1;
     }
 }
```
hw/virtio/virtio-mem.c

```diff
@@ -525,9 +525,7 @@ static void virtio_mem_activate_memslots_to_plug(VirtIOMEM *vmem,
                            vmem->memslot_size;
     unsigned int idx;
 
-    if (!vmem->dynamic_memslots) {
-        return;
-    }
+    assert(vmem->dynamic_memslots);
 
     /* Activate all involved memslots in a single transaction. */
     memory_region_transaction_begin();
@@ -547,9 +545,7 @@ static void virtio_mem_deactivate_unplugged_memslots(VirtIOMEM *vmem,
                            vmem->memslot_size;
     unsigned int idx;
 
-    if (!vmem->dynamic_memslots) {
-        return;
-    }
+    assert(vmem->dynamic_memslots);
 
     /* Deactivate all memslots with unplugged blocks in a single transaction. */
     memory_region_transaction_begin();
@@ -598,7 +594,9 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
         virtio_mem_notify_unplug(vmem, offset, size);
         virtio_mem_set_range_unplugged(vmem, start_gpa, size);
         /* Deactivate completely unplugged memslots after updating the state. */
-        virtio_mem_deactivate_unplugged_memslots(vmem, offset, size);
+        if (vmem->dynamic_memslots) {
+            virtio_mem_deactivate_unplugged_memslots(vmem, offset, size);
+        }
         return 0;
     }
 
@@ -635,9 +633,11 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
      * blocks we are plugging here. The following notification will inform
      * registered listeners about the blocks we're plugging.
      */
-    virtio_mem_activate_memslots_to_plug(vmem, offset, size);
+    if (vmem->dynamic_memslots) {
+        virtio_mem_activate_memslots_to_plug(vmem, offset, size);
+    }
     ret = virtio_mem_notify_plug(vmem, offset, size);
-    if (ret) {
+    if (ret && vmem->dynamic_memslots) {
         virtio_mem_deactivate_unplugged_memslots(vmem, offset, size);
     }
@@ -749,7 +749,9 @@ static int virtio_mem_unplug_all(VirtIOMEM *vmem)
         notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
 
         /* Deactivate all memslots after updating the state. */
-        virtio_mem_deactivate_unplugged_memslots(vmem, 0, region_size);
+        if (vmem->dynamic_memslots) {
+            virtio_mem_deactivate_unplugged_memslots(vmem, 0, region_size);
+        }
     }
 
     trace_virtio_mem_unplugged_all();
```
include/exec/ramblock.h

```diff
@@ -44,6 +44,14 @@ struct RAMBlock {
     size_t page_size;
     /* dirty bitmap used during migration */
     unsigned long *bmap;
+    /* shadow dirty bitmap used when migrating to a file */
+    unsigned long *shadow_bmap;
+    /*
+     * offset in the file pages belonging to this ramblock are saved,
+     * used only during migration to a file.
+     */
+    off_t bitmap_offset;
+    uint64_t pages_offset;
     /* bitmap of already received pages in postcopy */
     unsigned long *receivedmap;
```
include/exec/translation-block.h

```diff
@@ -71,13 +71,12 @@ struct TranslationBlock {
 #define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
 #define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
 #define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
-#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
-#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
-#define CF_USE_ICOUNT    0x00020000
-#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
-#define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
-#define CF_PCREL         0x00200000 /* Opcodes in TB are PC-relative */
+#define CF_MEMI_ONLY     0x00001000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT    0x00002000
+#define CF_INVALID       0x00004000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL      0x00008000 /* Generate code for a parallel context */
+#define CF_NOIRQ         0x00010000 /* Generate an uninterruptible TB */
+#define CF_PCREL         0x00020000 /* Opcodes in TB are PC-relative */
 #define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
 #define CF_CLUSTER_SHIFT 24
```
include/io/channel.h

```diff
@@ -44,6 +44,7 @@ enum QIOChannelFeature {
     QIO_CHANNEL_FEATURE_LISTEN,
     QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY,
     QIO_CHANNEL_FEATURE_READ_MSG_PEEK,
+    QIO_CHANNEL_FEATURE_SEEKABLE,
 };
 
@@ -130,6 +131,16 @@ struct QIOChannelClass {
                            Error **errp);
 
     /* Optional callbacks */
+    ssize_t (*io_pwritev)(QIOChannel *ioc,
+                          const struct iovec *iov,
+                          size_t niov,
+                          off_t offset,
+                          Error **errp);
+    ssize_t (*io_preadv)(QIOChannel *ioc,
+                         const struct iovec *iov,
+                         size_t niov,
+                         off_t offset,
+                         Error **errp);
     int (*io_shutdown)(QIOChannel *ioc,
                        QIOChannelShutdown how,
                        Error **errp);
@@ -528,6 +539,104 @@ void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled);
 int qio_channel_close(QIOChannel *ioc,
                       Error **errp);
 
+/**
+ * qio_channel_pwritev
+ * @ioc: the channel object
+ * @iov: the array of memory regions to write data from
+ * @niov: the length of the @iov array
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ * Behaves as qio_channel_writev_full, apart from not supporting
+ * sending of file handles as well as beginning the write at the
+ * passed @offset
+ *
+ */
+ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov,
+                            size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pwritev_all:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to write data from
+ * @niov: the length of the @iov array
+ * @offset: the iovec offset in the file where to write the data
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Returns: 0 if all bytes were written, or -1 on error
+ */
+int qio_channel_pwritev_all(QIOChannel *ioc, const struct iovec *iov,
+                            size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pwrite
+ * @ioc: the channel object
+ * @buf: the memory region to write data into
+ * @buflen: the number of bytes to @buf
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ */
+ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen,
+                           off_t offset, Error **errp);
+
+/**
+ * qio_channel_preadv
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data into
+ * @niov: the length of the @iov array
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ * Behaves as qio_channel_readv_full, apart from not supporting
+ * receiving of file handles as well as beginning the read at the
+ * passed @offset
+ *
+ */
+ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov,
+                           size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_preadv_all:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data to
+ * @niov: the length of the @iov array
+ * @offset: the iovec offset in the file from where to read the data
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Returns: 0 if all bytes were read, or -1 on error
+ */
+int qio_channel_preadv_all(QIOChannel *ioc, const struct iovec *iov,
+                           size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pread
+ * @ioc: the channel object
+ * @buf: the memory region to write data into
+ * @buflen: the number of bytes to @buf
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility, so may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ */
+ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen,
+                          off_t offset, Error **errp);
+
 /**
  * qio_channel_shutdown:
  * @ioc: the channel object
```
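A caller-side sketch of the positioned I/O API declared above (illustrative only: the path, open flags and error handling are assumptions, and short reads/writes are not retried). It follows the documented contract of checking QIO_CHANNEL_FEATURE_SEEKABLE before using the offset-based calls:

```c
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "io/channel-file.h"

/* Write a buffer at a fixed file offset, then read it back. */
static bool demo_positioned_io(const char *path, Error **errp)
{
    QIOChannelFile *fioc;
    QIOChannel *ioc;
    char out[] = "payload";
    char in[sizeof(out)];

    fioc = qio_channel_file_new_path(path, O_RDWR | O_CREAT, 0600, errp);
    if (!fioc) {
        return false;
    }
    ioc = QIO_CHANNEL(fioc);

    /* Positioned I/O is only supported on seekable channels. */
    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) {
        error_setg(errp, "channel is not seekable");
        object_unref(OBJECT(fioc));
        return false;
    }

    if (qio_channel_pwrite(ioc, out, sizeof(out), 4096, errp) < 0 ||
        qio_channel_pread(ioc, in, sizeof(in), 4096, errp) < 0) {
        object_unref(OBJECT(fioc));
        return false;
    }

    object_unref(OBJECT(fioc));
    return true;
}
```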
migration/qemu-file.h

```diff
@@ -50,6 +50,8 @@ unsigned int qemu_get_be16(QEMUFile *f);
 unsigned int qemu_get_be32(QEMUFile *f);
 uint64_t qemu_get_be64(QEMUFile *f);
 
+bool qemu_file_is_seekable(QEMUFile *f);
+
 static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv)
 {
     qemu_put_be64(f, *pv);
```
include/qemu/bitops.h

```diff
@@ -67,6 +67,19 @@ static inline void clear_bit(long nr, unsigned long *addr)
     *p &= ~mask;
 }
 
+/**
+ * clear_bit_atomic - Clears a bit in memory atomically
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit_atomic(long nr, unsigned long *addr)
+{
+    unsigned long mask = BIT_MASK(nr);
+    unsigned long *p = addr + BIT_WORD(nr);
+
+    return qatomic_and(p, ~mask);
+}
+
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to change
```
include/qemu/int128.h

```diff
@@ -10,6 +10,7 @@
  */
 #if defined(CONFIG_INT128) && !defined(CONFIG_TCG_INTERPRETER)
 typedef __int128_t Int128;
+typedef __int128_t __attribute__((aligned(16))) Int128Aligned;
 
 static inline Int128 int128_make64(uint64_t a)
 {
@@ -224,6 +225,7 @@ static inline Int128 int128_rems(Int128 a, Int128 b)
 #else /* !CONFIG_INT128 */
 
 typedef struct Int128 Int128;
+typedef struct Int128 __attribute__((aligned(16))) Int128Aligned;
 
 /*
  * We guarantee that the in-memory byte representation of an
```
include/qemu/osdep.h

```diff
@@ -597,6 +597,8 @@ int qemu_lock_fd_test(int fd, int64_t start, int64_t len, bool exclusive);
 bool qemu_has_ofd_lock(void);
 #endif
 
+bool qemu_has_direct_io(void);
+
 #if defined(__HAIKU__) && defined(__i386__)
 #define FMT_pid "%ld"
 #elif defined(WIN64)
```
include/sysemu/dump-arch.h

```diff
@@ -24,6 +24,7 @@ typedef struct ArchDumpInfo {
     void (*arch_sections_add_fn)(DumpState *s);
     uint64_t (*arch_sections_write_hdr_fn)(DumpState *s, uint8_t *buff);
     int (*arch_sections_write_fn)(DumpState *s, uint8_t *buff);
+    void (*arch_cleanup_fn)(DumpState *s);
 } ArchDumpInfo;
 
 struct GuestPhysBlockList; /* memory_mapping.h */
```
io/channel-file.c

```diff
@@ -36,6 +36,10 @@ qio_channel_file_new_fd(int fd)
 
     ioc->fd = fd;
 
+    if (lseek(fd, 0, SEEK_CUR) != (off_t)-1) {
+        qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SEEKABLE);
+    }
+
     trace_qio_channel_file_new_fd(ioc, fd);
 
     return ioc;
@@ -60,6 +64,10 @@ qio_channel_file_new_path(const char *path,
         return NULL;
     }
 
+    if (lseek(ioc->fd, 0, SEEK_CUR) != (off_t)-1) {
+        qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SEEKABLE);
+    }
+
     trace_qio_channel_file_new_path(ioc, path, flags, mode, ioc->fd);
 
     return ioc;
@@ -138,6 +146,58 @@ static ssize_t qio_channel_file_writev(QIOChannel *ioc,
     return ret;
 }
 
+#ifdef CONFIG_PREADV
+static ssize_t qio_channel_file_preadv(QIOChannel *ioc,
+                                       const struct iovec *iov,
+                                       size_t niov,
+                                       off_t offset,
+                                       Error **errp)
+{
+    QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
+    ssize_t ret;
+
+ retry:
+    ret = preadv(fioc->fd, iov, niov, offset);
+    if (ret < 0) {
+        if (errno == EAGAIN) {
+            return QIO_CHANNEL_ERR_BLOCK;
+        }
+        if (errno == EINTR) {
+            goto retry;
+        }
+
+        error_setg_errno(errp, errno, "Unable to read from file");
+        return -1;
+    }
+
+    return ret;
+}
+
+static ssize_t qio_channel_file_pwritev(QIOChannel *ioc,
+                                        const struct iovec *iov,
+                                        size_t niov,
+                                        off_t offset,
+                                        Error **errp)
+{
+    QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
+    ssize_t ret;
+
+ retry:
+    ret = pwritev(fioc->fd, iov, niov, offset);
+    if (ret <= 0) {
+        if (errno == EAGAIN) {
+            return QIO_CHANNEL_ERR_BLOCK;
+        }
+        if (errno == EINTR) {
+            goto retry;
+        }
+        error_setg_errno(errp, errno, "Unable to write to file");
+        return -1;
+    }
+    return ret;
+}
+#endif /* CONFIG_PREADV */
+
 static int qio_channel_file_set_blocking(QIOChannel *ioc,
                                          bool enabled,
                                          Error **errp)
@@ -182,6 +242,11 @@ static int qio_channel_file_close(QIOChannel *ioc,
 {
     QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
 
+    if (qemu_fdatasync(fioc->fd) < 0) {
+        error_setg_errno(errp, errno,
+                         "Unable to synchronize file data with storage device");
+        return -1;
+    }
     if (qemu_close(fioc->fd) < 0) {
         error_setg_errno(errp, errno,
                          "Unable to close file");
@@ -223,6 +288,10 @@ static void qio_channel_file_class_init(ObjectClass *klass,
     ioc_klass->io_writev = qio_channel_file_writev;
     ioc_klass->io_readv = qio_channel_file_readv;
     ioc_klass->io_set_blocking = qio_channel_file_set_blocking;
+#ifdef CONFIG_PREADV
+    ioc_klass->io_pwritev = qio_channel_file_pwritev;
+    ioc_klass->io_preadv = qio_channel_file_preadv;
+#endif
     ioc_klass->io_seek = qio_channel_file_seek;
     ioc_klass->io_close = qio_channel_file_close;
     ioc_klass->io_create_watch = qio_channel_file_create_watch;
```
io/channel.c (128 changes)
```diff
@@ -454,6 +454,134 @@ GSource *qio_channel_add_watch_source(QIOChannel *ioc,
 }
 
 
+ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov,
+                            size_t niov, off_t offset, Error **errp)
+{
+    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
+
+    if (!klass->io_pwritev) {
+        error_setg(errp, "Channel does not support pwritev");
+        return -1;
+    }
+
+    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) {
+        error_setg_errno(errp, EINVAL, "Requested channel is not seekable");
+        return -1;
+    }
+
+    return klass->io_pwritev(ioc, iov, niov, offset, errp);
+}
+
+static int qio_channel_preadv_pwritev_contiguous(QIOChannel *ioc,
+                                                 const struct iovec *iov,
+                                                 size_t niov, off_t offset,
+                                                 bool is_write, Error **errp)
+{
+    ssize_t ret = -1;
+    int i, slice_idx, slice_num;
+    uintptr_t base, next, file_offset;
+    size_t len;
+
+    slice_idx = 0;
+    slice_num = 1;
+
+    /*
+     * If the iov array doesn't have contiguous elements, we need to
+     * split it in slices because we only have one (file) 'offset' for
+     * the whole iov. Do this here so callers don't need to break the
+     * iov array themselves.
+     */
+    for (i = 0; i < niov; i++, slice_num++) {
+        base = (uintptr_t) iov[i].iov_base;
+
+        if (i != niov - 1) {
+            len = iov[i].iov_len;
+            next = (uintptr_t) iov[i + 1].iov_base;
+
+            if (base + len == next) {
+                continue;
+            }
+        }
+
+        /*
+         * Use the offset of the first element of the segment that
+         * we're sending.
+         */
+        file_offset = offset + (uintptr_t) iov[slice_idx].iov_base;
+
+        if (is_write) {
+            ret = qio_channel_pwritev(ioc, &iov[slice_idx], slice_num,
+                                      file_offset, errp);
+        } else {
+            ret = qio_channel_preadv(ioc, &iov[slice_idx], slice_num,
+                                     file_offset, errp);
+        }
+
+        if (ret < 0) {
+            break;
+        }
+
+        slice_idx += slice_num;
+        slice_num = 0;
+    }
+
+    return (ret < 0) ? -1 : 0;
+}
+
+int qio_channel_pwritev_all(QIOChannel *ioc, const struct iovec *iov,
+                            size_t niov, off_t offset, Error **errp)
+{
+    return qio_channel_preadv_pwritev_contiguous(ioc, iov, niov,
+                                                 offset, true, errp);
+}
+
+ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen,
+                           off_t offset, Error **errp)
+{
+    struct iovec iov = {
+        .iov_base = buf,
+        .iov_len = buflen
+    };
+
+    return qio_channel_pwritev(ioc, &iov, 1, offset, errp);
+}
+
+ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov,
+                           size_t niov, off_t offset, Error **errp)
+{
+    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
+
+    if (!klass->io_preadv) {
+        error_setg(errp, "Channel does not support preadv");
+        return -1;
+    }
+
+    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SEEKABLE)) {
+        error_setg_errno(errp, EINVAL, "Requested channel is not seekable");
+        return -1;
+    }
+
+    return klass->io_preadv(ioc, iov, niov, offset, errp);
+}
+
+int qio_channel_preadv_all(QIOChannel *ioc, const struct iovec *iov,
+                           size_t niov, off_t offset, Error **errp)
+{
+    return qio_channel_preadv_pwritev_contiguous(ioc, iov, niov,
+                                                 offset, false, errp);
+}
+
+ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen,
+                          off_t offset, Error **errp)
+{
+    struct iovec iov = {
+        .iov_base = buf,
+        .iov_len = buflen
+    };
+
+    return qio_channel_preadv(ioc, &iov, 1, offset, errp);
+}
+
 int qio_channel_shutdown(QIOChannel *ioc,
                          QIOChannelShutdown how,
                          Error **errp)
```
meson.build

```diff
@@ -462,6 +462,7 @@ warn_flags = [
     '-Wno-tautological-type-limit-compare',
     '-Wno-psabi',
     '-Wno-gnu-variable-sized-type-not-at-end',
+    '-Wshadow=local',
 ]
 
 if targetos != 'darwin'
```
migration/file.c (192 changes)
```diff
@@ -6,17 +6,26 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/cutils.h"
 #include "qapi/error.h"
+#include "qapi/qapi-commands-misc.h"
-#include "qemu/cutils.h"
+#include "qemu/error-report.h"
 #include "channel.h"
 #include "file.h"
 #include "migration.h"
 #include "io/channel-file.h"
 #include "io/channel-util.h"
+#include "monitor/monitor.h"
+#include "options.h"
 #include "trace.h"
 
 #define OFFSET_OPTION ",offset="
 
+static struct FileOutgoingArgs {
+    char *fname;
+    int64_t fdset_id;
+} outgoing_args;
+
 /* Remove the offset option from @filespec and return it in @offsetp. */
 int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp)
@@ -36,6 +45,139 @@ int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp)
     return 0;
 }
 
+/*
+ * If the open flags and file status flags from the file descriptors
+ * in the fdset don't match what QEMU expects, errno gets set to
+ * EACCES. Let's provide a more user-friendly message.
+ */
+static void file_fdset_error(int flags, Error **errp)
+{
+    ERRP_GUARD();
+
+    if (errno == EACCES) {
+        /* ditch the previous error */
+        error_free(*errp);
+        *errp = NULL;
+
+        error_setg(errp, "Fdset is missing a file descriptor with flags: 0x%x",
+                   flags);
+    }
+}
+
+static void file_remove_fdset(void)
+{
+    if (outgoing_args.fdset_id != -1) {
+        qmp_remove_fd(outgoing_args.fdset_id, false, -1, NULL);
+        outgoing_args.fdset_id = -1;
+    }
+}
+
+/*
+ * Due to the behavior of the dup() system call, we need the fdset to
+ * have two non-duplicate fds so we can enable direct IO in the
+ * secondary channels without affecting the main channel.
+ */
+static bool file_parse_fdset(const char *filename, int64_t *fdset_id,
+                             Error **errp)
+{
+    FdsetInfoList *fds_info;
+    FdsetFdInfoList *fd_info;
+    const char *fdset_id_str;
+    int nfds = 0;
+
+    *fdset_id = -1;
+
+    if (!strstart(filename, "/dev/fdset/", &fdset_id_str)) {
+        return true;
+    }
+
+    if (!migrate_multifd()) {
+        error_setg(errp, "fdset is only supported with multifd");
+        return false;
+    }
+
+    *fdset_id = qemu_parse_fd(fdset_id_str);
+
+    for (fds_info = qmp_query_fdsets(NULL); fds_info;
+         fds_info = fds_info->next) {
+
+        if (*fdset_id != fds_info->value->fdset_id) {
+            continue;
+        }
+
+        for (fd_info = fds_info->value->fds; fd_info; fd_info = fd_info->next) {
+            if (nfds++ > 2) {
+                break;
+            }
+        }
+    }
+
+    if (nfds != 2) {
+        error_setg(errp, "Outgoing migration needs two fds in the fdset, "
+                   "got %d", nfds);
+        qmp_remove_fd(*fdset_id, false, -1, NULL);
+        *fdset_id = -1;
+        return false;
+    }
+
+    return true;
+}
+
+static void qio_channel_file_connect_worker(QIOTask *task, gpointer opaque)
+{
+    /* noop */
+}
+
+int file_send_channel_destroy(QIOChannel *ioc)
+{
+    if (ioc) {
+        qio_channel_close(ioc, NULL);
+        object_unref(OBJECT(ioc));
+    }
+    g_free(outgoing_args.fname);
+    outgoing_args.fname = NULL;
+
+    file_remove_fdset();
+    return 0;
+}
+
+void file_send_channel_create(QIOTaskFunc f, void *data)
+{
+    QIOChannelFile *ioc = NULL;
+    QIOTask *task;
+    Error *err = NULL;
+    int flags = O_WRONLY;
+
+    if (migrate_direct_io()) {
+#ifdef O_DIRECT
+        /*
+         * Enable O_DIRECT for the secondary channels. These are used
+         * for sending ram pages and writes should be guaranteed to be
+         * aligned to at least page size.
+         */
+        flags |= O_DIRECT;
+#else
+        error_setg(&err, "System does not support O_DIRECT");
+        error_append_hint(&err,
+                          "Try disabling direct-io migration capability\n");
+        /* errors are propagated through the qio_task below */
+#endif
+    }
+
+    if (!err) {
+        ioc = qio_channel_file_new_path(outgoing_args.fname, flags, 0, &err);
+    }
+
+    task = qio_task_new(OBJECT(ioc), f, (gpointer)data, NULL);
+    if (!ioc) {
+        file_fdset_error(flags, &err);
+        qio_task_set_error(task, err);
+    }
+
+    qio_task_run_in_thread(task, qio_channel_file_connect_worker,
+                           (gpointer)data, NULL, NULL);
+}
+
 void file_start_outgoing_migration(MigrationState *s,
                                    FileMigrationArgs *file_args, Error **errp)
 {
@@ -43,12 +185,20 @@ void file_start_outgoing_migration(MigrationState *s,
     g_autofree char *filename = g_strdup(file_args->filename);
     uint64_t offset = file_args->offset;
     QIOChannel *ioc;
+    int flags = O_CREAT | O_TRUNC | O_WRONLY;
+    mode_t mode = 0660;
 
     trace_migration_file_outgoing(filename);
 
-    fioc = qio_channel_file_new_path(filename, O_CREAT | O_WRONLY | O_TRUNC,
-                                     0600, errp);
+    if (!file_parse_fdset(filename, &outgoing_args.fdset_id, errp)) {
+        return;
+    }
+
+    outgoing_args.fname = g_strdup(filename);
+
+    fioc = qio_channel_file_new_path(filename, flags, mode, errp);
     if (!fioc) {
+        file_fdset_error(flags, errp);
         return;
     }
 
@@ -74,22 +224,40 @@ void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp)
     g_autofree char *filename = g_strdup(file_args->filename);
     QIOChannelFile *fioc = NULL;
     uint64_t offset = file_args->offset;
-    QIOChannel *ioc;
+    int channels = 1;
+    int i = 0, fd, flags = O_RDONLY;
 
     trace_migration_file_incoming(filename);
 
-    fioc = qio_channel_file_new_path(filename, O_RDONLY, 0, errp);
+    fioc = qio_channel_file_new_path(filename, flags, 0, errp);
     if (!fioc) {
+        file_fdset_error(flags, errp);
         return;
     }
 
-    ioc = QIO_CHANNEL(fioc);
-    if (offset && qio_channel_io_seek(ioc, offset, SEEK_SET, errp) < 0) {
+    if (offset &&
+        qio_channel_io_seek(QIO_CHANNEL(fioc), offset, SEEK_SET, errp) < 0) {
         return;
     }
 
+    if (migrate_multifd()) {
+        channels += migrate_multifd_channels();
+    }
+
+    fd = fioc->fd;
+
+    do {
+        QIOChannel *ioc = QIO_CHANNEL(fioc);
+
+        qio_channel_set_name(ioc, "migration-file-incoming");
+        qio_channel_add_watch_full(ioc, G_IO_IN,
+                                   file_accept_incoming_migration,
+                                   NULL, NULL,
+                                   g_main_context_get_thread_default());
+    } while (++i < channels && (fioc = qio_channel_file_new_fd(fd)));
+
+    if (!fioc) {
+        error_setg(errp, "Error creating migration incoming channel");
+        return;
+    }
-    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-file-incoming");
-    qio_channel_add_watch_full(ioc, G_IO_IN,
-                               file_accept_incoming_migration,
-                               NULL, NULL,
-                               g_main_context_get_thread_default());
 }
```
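A hypothetical management-side QMP exchange matching `file_parse_fdset()` above: two descriptors for the same file are placed in one fdset (each `add-fd` command carries its file descriptor out-of-band over the QMP socket), and the migration then targets `/dev/fdset/<id>`; the fdset id and offset here are made-up example values:

```json
{ "execute": "add-fd", "arguments": { "fdset-id": 1 } }
{ "execute": "add-fd", "arguments": { "fdset-id": 1 } }
{ "execute": "migrate",
  "arguments": { "uri": "file:/dev/fdset/1,offset=0x10000" } }
```

With the `direct-io` parameter enabled, the second descriptor would be the one opened with O_DIRECT, which the secondary multifd channels pick up without affecting the main channel.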
migration/file.h

```diff
@@ -9,10 +9,15 @@
 #define QEMU_MIGRATION_FILE_H
 
 #include "qapi/qapi-types-migration.h"
+#include "io/task.h"
+#include "channel.h"
 
 void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp);
 
 void file_start_outgoing_migration(MigrationState *s,
                                    FileMigrationArgs *file_args, Error **errp);
 int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp);
+
+void file_send_channel_create(QIOTaskFunc f, void *data);
+int file_send_channel_destroy(QIOChannel *ioc);
 #endif
```
migration/migration-hmp-cmds.c

```diff
@@ -392,6 +392,13 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "%s: %s\n",
             MigrationParameter_str(MIGRATION_PARAMETER_MODE),
             qapi_enum_lookup(&MigMode_lookup, params->mode));
+
+        if (params->has_direct_io) {
+            monitor_printf(mon, "%s: %s\n",
+                           MigrationParameter_str(
+                               MIGRATION_PARAMETER_DIRECT_IO),
+                           params->direct_io ? "on" : "off");
+        }
     }
 
     qapi_free_MigrationParameters(params);
@@ -679,6 +686,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
         p->has_mode = true;
         visit_type_MigMode(v, param, &p->mode, &err);
         break;
+    case MIGRATION_PARAMETER_DIRECT_IO:
+        p->has_direct_io = true;
+        visit_type_bool(v, param, &p->direct_io, &err);
+        break;
     default:
         assert(0);
     }
```
migration/migration.c

```diff
@@ -128,20 +128,43 @@ static bool migration_needs_multiple_sockets(void)
     return migrate_multifd() || migrate_postcopy_preempt();
 }
 
-static bool transport_supports_multi_channels(SocketAddress *saddr)
+static bool transport_supports_multi_channels(MigrationAddress *addr)
 {
-    return saddr->type == SOCKET_ADDRESS_TYPE_INET ||
-           saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
-           saddr->type == SOCKET_ADDRESS_TYPE_VSOCK;
+    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
+        SocketAddress *saddr = &addr->u.socket;
+
+        return (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
+                saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
+                saddr->type == SOCKET_ADDRESS_TYPE_VSOCK);
+    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
+        return migrate_fixed_ram();
+    } else {
+        return false;
+    }
+}
+
+static bool migration_needs_seekable_channel(void)
+{
+    return migrate_fixed_ram();
+}
+
+static bool transport_supports_seeking(MigrationAddress *addr)
+{
+    return addr->transport == MIGRATION_ADDRESS_TYPE_FILE;
 }
 
 static bool
 migration_channels_and_transport_compatible(MigrationAddress *addr,
                                             Error **errp)
 {
+    if (migration_needs_seekable_channel() &&
+        !transport_supports_seeking(addr)) {
+        error_setg(errp, "Migration requires seekable transport (e.g. file)");
+        return false;
+    }
+
     if (migration_needs_multiple_sockets() &&
-        (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) &&
-        !transport_supports_multi_channels(&addr->u.socket)) {
+        !transport_supports_multi_channels(addr)) {
         error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
         return false;
     }
@@ -698,6 +721,13 @@ process_incoming_migration_co(void *opaque)
     }
 
     if (ret < 0) {
+        MigrationState *s = migrate_get_current();
+
+        if (migrate_has_error(s)) {
+            WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
+                error_report_err(s->error);
+            }
+        }
         error_report("load of migration failed: %s", strerror(-ret));
         goto fail;
     }
@@ -822,7 +852,8 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
     uint32_t channel_magic = 0;
     int ret = 0;
 
-    if (migrate_multifd() && !migrate_postcopy_ram() &&
+    if (migrate_multifd() && migrate_multifd_packets() &&
+        !migrate_postcopy_ram() &&
         qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
         /*
          * With multiple channels, it is possible that we receive channels
```
migration/multifd-zlib.c

@@ -69,7 +69,7 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
        err_msg = "out of memory for buf";
        goto err_free_zbuff;
    }
    p->data = z;
    p->compress_data = z;
    return 0;

err_free_zbuff:
@@ -92,15 +92,15 @@ err_free_z:
 */
static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    struct zlib_data *z = p->data;
    struct zlib_data *z = p->compress_data;

    deflateEnd(&z->zs);
    g_free(z->zbuff);
    z->zbuff = NULL;
    g_free(z->buf);
    z->buf = NULL;
    g_free(p->data);
    p->data = NULL;
    g_free(p->compress_data);
    p->compress_data = NULL;
}

/**
@@ -116,7 +116,7 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
 */
static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
{
    struct zlib_data *z = p->data;
    struct zlib_data *z = p->compress_data;
    z_stream *zs = &z->zs;
    uint32_t out_size = 0;
    int ret;
@@ -189,7 +189,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
    struct zlib_data *z = g_new0(struct zlib_data, 1);
    z_stream *zs = &z->zs;

    p->data = z;
    p->compress_data = z;
    zs->zalloc = Z_NULL;
    zs->zfree = Z_NULL;
    zs->opaque = Z_NULL;
@@ -219,13 +219,13 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
 */
static void zlib_recv_cleanup(MultiFDRecvParams *p)
{
    struct zlib_data *z = p->data;
    struct zlib_data *z = p->compress_data;

    inflateEnd(&z->zs);
    g_free(z->zbuff);
    z->zbuff = NULL;
    g_free(p->data);
    p->data = NULL;
    g_free(p->compress_data);
    p->compress_data = NULL;
}

/**
@@ -241,7 +241,7 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p)
 */
static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
{
    struct zlib_data *z = p->data;
    struct zlib_data *z = p->compress_data;
    z_stream *zs = &z->zs;
    uint32_t in_size = p->next_packet_size;
    /* we measure the change of total_out */
@@ -314,7 +314,7 @@ static MultiFDMethods multifd_zlib_ops = {
    .send_prepare = zlib_send_prepare,
    .recv_setup = zlib_recv_setup,
    .recv_cleanup = zlib_recv_cleanup,
    .recv_pages = zlib_recv_pages
    .recv_data = zlib_recv_pages
};

static void multifd_zlib_register(void)
migration/multifd-zstd.c

@@ -52,7 +52,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
    struct zstd_data *z = g_new0(struct zstd_data, 1);
    int res;

    p->data = z;
    p->compress_data = z;
    z->zcs = ZSTD_createCStream();
    if (!z->zcs) {
        g_free(z);
@@ -90,14 +90,14 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
 */
static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    struct zstd_data *z = p->data;
    struct zstd_data *z = p->compress_data;

    ZSTD_freeCStream(z->zcs);
    z->zcs = NULL;
    g_free(z->zbuff);
    z->zbuff = NULL;
    g_free(p->data);
    p->data = NULL;
    g_free(p->compress_data);
    p->compress_data = NULL;
}

/**
@@ -113,7 +113,7 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
 */
static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
{
    struct zstd_data *z = p->data;
    struct zstd_data *z = p->compress_data;
    int ret;
    uint32_t i;

@@ -178,7 +178,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
    struct zstd_data *z = g_new0(struct zstd_data, 1);
    int ret;

    p->data = z;
    p->compress_data = z;
    z->zds = ZSTD_createDStream();
    if (!z->zds) {
        g_free(z);
@@ -216,14 +216,14 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
 */
static void zstd_recv_cleanup(MultiFDRecvParams *p)
{
    struct zstd_data *z = p->data;
    struct zstd_data *z = p->compress_data;

    ZSTD_freeDStream(z->zds);
    z->zds = NULL;
    g_free(z->zbuff);
    z->zbuff = NULL;
    g_free(p->data);
    p->data = NULL;
    g_free(p->compress_data);
    p->compress_data = NULL;
}

/**
@@ -243,7 +243,7 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
    uint32_t out_size = 0;
    uint32_t expected_size = p->normal_num * p->page_size;
    uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
    struct zstd_data *z = p->data;
    struct zstd_data *z = p->compress_data;
    int ret;
    int i;

@@ -305,7 +305,7 @@ static MultiFDMethods multifd_zstd_ops = {
    .send_prepare = zstd_send_prepare,
    .recv_setup = zstd_recv_setup,
    .recv_cleanup = zstd_recv_cleanup,
    .recv_pages = zstd_recv_pages
    .recv_data = zstd_recv_pages
};

static void multifd_zstd_register(void)
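Both compressors now populate .recv_data instead of .recv_pages; only the hook name changes, the zlib/zstd implementations are reused as-is. A skeleton of what a method table looks like after the rename (the noop_* functions are hypothetical stand-ins; multifd_register_ops() and MULTIFD_COMPRESSION_NONE are real):

static MultiFDMethods multifd_noop_ops = {
    .send_setup = noop_send_setup,
    .send_cleanup = noop_send_cleanup,
    .send_prepare = noop_send_prepare,
    .recv_setup = noop_recv_setup,
    .recv_cleanup = noop_recv_cleanup,
    .recv_data = noop_recv_data,     /* was .recv_pages before this series */
};

static void multifd_noop_register(void)
{
    multifd_register_ops(MULTIFD_COMPRESSION_NONE, &multifd_noop_ops);
}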
migration/multifd.c

@@ -17,6 +17,7 @@
#include "exec/ramblock.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "file.h"
#include "ram.h"
#include "migration.h"
#include "migration-stats.h"
@@ -28,6 +29,7 @@
#include "threadinfo.h"
#include "options.h"
#include "qemu/yank.h"
#include "io/channel-file.h"
#include "io/channel-socket.h"
#include "yank_functions.h"

@@ -128,7 +130,7 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
}

/**
 * nocomp_recv_pages: read the data from the channel into actual pages
 * nocomp_recv_data: read the data from the channel
 *
 * For no compression we just need to read things into the correct place.
 *
@@ -137,20 +139,39 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
static int nocomp_recv_data(MultiFDRecvParams *p, Error **errp)
{
    uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
    ERRP_GUARD();

    if (flags != MULTIFD_FLAG_NOCOMP) {
        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                   p->id, flags, MULTIFD_FLAG_NOCOMP);
        return -1;
    }
    for (int i = 0; i < p->normal_num; i++) {
        p->iov[i].iov_base = p->host + p->normal[i];
        p->iov[i].iov_len = p->page_size;

    if (!migrate_multifd_packets()) {
        MultiFDRecvData *data = p->data;
        size_t ret;

        ret = qio_channel_pread(p->c, (char *) data->opaque,
                                data->size, data->file_offset, errp);
        if (ret != data->size) {
            error_prepend(errp,
                          "multifd recv (%u): read 0x%zx, expected 0x%zx",
                          p->id, ret, data->size);
            return -1;
        }

        return 0;
    } else {
        for (int i = 0; i < p->normal_num; i++) {
            p->iov[i].iov_base = p->host + p->normal[i];
            p->iov[i].iov_len = p->page_size;
        }

        return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
    }
    return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
}

static MultiFDMethods multifd_nocomp_ops = {
@@ -159,7 +180,7 @@ static MultiFDMethods multifd_nocomp_ops = {
    .send_prepare = nocomp_send_prepare,
    .recv_setup = nocomp_recv_setup,
    .recv_cleanup = nocomp_recv_cleanup,
    .recv_pages = nocomp_recv_pages
    .recv_data = nocomp_recv_data
};

static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {
@@ -236,12 +257,12 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
    return msg.id;
}

static MultiFDPages_t *multifd_pages_init(size_t size)
static MultiFDPages_t *multifd_pages_init(uint32_t n)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->offset = g_new0(ram_addr_t, size);
    pages->allocated = n;
    pages->offset = g_new0(ram_addr_t, n);

    return pages;
}
@@ -250,13 +271,23 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->num = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}

static void multifd_set_file_bitmap(MultiFDSendParams *p)
{
    MultiFDPages_t *pages = p->pages;

    assert(pages->block);

    for (int i = 0; i < p->normal_num; i++) {
        ramblock_set_shadow_bmap_atomic(pages->block, pages->offset[i]);
    }
}

static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
@@ -391,7 +422,7 @@ struct {
 * false.
 */

static int multifd_send_pages(QEMUFile *f)
static int multifd_send_pages(void)
{
    int i;
    static int next_channel;
@@ -403,6 +434,7 @@ static int multifd_send_pages(QEMUFile *f)
    }

    qemu_sem_wait(&multifd_send_state->channels_ready);

    /*
     * next_channel can remain from a previous migration that was
     * using more channels, so ensure it doesn't overflow if the
@@ -437,7 +469,7 @@ static int multifd_send_pages(QEMUFile *f)
    return 1;
}

int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
int multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;
    bool changed = false;
@@ -457,12 +489,12 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
        changed = true;
    }

    if (multifd_send_pages(f) < 0) {
    if (multifd_send_pages() < 0) {
        return -1;
    }

    if (changed) {
        return multifd_queue_page(f, block, offset);
        return multifd_queue_page(block, offset);
    }

    return 1;
@@ -511,7 +543,11 @@ static void multifd_send_terminate_threads(Error *err)

static int multifd_send_channel_destroy(QIOChannel *send)
{
    return socket_send_channel_destroy(send);
    if (migrate_to_file()) {
        return file_send_channel_destroy(send);
    } else {
        return socket_send_channel_destroy(send);
    }
}

void multifd_save_cleanup(void)
@@ -584,7 +620,7 @@ static int multifd_zero_copy_flush(QIOChannel *c)
    return ret;
}

int multifd_send_sync_main(QEMUFile *f)
int multifd_send_sync_main(void)
{
    int i;
    bool flush_zero_copy;
@@ -593,12 +629,40 @@ int multifd_send_sync_main(QEMUFile *f)
        return 0;
    }
    if (multifd_send_state->pages->num) {
        if (multifd_send_pages(f) < 0) {
        if (multifd_send_pages() < 0) {
            error_report("%s: multifd_send_pages fail", __func__);
            return -1;
        }
    }

    if (!migrate_multifd_packets()) {
        /*
         * There's no sync packet to send. Just make sure the sending
         * above has finished.
         */
        for (i = 0; i < migrate_multifd_channels(); i++) {
            qemu_sem_wait(&multifd_send_state->channels_ready);
        }

        /* sanity check and release the channels */
        for (i = 0; i < migrate_multifd_channels(); i++) {
            MultiFDSendParams *p = &multifd_send_state->params[i];

            qemu_mutex_lock(&p->mutex);
            if (p->quit) {
                error_report("%s: channel %d has already quit!", __func__, i);
                qemu_mutex_unlock(&p->mutex);
                return -1;
            }
            assert(!p->pending_job);
            qemu_mutex_unlock(&p->mutex);

            qemu_sem_post(&p->sem);
        }

        return 0;
    }

    /*
     * When using zero-copy, it's necessary to flush the pages before any of
     * the pages can be sent again, so we'll make sure the new version of the
@@ -654,18 +718,22 @@ static void *multifd_send_thread(void *opaque)
    Error *local_err = NULL;
    int ret = 0;
    bool use_zero_copy_send = migrate_zero_copy_send();
    bool use_packets = migrate_multifd_packets();

    thread = migration_threads_add(p->name, qemu_get_thread_id());

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        ret = -1;
        goto out;
    if (use_packets) {
        if (multifd_send_initial_packet(p, &local_err) < 0) {
            ret = -1;
            goto out;
        }

        /* initial packet */
        p->num_packets = 1;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_post(&multifd_send_state->channels_ready);
@@ -677,11 +745,12 @@ static void *multifd_send_thread(void *opaque)
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint64_t packet_num = p->packet_num;
            uint32_t flags;
            uintptr_t write_base;

            p->normal_num = 0;

            if (use_zero_copy_send) {
            if (!use_packets || use_zero_copy_send) {
                p->iovs_num = 0;
            } else {
                p->iovs_num = 1;
@@ -699,33 +768,53 @@ static void *multifd_send_thread(void *opaque)
                    break;
                }
            }
            multifd_send_fill_packet(p);

            if (use_packets) {
                multifd_send_fill_packet(p);
                p->num_packets++;
            } else {
                multifd_set_file_bitmap(p);

                /*
                 * If we subtract the host page now, we don't need to
                 * pass it into qio_channel_write_full_all() below.
                 */
                write_base = p->pages->block->pages_offset -
                             (uintptr_t)p->pages->block->host;
            }

            flags = p->flags;
            p->flags = 0;
            p->num_packets++;
            p->total_normal_pages += p->normal_num;
            p->pages->num = 0;
            p->pages->block = NULL;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, p->normal_num, flags,
            trace_multifd_send(p->id, p->packet_num, p->normal_num, flags,
                               p->next_packet_size);

            if (use_zero_copy_send) {
                /* Send header first, without zerocopy */
                ret = qio_channel_write_all(p->c, (void *)p->packet,
                                            p->packet_len, &local_err);
                if (ret != 0) {
                    break;
            if (use_packets) {
                if (use_zero_copy_send) {
                    /* Send header first, without zerocopy */
                    ret = qio_channel_write_all(p->c, (void *)p->packet,
                                                p->packet_len, &local_err);
                    if (ret != 0) {
                        break;
                    }
                } else {
                    /* Send header using the same writev call */
                    p->iov[0].iov_len = p->packet_len;
                    p->iov[0].iov_base = p->packet;
                }

                ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num,
                                                  NULL, 0, p->write_flags,
                                                  &local_err);
            } else {
                /* Send header using the same writev call */
                p->iov[0].iov_len = p->packet_len;
                p->iov[0].iov_base = p->packet;
                ret = qio_channel_pwritev_all(p->c, p->iov, p->iovs_num,
                                              write_base, &local_err);
            }

            ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
                                              0, p->write_flags, &local_err);
            if (ret != 0) {
                break;
            }
@@ -858,8 +947,7 @@ static bool multifd_channel_connect(MultiFDSendParams *p,
    return true;
}

static void multifd_new_send_channel_cleanup(MultiFDSendParams *p,
                                             QIOChannel *ioc, Error *err)
static void multifd_new_send_channel_cleanup(MultiFDSendParams *p, Error *err)
{
    migrate_set_error(migrate_get_current(), err);
    /* Error happen, we need to tell who pay attention to me */
@@ -871,20 +959,20 @@ static void multifd_new_send_channel_cleanup(MultiFDSendParams *p,
     * its status.
     */
    p->quit = true;
    object_unref(OBJECT(ioc));
    error_free(err);
}

static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));
    Object *obj = qio_task_get_source(task);
    Error *local_err = NULL;

    trace_multifd_new_send_channel_async(p->id);
    if (!qio_task_propagate_error(task, &local_err)) {
        p->c = ioc;
        qio_channel_set_delay(p->c, false);
        QIOChannel *ioc = QIO_CHANNEL(obj);

        qio_channel_set_delay(ioc, false);
        p->running = true;
        if (multifd_channel_connect(p, ioc, &local_err)) {
            return;
@@ -892,18 +980,24 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
    }

    trace_multifd_new_send_channel_async_error(p->id, local_err);
    multifd_new_send_channel_cleanup(p, ioc, local_err);
    multifd_new_send_channel_cleanup(p, local_err);
    object_unref(obj);
}

static void multifd_new_send_channel_create(gpointer opaque)
{
    socket_send_channel_create(multifd_new_send_channel_async, opaque);
    if (migrate_to_file()) {
        file_send_channel_create(multifd_new_send_channel_async, opaque);
    } else {
        socket_send_channel_create(multifd_new_send_channel_async, opaque);
    }
}

int multifd_save_setup(Error **errp)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    bool use_packets = migrate_multifd_packets();
    uint8_t i;

    if (!migrate_multifd()) {
@@ -928,14 +1022,20 @@ int multifd_save_setup(Error **errp)
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(uint64_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
        p->packet->version = cpu_to_be32(MULTIFD_VERSION);

        if (use_packets) {
            p->packet_len = sizeof(MultiFDPacket_t)
                          + sizeof(uint64_t) * page_count;
            p->packet = g_malloc0(p->packet_len);
            p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
            p->packet->version = cpu_to_be32(MULTIFD_VERSION);

            /* We need one extra place for the packet header */
            p->iov = g_new0(struct iovec, page_count + 1);
        } else {
            p->iov = g_new0(struct iovec, page_count);
        }
        p->name = g_strdup_printf("multifdsend_%d", i);
        /* We need one extra place for the packet header */
        p->iov = g_new0(struct iovec, page_count + 1);
        p->normal = g_new0(ram_addr_t, page_count);
        p->page_size = qemu_target_page_size();
        p->page_count = page_count;
@@ -965,6 +1065,7 @@ int multifd_save_setup(Error **errp)

struct {
    MultiFDRecvParams *params;
    MultiFDRecvData *data;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
@@ -975,6 +1076,49 @@ struct {
    MultiFDMethods *ops;
} *multifd_recv_state;

int multifd_recv(void)
{
    int i;
    static int next_recv_channel;
    MultiFDRecvParams *p = NULL;
    MultiFDRecvData *data = multifd_recv_state->data;

    /*
     * next_channel can remain from a previous migration that was
     * using more channels, so ensure it doesn't overflow if the
     * limit is lower now.
     */
    next_recv_channel %= migrate_multifd_channels();
    for (i = next_recv_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            error_report("%s: channel %d has already quit!", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return -1;
        }
        if (!p->pending_job) {
            p->pending_job++;
            next_recv_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    assert(p->data->size == 0);
    multifd_recv_state->data = p->data;
    p->data = data;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);

    return 1;
}

MultiFDRecvData *multifd_get_recv_data(void)
{
    return multifd_recv_state->data;
}

static void multifd_recv_terminate_threads(Error *err)
{
    int i;
@@ -996,6 +1140,7 @@ static void multifd_recv_terminate_threads(Error *err)

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        /*
         * We could arrive here for two reasons:
         *  - normal quit, i.e. everything went fine, just finished
@@ -1045,6 +1190,7 @@ void multifd_load_cleanup(void)
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        qemu_sem_destroy(&p->sem);
        g_free(p->name);
        p->name = NULL;
        p->packet_len = 0;
@@ -1059,6 +1205,8 @@ void multifd_load_cleanup(void)
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state->data);
    multifd_recv_state->data = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;
}
@@ -1067,9 +1215,24 @@ void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_multifd()) {
    if (!migrate_multifd() || !migrate_multifd_packets()) {
        return;
    }

    if (!migrate_multifd_packets()) {
        for (i = 0; i < migrate_multifd_channels(); i++) {
            MultiFDRecvParams *p = &multifd_recv_state->params[i];

            qemu_sem_post(&p->sem);
            qemu_sem_wait(&p->sem_sync);

            qemu_mutex_lock(&p->mutex);
            assert(!p->pending_job || p->quit);
            qemu_mutex_unlock(&p->mutex);
        }
        return;
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

@@ -1094,51 +1257,82 @@ static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    bool use_packets = migrate_multifd_packets();
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t flags;
        uint32_t flags = 0;
        bool has_data = false;
        p->normal_num = 0;

        if (p->quit) {
            break;
        }

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */
            break;
        if (use_packets) {
            ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                           p->packet_len, &local_err);
            if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */
                break;
            }

            qemu_mutex_lock(&p->mutex);
            ret = multifd_recv_unfill_packet(p, &local_err);
            if (ret) {
                qemu_mutex_unlock(&p->mutex);
                break;
            }
            p->num_packets++;

            flags = p->flags;
            /* recv methods don't know how to handle the SYNC flag */
            p->flags &= ~MULTIFD_FLAG_SYNC;
            trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
                               p->next_packet_size);

            p->total_normal_pages += p->normal_num;
            has_data = !!p->normal_num;
        } else {
            /*
             * No packets, so we need to wait for the vmstate code to
             * give us work.
             */
            qemu_sem_wait(&p->sem);
            qemu_mutex_lock(&p->mutex);
            if (!p->pending_job) {
                qemu_mutex_unlock(&p->mutex);
                break;
            }
            has_data = !!p->data->size;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        flags = p->flags;
        /* recv methods don't know how to handle the SYNC flag */
        p->flags &= ~MULTIFD_FLAG_SYNC;
        trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
                           p->next_packet_size);
        p->num_packets++;
        p->total_normal_pages += p->normal_num;
        qemu_mutex_unlock(&p->mutex);

        if (p->normal_num) {
            ret = multifd_recv_state->ops->recv_pages(p, &local_err);
        if (has_data) {
            ret = multifd_recv_state->ops->recv_data(p, &local_err);
            if (ret != 0) {
                break;
            }
        }

        if (flags & MULTIFD_FLAG_SYNC) {
        if (use_packets && (flags & MULTIFD_FLAG_SYNC)) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }

        if (!use_packets) {
            qemu_mutex_lock(&p->mutex);
            p->data->size = 0;
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);
        }
    }

    if (!use_packets) {
        qemu_sem_post(&p->sem_sync);
    }

    if (local_err) {
@@ -1159,6 +1353,7 @@ int multifd_load_setup(Error **errp)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    bool use_packets = migrate_multifd_packets();
    uint8_t i;

    /*
@@ -1172,6 +1367,10 @@ int multifd_load_setup(Error **errp)
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);

    multifd_recv_state->data = g_new0(MultiFDRecvData, 1);
    multifd_recv_state->data->size = 0;

    qatomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);
    multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()];
@@ -1181,11 +1380,19 @@ int multifd_load_setup(Error **errp)

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        qemu_sem_init(&p->sem, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(uint64_t) * page_count;
        p->packet = g_malloc0(p->packet_len);

        p->data = g_new0(MultiFDRecvData, 1);
        p->data->size = 0;

        if (use_packets) {
            p->packet_len = sizeof(MultiFDPacket_t)
                          + sizeof(uint64_t) * page_count;
            p->packet = g_malloc0(p->packet_len);
        }
        p->name = g_strdup_printf("multifdrecv_%d", i);
        p->iov = g_new0(struct iovec, page_count);
        p->normal = g_new0(ram_addr_t, page_count);
@@ -1231,18 +1438,26 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;
    bool use_packets = migrate_multifd_packets();
    int id, num_packets = 0;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        error_propagate_prepend(errp, local_err,
                                "failed to receive packet"
                                " via multifd channel %d: ",
                                qatomic_read(&multifd_recv_state->count));
        return;
    if (use_packets) {
        id = multifd_recv_initial_packet(ioc, &local_err);
        if (id < 0) {
            multifd_recv_terminate_threads(local_err);
            error_propagate_prepend(errp, local_err,
                                    "failed to receive packet"
                                    " via multifd channel %d: ",
                                    qatomic_read(&multifd_recv_state->count));
            return;
        }
        trace_multifd_recv_new_channel(id);

        /* initial packet */
        num_packets = 1;
    } else {
        id = qatomic_read(&multifd_recv_state->count);
    }
    trace_multifd_recv_new_channel(id);

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
@@ -1253,9 +1468,8 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
        return;
    }
    p->c = ioc;
    p->num_packets = num_packets;
    object_ref(OBJECT(ioc));
    /* initial packet */
    p->num_packets = 1;

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
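The packet-less send path above hinges on one piece of pointer arithmetic: write_base = pages_offset - (uintptr_t)host, so adding write_base to each iov_base (a host pointer) yields that page's fixed offset in the migration file. A minimal arithmetic sketch, assuming a 64-bit host; the field names mirror the patch but the values are made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t host = 0x7f0000000000ULL;   /* where the RAMBlock is mapped */
    uint64_t pages_offset = 0x200000;    /* the block's slot in the file */

    uint64_t iov_base = host + 0x3000;          /* page's host address */
    uint64_t write_base = pages_offset - host;  /* wraps, by design */

    /* unsigned wrap-around makes the sum land on pages_offset + 0x3000 */
    printf("file offset = 0x%" PRIx64 "\n", iov_base + write_base);
    return 0;
}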
migration/multifd.h

@@ -13,16 +13,21 @@
#ifndef QEMU_MIGRATION_MULTIFD_H
#define QEMU_MIGRATION_MULTIFD_H

typedef struct MultiFDRecvData MultiFDRecvData;

int multifd_save_setup(Error **errp);
void multifd_save_cleanup(void);
int multifd_load_setup(Error **errp);
void multifd_load_cleanup(void);
void multifd_load_shutdown(void);
bool multifd_recv_first_channel(void);
bool multifd_recv_all_channels_created(void);
void multifd_recv_new_channel(QIOChannel *ioc, Error **errp);
void multifd_recv_sync_main(void);
int multifd_send_sync_main(QEMUFile *f);
int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset);
int multifd_send_sync_main(void);
int multifd_queue_page(RAMBlock *block, ram_addr_t offset);
int multifd_recv(void);
MultiFDRecvData *multifd_get_recv_data(void);

/* Multifd Compression flags */
#define MULTIFD_FLAG_SYNC (1 << 0)
@@ -58,13 +63,18 @@ typedef struct {
    uint32_t num;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    RAMBlock *block;
} MultiFDPages_t;

struct MultiFDRecvData {
    void *opaque;
    size_t size;
    /* for preadv */
    off_t file_offset;
};

typedef struct {
    /* Fields are only written at creating/deletion time */
    /* No lock required for them, they are read only */
@@ -131,7 +141,7 @@ typedef struct {
    /* num of non zero pages */
    uint32_t normal_num;
    /* used for compression methods */
    void *data;
    void *compress_data;
} MultiFDSendParams;

typedef struct {
@@ -155,6 +165,8 @@ typedef struct {

    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* sem where to wait for more work */
    QemuSemaphore sem;

    /* this mutex protects the following parameters */
    QemuMutex mutex;
@@ -166,6 +178,13 @@ typedef struct {
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    int pending_job;
    /*
     * The owner of 'data' depends of 'pending_job' value:
     * pending_job == 0 -> migration_thread can use it.
     * pending_job != 0 -> multifd_channel can use it.
     */
    MultiFDRecvData *data;

    /* thread local variables. No locking required */

@@ -188,7 +207,7 @@ typedef struct {
    /* num of non zero pages */
    uint32_t normal_num;
    /* used for de-compression methods */
    void *data;
    void *compress_data;
} MultiFDRecvParams;

typedef struct {
@@ -202,11 +221,10 @@ typedef struct {
    int (*recv_setup)(MultiFDRecvParams *p, Error **errp);
    /* Cleanup for receiving side */
    void (*recv_cleanup)(MultiFDRecvParams *p);
    /* Read all pages */
    int (*recv_pages)(MultiFDRecvParams *p, Error **errp);
    /* Read all data */
    int (*recv_data)(MultiFDRecvParams *p, Error **errp);
} MultiFDMethods;

void multifd_register_ops(int method, MultiFDMethods *ops);

#endif
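The comment on 'pending_job' above describes a two-party ownership rule for the per-channel MultiFDRecvData slot. A condensed sketch of the handoff that multifd_recv() performs (illustration only, not the actual QEMU code; 'global' stands in for multifd_recv_state->data):

static int hand_off(MultiFDRecvParams *p, MultiFDRecvData **global)
{
    MultiFDRecvData *data = *global;

    qemu_mutex_lock(&p->mutex);
    if (p->pending_job) {           /* channel still owns its slot */
        qemu_mutex_unlock(&p->mutex);
        return 0;                   /* caller tries the next channel */
    }
    p->pending_job++;               /* ownership passes to the channel */
    *global = p->data;              /* take the channel's idle slot... */
    p->data = data;                 /* ...and hand it the filled one */
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);         /* wake the recv thread */
    return 1;
}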
migration/options.c

@@ -204,6 +204,7 @@ Property migration_properties[] = {
    DEFINE_PROP_MIG_CAP("x-switchover-ack",
                        MIGRATION_CAPABILITY_SWITCHOVER_ACK),
    DEFINE_PROP_MIG_CAP("x-dirty-limit", MIGRATION_CAPABILITY_DIRTY_LIMIT),
    DEFINE_PROP_MIG_CAP("x-fixed-ram", MIGRATION_CAPABILITY_FIXED_RAM),
    DEFINE_PROP_END_OF_LIST(),
};

@@ -263,6 +264,13 @@ bool migrate_events(void)
    return s->capabilities[MIGRATION_CAPABILITY_EVENTS];
}

bool migrate_fixed_ram(void)
{
    MigrationState *s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_FIXED_RAM];
}

bool migrate_ignore_shared(void)
{
    MigrationState *s = migrate_get_current();
@@ -377,6 +385,11 @@ bool migrate_multifd_flush_after_each_section(void)
    return s->multifd_flush_after_each_section;
}

bool migrate_multifd_packets(void)
{
    return !migrate_fixed_ram();
}

bool migrate_postcopy(void)
{
    return migrate_postcopy_ram() || migrate_dirty_bitmaps();
@@ -396,6 +409,13 @@ bool migrate_tls(void)
    return s->parameters.tls_creds && *s->parameters.tls_creds;
}

bool migrate_to_file(void)
{
    MigrationState *s = migrate_get_current();

    return qemu_file_is_seekable(s->to_dst_file);
}

typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,
@@ -645,6 +665,26 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_FIXED_RAM]) {
        if (new_caps[MIGRATION_CAPABILITY_XBZRLE]) {
            error_setg(errp,
                       "Fixed-ram migration is incompatible with xbzrle");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp,
                       "Fixed-ram migration is incompatible with compression");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
            error_setg(errp,
                       "Fixed-ram migration is incompatible with postcopy ram");
            return false;
        }
    }

    return true;
}

@@ -795,6 +835,22 @@ int migrate_decompress_threads(void)
    return s->parameters.decompress_threads;
}

bool migrate_direct_io(void)
{
    MigrationState *s = migrate_get_current();

    /* For now O_DIRECT is only supported with fixed-ram */
    if (!s->capabilities[MIGRATION_CAPABILITY_FIXED_RAM]) {
        return false;
    }

    if (s->parameters.has_direct_io) {
        return s->parameters.direct_io;
    }

    return false;
}

uint64_t migrate_downtime_limit(void)
{
    MigrationState *s = migrate_get_current();
@@ -1012,6 +1068,11 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
    params->has_mode = true;
    params->mode = s->parameters.mode;

    if (s->parameters.has_direct_io) {
        params->has_direct_io = true;
        params->direct_io = s->parameters.direct_io;
    }

    return params;
}

@@ -1047,6 +1108,7 @@ void migrate_params_init(MigrationParameters *params)
    params->has_x_vcpu_dirty_limit_period = true;
    params->has_vcpu_dirty_limit = true;
    params->has_mode = true;
    params->has_direct_io = qemu_has_direct_io();
}

/*
@@ -1348,6 +1410,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
    if (params->has_mode) {
        dest->mode = params->mode;
    }

    if (params->has_direct_io) {
        dest->direct_io = params->direct_io;
    }
}

static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
@@ -1492,6 +1558,10 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
    if (params->has_mode) {
        s->parameters.mode = params->mode;
    }

    if (params->has_direct_io) {
        s->parameters.direct_io = params->direct_io;
    }
}

void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
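migrate_direct_io() above only reports true when the fixed-ram capability is set and the user asked for direct_io; the file channel then ors O_DIRECT into its open flags. A hedged POSIX sketch of that gating (not the QEMU helper itself; migrate_direct_io() is the function added above):

#include <fcntl.h>

/*
 * Minimal sketch: choose open(2) flags the way a fixed-ram file
 * channel might. O_DIRECT requires buffer, length and file offset to
 * be block-aligned, which is why fixed-ram pads the pages region
 * (see the ram.c alignment comment below).
 */
static int migration_file_flags(void)
{
    int flags = O_WRONLY | O_CREAT;
#ifdef O_DIRECT
    if (migrate_direct_io()) {
        flags |= O_DIRECT;
    }
#endif
    return flags;
}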
migration/options.h

@@ -31,6 +31,7 @@ bool migrate_compress(void);
bool migrate_dirty_bitmaps(void);
bool migrate_dirty_limit(void);
bool migrate_events(void);
bool migrate_fixed_ram(void);
bool migrate_ignore_shared(void);
bool migrate_late_block_activate(void);
bool migrate_multifd(void);
@@ -55,9 +56,11 @@ bool migrate_zero_copy_send(void);
 */

bool migrate_multifd_flush_after_each_section(void);
bool migrate_multifd_packets(void);
bool migrate_postcopy(void);
bool migrate_rdma(void);
bool migrate_tls(void);
bool migrate_to_file(void);

/* capabilities helpers */

@@ -78,6 +81,7 @@ uint8_t migrate_cpu_throttle_increment(void);
uint8_t migrate_cpu_throttle_initial(void);
bool migrate_cpu_throttle_tailslow(void);
int migrate_decompress_threads(void);
bool migrate_direct_io(void);
uint64_t migrate_downtime_limit(void);
uint8_t migrate_max_cpu_throttle(void);
uint64_t migrate_max_bandwidth(void);
migration/qemu-file.c

@@ -33,6 +33,7 @@
#include "options.h"
#include "qapi/error.h"
#include "rdma.h"
#include "io/channel-file.h"

#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
@@ -255,6 +256,10 @@ static void qemu_iovec_release_ram(QEMUFile *f)
    memset(f->may_free, 0, sizeof(f->may_free));
}

bool qemu_file_is_seekable(QEMUFile *f)
{
    return qio_channel_has_feature(f->ioc, QIO_CHANNEL_FEATURE_SEEKABLE);
}

/**
 * Flushes QEMUFile buffer
@@ -447,6 +452,83 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
    }
}

void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                        off_t pos)
{
    Error *err = NULL;

    if (f->last_error) {
        return;
    }

    qemu_fflush(f);
    qio_channel_pwrite(f->ioc, (char *)buf, buflen, pos, &err);

    if (err) {
        qemu_file_set_error_obj(f, -EIO, err);
    } else {
        stat64_add(&mig_stats.qemu_file_transferred, buflen);
    }

    return;
}

size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                          off_t pos)
{
    Error *err = NULL;
    ssize_t ret;

    if (f->last_error) {
        return 0;
    }

    ret = qio_channel_pread(f->ioc, (char *)buf, buflen, pos, &err);
    if (ret == -1 || err) {
        goto error;
    }

    return (size_t)ret;

error:
    qemu_file_set_error_obj(f, -EIO, err);
    return 0;
}

void qemu_set_offset(QEMUFile *f, off_t off, int whence)
{
    Error *err = NULL;
    off_t ret;

    qemu_fflush(f);

    if (!qemu_file_is_writable(f)) {
        f->buf_index = 0;
        f->buf_size = 0;
    }

    ret = qio_channel_io_seek(f->ioc, off, whence, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
}

off_t qemu_get_offset(QEMUFile *f)
{
    Error *err = NULL;
    off_t ret;

    qemu_fflush(f);

    ret = qio_channel_io_seek(f->ioc, 0, SEEK_CUR, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
    return ret;
}

void qemu_put_byte(QEMUFile *f, int v)
{
    if (f->last_error) {
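A hedged usage sketch of the positioned helpers added above; 'f' must be backed by a seekable channel, and the offset and length here are made up for illustration:

static void rewrite_region(QEMUFile *f, const uint8_t *buf, size_t len)
{
    uint8_t check[128];

    /* write at an absolute position without disturbing the stream... */
    qemu_put_buffer_at(f, buf, len, 0x1000);

    /* ...and read it back from the same fixed offset */
    if (qemu_get_buffer_at(f, check, len, 0x1000) != len) {
        /* on failure the helper returns 0 and latches f->last_error */
    }
}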
migration/qemu-file.h

@@ -75,7 +75,12 @@ QEMUFile *qemu_file_get_return_path(QEMUFile *f);
int qemu_fflush(QEMUFile *f);
void qemu_file_set_blocking(QEMUFile *f, bool block);
int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size);
void qemu_set_offset(QEMUFile *f, off_t off, int whence);
off_t qemu_get_offset(QEMUFile *f);
void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                        off_t pos);
size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                          off_t pos);

QIOChannel *qemu_file_get_ioc(QEMUFile *file);

#endif
304 migration/ram.c
@@ -94,6 +94,25 @@
|
||||
#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200
|
||||
/* We can't use any flag that is bigger than 0x200 */
|
||||
|
||||
/*
|
||||
* fixed-ram migration supports O_DIRECT, so we need to make sure the
|
||||
* userspace buffer, the IO operation size and the file offset are
|
||||
* aligned according to the underlying device's block size. The first
|
||||
* two are already aligned to page size, but we need to add padding to
|
||||
* the file to align the offset. We cannot read the block size
|
||||
* dynamically because the migration file can be moved between
|
||||
* different systems, so use 1M to cover most block sizes and to keep
|
||||
* the file offset aligned at page size as well.
|
||||
*/
|
||||
#define FIXED_RAM_FILE_OFFSET_ALIGNMENT 0x100000
|
||||
|
||||
/*
|
||||
* When doing fixed-ram migration, this is the amount we read from the
|
||||
* pages region in the migration file at a time.
|
||||
*/
|
||||
#define FIXED_RAM_LOAD_BUF_SIZE 0x100000
|
||||
#define FIXED_RAM_MULTIFD_LOAD_BUF_SIZE 0x100000
|
||||
|
||||
XBZRLECacheStats xbzrle_counters;
|
||||
|
||||
/* used by the search for pages to send */
|
||||
@@ -1127,12 +1146,18 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
|
||||
return 0;
|
||||
}
|
||||
|
||||
stat64_add(&mig_stats.zero_pages, 1);
|
||||
|
||||
if (migrate_fixed_ram()) {
|
||||
/* zero pages are not transferred with fixed-ram */
|
||||
clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->shadow_bmap);
|
||||
return 1;
|
||||
}
|
||||
|
||||
len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
|
||||
qemu_put_byte(file, 0);
|
||||
len += 1;
|
||||
ram_release_page(pss->block->idstr, offset);
|
||||
|
||||
stat64_add(&mig_stats.zero_pages, 1);
|
||||
ram_transferred_add(len);
|
||||
|
||||
/*
|
||||
@@ -1190,14 +1215,20 @@ static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
|
||||
{
|
||||
QEMUFile *file = pss->pss_channel;
|
||||
|
||||
ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
|
||||
offset | RAM_SAVE_FLAG_PAGE));
|
||||
if (async) {
|
||||
qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
|
||||
migrate_release_ram() &&
|
||||
migration_in_postcopy());
|
||||
if (migrate_fixed_ram()) {
|
||||
qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE,
|
||||
block->pages_offset + offset);
|
||||
set_bit(offset >> TARGET_PAGE_BITS, block->shadow_bmap);
|
||||
} else {
|
||||
qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
|
||||
ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
|
||||
offset | RAM_SAVE_FLAG_PAGE));
|
||||
if (async) {
|
||||
qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
|
||||
migrate_release_ram() &&
|
||||
migration_in_postcopy());
|
||||
} else {
|
||||
qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
ram_transferred_add(TARGET_PAGE_SIZE);
|
||||
stat64_add(&mig_stats.normal_pages, 1);
|
||||
@@ -1250,10 +1281,9 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
|
||||
return pages;
|
||||
}
|
||||
|
||||
static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
|
||||
ram_addr_t offset)
|
||||
static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
|
||||
{
|
||||
if (multifd_queue_page(file, block, offset) < 0) {
|
||||
if (multifd_queue_page(block, offset) < 0) {
|
||||
return -1;
|
||||
}
|
||||
stat64_add(&mig_stats.normal_pages, 1);
|
||||
@@ -1333,10 +1363,10 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
|
||||
pss->page = 0;
|
||||
pss->block = QLIST_NEXT_RCU(pss->block, next);
|
||||
if (!pss->block) {
|
||||
if (migrate_multifd() &&
|
||||
if (migrate_multifd() && !migrate_fixed_ram() &&
|
||||
!migrate_multifd_flush_after_each_section()) {
|
||||
QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
|
||||
int ret = multifd_send_sync_main(f);
|
||||
int ret = multifd_send_sync_main();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -2067,7 +2097,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
|
||||
* still see partially copied pages which is data corruption.
|
||||
*/
|
||||
if (migrate_multifd() && !migration_in_postcopy()) {
|
||||
return ram_save_multifd_page(pss->pss_channel, block, offset);
|
||||
return ram_save_multifd_page(block, offset);
|
||||
}
|
||||
|
||||
return ram_save_page(rs, pss);
|
||||
@@ -2780,6 +2810,7 @@ static void ram_list_init_bitmaps(void)
|
||||
*/
|
||||
block->bmap = bitmap_new(pages);
|
||||
bitmap_set(block->bmap, 0, pages);
|
||||
block->shadow_bmap = bitmap_new(pages);
|
||||
block->clear_bmap_shift = shift;
|
||||
block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
|
||||
}
|
||||
@@ -2917,6 +2948,87 @@ void qemu_guest_free_page_hint(void *addr, size_t len)
|
||||
}
|
||||
}
|
||||
|
||||
#define FIXED_RAM_HDR_VERSION 1
|
||||
struct FixedRamHeader {
|
||||
uint32_t version;
|
||||
/*
|
||||
* The target's page size, so we know how many pages are in the
|
||||
* bitmap.
|
||||
*/
|
||||
uint64_t page_size;
|
||||
/*
|
||||
* The offset in the migration file where the pages bitmap is
|
||||
* found.
|
||||
*/
|
||||
uint64_t bitmap_offset;
|
||||
/*
|
||||
* The offset in the migration file where the actual pages (data)
|
||||
* are found.
|
||||
*/
|
||||
uint64_t pages_offset;
|
||||
/* end of v1 */
|
||||
} QEMU_PACKED;
|
||||
typedef struct FixedRamHeader FixedRamHeader;
|
||||
|
||||
static void fixed_ram_insert_header(QEMUFile *file, RAMBlock *block)
|
||||
{
|
||||
g_autofree FixedRamHeader *header;
|
||||
size_t header_size, bitmap_size;
|
||||
long num_pages;
|
||||
|
||||
header = g_new0(FixedRamHeader, 1);
|
||||
header_size = sizeof(FixedRamHeader);
|
||||
|
||||
num_pages = block->used_length >> TARGET_PAGE_BITS;
|
||||
bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
|
||||
|
||||
/*
|
||||
* Save the file offsets of where the bitmap and the pages should
|
||||
* go as they are written at the end of migration and during the
|
||||
* iterative phase, respectively.
|
||||
*/
|
||||
block->bitmap_offset = qemu_get_offset(file) + header_size;
|
||||
block->pages_offset = ROUND_UP(block->bitmap_offset +
|
||||
bitmap_size,
|
||||
FIXED_RAM_FILE_OFFSET_ALIGNMENT);
|
||||
|
||||
header->version = cpu_to_be32(FIXED_RAM_HDR_VERSION);
|
||||
header->page_size = cpu_to_be64(TARGET_PAGE_SIZE);
|
||||
header->bitmap_offset = cpu_to_be64(block->bitmap_offset);
|
||||
header->pages_offset = cpu_to_be64(block->pages_offset);
|
||||
|
||||
qemu_put_buffer(file, (uint8_t *) header, header_size);
|
||||
}
|
||||
|
||||
static bool fixed_ram_read_header(QEMUFile *file, FixedRamHeader *header,
|
||||
Error **errp)
|
||||
{
|
||||
size_t ret, header_size = sizeof(FixedRamHeader);
|
||||
|
||||
ret = qemu_get_buffer(file, (uint8_t *)header, header_size);
|
||||
if (ret != header_size) {
|
||||
error_setg(errp, "Could not read whole fixed-ram migration header "
|
||||
"(expected %zd, got %zd bytes)", header_size, ret);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* migration stream is big-endian */
|
||||
header->version = be32_to_cpu(header->version);
|
||||
|
||||
if (header->version > FIXED_RAM_HDR_VERSION) {
|
||||
error_setg(errp, "Migration fixed-ram capability version mismatch "
|
||||
"(expected %d, got %d)", FIXED_RAM_HDR_VERSION,
|
||||
header->version);
|
||||
return false;
|
||||
}
|
||||
|
||||
header->page_size = be64_to_cpu(header->page_size);
|
||||
header->bitmap_offset = be64_to_cpu(header->bitmap_offset);
|
||||
header->pages_offset = be64_to_cpu(header->pages_offset);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
|
||||
* long-running RCU critical section. When rcu-reclaims in the code
|
||||
@@ -2966,6 +3078,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
if (migrate_ignore_shared()) {
|
||||
qemu_put_be64(f, block->mr->addr);
|
||||
}
|
||||
|
||||
if (migrate_fixed_ram()) {
|
||||
fixed_ram_insert_header(f, block);
|
||||
/* prepare offset for next ramblock */
|
||||
qemu_set_offset(f, block->pages_offset + block->used_length,
|
||||
SEEK_SET);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2985,13 +3104,14 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
migration_ops->ram_save_target_page = ram_save_target_page_legacy;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
ret = multifd_send_sync_main(f);
|
||||
ret = multifd_send_sync_main();
|
||||
qemu_mutex_lock_iothread();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
|
||||
if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
|
||||
&& !migrate_fixed_ram()) {
|
||||
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
|
||||
}
|
||||
|
||||
@@ -2999,6 +3119,32 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
return qemu_fflush(f);
|
||||
}
|
||||
|
||||
static void ram_save_shadow_bmap(QEMUFile *f)
|
||||
{
|
||||
RAMBlock *block;
|
||||
|
||||
RAMBLOCK_FOREACH_MIGRATABLE(block) {
|
||||
long num_pages = block->used_length >> TARGET_PAGE_BITS;
|
||||
long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
|
||||
qemu_put_buffer_at(f, (uint8_t *)block->shadow_bmap, bitmap_size,
|
||||
block->bitmap_offset);
|
||||
ram_transferred_add(bitmap_size);
|
||||
|
||||
/*
|
||||
* Free the bitmap here to catch any synchronization issues
|
||||
* with multifd channels. No channels should be sending pages
|
||||
* after we've written the bitmap to file.
|
||||
*/
|
||||
g_free(block->shadow_bmap);
|
||||
block->shadow_bmap = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void ramblock_set_shadow_bmap_atomic(RAMBlock *block, ram_addr_t offset)
|
||||
{
|
||||
set_bit_atomic(offset >> TARGET_PAGE_BITS, block->shadow_bmap);
|
||||
}
|
||||
|
||||
/**
|
||||
* ram_save_iterate: iterative stage for migration
|
||||
*
|
||||
@@ -3108,8 +3254,10 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
|
||||
out:
|
||||
if (ret >= 0
|
||||
&& migration_is_setup_or_active(migrate_get_current()->state)) {
|
||||
if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
|
||||
ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
|
||||
if (migrate_multifd() &&
|
||||
(migrate_multifd_flush_after_each_section() ||
|
||||
migrate_fixed_ram())) {
|
||||
ret = multifd_send_sync_main();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@@ -3183,14 +3331,19 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
|
||||
}
|
||||
}
|
||||
|
||||
ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
|
||||
ret = multifd_send_sync_main();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (migrate_fixed_ram()) {
|
||||
ram_save_shadow_bmap(f);
|
||||
}
|
||||
|
||||
if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
|
||||
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
|
||||
}
|
||||
|
||||
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
|
||||
return qemu_fflush(f);
|
||||
}
|
||||
@@ -3789,6 +3942,107 @@ void colo_flush_ram_cache(void)
    trace_colo_flush_ram_cache_end();
}

static size_t ram_load_multifd_pages(RAMBlock *block, ram_addr_t start_offset,
                                     size_t size)
{
    MultiFDRecvData *data = multifd_get_recv_data();

    /*
     * Pointing the opaque directly to the host buffer, no
     * preprocessing needed.
     */
    data->opaque = block->host + start_offset;

    data->file_offset = block->pages_offset + start_offset;
    data->size = size;

    if (multifd_recv() < 0) {
        return -1;
    }

    return size;
}

static void read_ramblock_fixed_ram(QEMUFile *f, RAMBlock *block,
                                    long num_pages, unsigned long *bitmap)
{
    unsigned long set_bit_idx, clear_bit_idx;
    ram_addr_t offset;
    void *host;
    size_t read, unread, size;
    size_t buf_size = (migrate_multifd() ? FIXED_RAM_MULTIFD_LOAD_BUF_SIZE :
                       FIXED_RAM_LOAD_BUF_SIZE);

    for (set_bit_idx = find_first_bit(bitmap, num_pages);
         set_bit_idx < num_pages;
         set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) {

        clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1);

        unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx);
        offset = set_bit_idx << TARGET_PAGE_BITS;

        while (unread > 0) {
            host = host_from_ram_block_offset(block, offset);
            size = MIN(unread, buf_size);

            if (migrate_multifd()) {
                read = ram_load_multifd_pages(block, offset, size);
            } else {
                read = qemu_get_buffer_at(f, host, size,
                                          block->pages_offset + offset);
            }
            offset += read;
            unread -= read;
        }
    }
}

static int parse_ramblock_fixed_ram(QEMUFile *f, RAMBlock *block,
                                    ram_addr_t length, Error **errp)
{
    g_autofree unsigned long *bitmap = NULL;
    FixedRamHeader header;
    size_t bitmap_size;
    long num_pages;

    if (!fixed_ram_read_header(f, &header, errp)) {
        return -EINVAL;
    }

    block->pages_offset = header.pages_offset;

    /*
     * Check the alignment of the file region that contains pages. We
     * don't enforce FIXED_RAM_FILE_OFFSET_ALIGNMENT to allow that
     * value to change in the future. Do only a sanity check with page
     * size alignment.
     */
    if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) {
        error_setg(errp,
                   "Error reading ramblock %s pages, region has bad alignment",
                   block->idstr);
        return -EINVAL;
    }

    num_pages = length / header.page_size;
    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

    bitmap = g_malloc0(bitmap_size);
    if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size,
                           header.bitmap_offset) != bitmap_size) {
        error_setg(errp, "Error reading dirty bitmap");
        return -EINVAL;
    }

    read_ramblock_fixed_ram(f, block, num_pages, bitmap);

    /* Skip pages array */
    qemu_set_offset(f, block->pages_offset + length, SEEK_SET);

    return 0;
}

static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
{
    int ret = 0;
@@ -3797,6 +4051,16 @@ static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)

    assert(block);

    if (migrate_fixed_ram()) {
        Error *local_err = NULL;

        ret = parse_ramblock_fixed_ram(f, block, length, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
        return ret;
    }

    if (!qemu_ram_is_migratable(block)) {
        error_report("block %s should not be migrated !", block->idstr);
        return -EINVAL;
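read_ramblock_fixed_ram() walks the bitmap one run of set bits at a time, so contiguous dirty pages become a single large read instead of one I/O per page. A self-contained sketch of the same run-walk pattern, with a toy bit scanner standing in for QEMU's find_first_bit()/find_next_zero_bit() (it assumes a 64-bit unsigned long):

    #include <stdio.h>

    /* Toy scanner: return first index >= start whose bit equals val. */
    static long find_next(const unsigned long *map, long nbits, long start,
                          int val)
    {
        for (long i = start; i < nbits; i++) {
            if (((map[i / 64] >> (i % 64)) & 1) == (unsigned long)val) {
                return i;
            }
        }
        return nbits;
    }

    int main(void)
    {
        /* Pages 1-3 and 6 dirty: expect runs [1,4) and [6,7). */
        unsigned long bitmap[1] = { 0x4e };
        long num_pages = 8;

        long set = find_next(bitmap, num_pages, 0, 1);
        while (set < num_pages) {
            long clear = find_next(bitmap, num_pages, set + 1, 0);
            printf("read %ld page(s) starting at page %ld\n",
                   clear - set, set);
            set = find_next(bitmap, num_pages, clear + 1, 1);
        }
        return 0;
    }

Against the toy bitmap this prints two runs, which is exactly the shape of I/O the loop above issues through qemu_get_buffer_at() or the multifd channel.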
@@ -75,6 +75,7 @@ bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb, Error **errp);
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start);
void postcopy_preempt_shutdown_file(MigrationState *s);
void *postcopy_preempt_thread(void *opaque);
void ramblock_set_shadow_bmap_atomic(RAMBlock *block, ram_addr_t offset);

/* ram cache */
int colo_init_ram_cache(void);
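Only the declaration of ramblock_set_shadow_bmap_atomic() is visible here. Going by the name and by how the shadow bitmap is consumed at completion time, a plausible one-line body would be the following sketch; the shadow_bmap field on RAMBlock is an assumption, while set_bit_atomic() is QEMU's existing bitops helper:

    /* Sketch: assumes a RAMBlock::shadow_bmap sized in target pages. */
    void ramblock_set_shadow_bmap_atomic(RAMBlock *block, ram_addr_t offset)
    {
        set_bit_atomic(offset >> TARGET_PAGE_BITS, block->shadow_bmap);
    }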
@@ -245,6 +245,7 @@ static bool should_validate_capability(int capability)
    /* Validate only new capabilities to keep compatibility. */
    switch (capability) {
    case MIGRATION_CAPABILITY_X_IGNORE_SHARED:
    case MIGRATION_CAPABILITY_FIXED_RAM:
        return true;
    default:
        return false;
@@ -173,9 +173,9 @@ static void monitor_fdset_cleanup(MonFdset *mon_fdset)
    MonFdsetFd *mon_fdset_fd_next;

    QLIST_FOREACH_SAFE(mon_fdset_fd, &mon_fdset->fds, next, mon_fdset_fd_next) {
        if ((mon_fdset_fd->removed ||
            (QLIST_EMPTY(&mon_fdset->dup_fds) && mon_refcount == 0)) &&
            runstate_is_running()) {
        if (mon_fdset_fd->removed ||
            (QLIST_EMPTY(&mon_fdset->dup_fds) && mon_refcount == 0 &&
             runstate_is_running())) {
            close(mon_fdset_fd->fd);
            g_free(mon_fdset_fd->opaque);
            QLIST_REMOVE(mon_fdset_fd, next);
@@ -406,6 +406,25 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
    return fdinfo;
}

#ifndef _WIN32
static bool monitor_fdset_flags_match(int flags, int fd_flags)
{
    bool match = false;

    if ((flags & O_ACCMODE) == (fd_flags & O_ACCMODE)) {
        match = true;

#ifdef O_DIRECT
        if ((flags & O_DIRECT) != (fd_flags & O_DIRECT)) {
            match = false;
        }
#endif
    }

    return match;
}
#endif

int monitor_fdset_dup_fd_add(int64_t fdset_id, int flags)
{
#ifdef _WIN32
@@ -431,7 +450,7 @@ int monitor_fdset_dup_fd_add(int64_t fdset_id, int flags)
            return -1;
        }

        if ((flags & O_ACCMODE) == (mon_fd_flags & O_ACCMODE)) {
        if (monitor_fdset_flags_match(flags, mon_fd_flags)) {
            fd = mon_fdset_fd->fd;
            break;
        }
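monitor_fdset_flags_match() tightens the old O_ACCMODE-only comparison: a descriptor is handed out only if it also agrees with the requested O_DIRECT state, which is what lets one fdset hold both a buffered and an O_DIRECT fd for the same file. A quick self-contained check of that truth table (plain C; _GNU_SOURCE so O_DIRECT is visible on Linux):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Same logic as monitor_fdset_flags_match() in the hunk above. */
    static bool flags_match(int flags, int fd_flags)
    {
        bool match = false;

        if ((flags & O_ACCMODE) == (fd_flags & O_ACCMODE)) {
            match = true;
    #ifdef O_DIRECT
            if ((flags & O_DIRECT) != (fd_flags & O_DIRECT)) {
                match = false;
            }
    #endif
        }
        return match;
    }

    int main(void)
    {
    #ifdef O_DIRECT
        printf("%d\n", flags_match(O_WRONLY, O_WRONLY));            /* 1 */
        printf("%d\n", flags_match(O_WRONLY | O_DIRECT, O_WRONLY)); /* 0 */
    #endif
        printf("%d\n", flags_match(O_WRONLY, O_RDONLY));            /* 0 */
        return 0;
    }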
Binary file not shown.
@@ -531,6 +531,10 @@
#     and can result in more stable read performance. Requires KVM
#     with accelerator property "dirty-ring-size" set. (Since 8.1)
#
# @fixed-ram: Migrate using fixed offsets for each RAM page. Requires
#     a migration URI that supports seeking, such as a file. (since
#     8.2)
#
# Features:
#
# @deprecated: Member @block is deprecated. Use blockdev-mirror with
@@ -555,7 +559,7 @@
           { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] },
           'validate-uuid', 'background-snapshot',
           'zero-copy-send', 'postcopy-preempt', 'switchover-ack',
           'dirty-limit'] }
           'dirty-limit', 'fixed-ram'] }

##
# @MigrationCapabilityStatus:
@@ -874,6 +878,9 @@
# @mode: Migration mode. See description in @MigMode. Default is 'normal'.
#     (Since 8.2)
#
# @direct-io: Open migration files with O_DIRECT when possible. This
#     requires that the 'fixed-ram' capability is enabled. (since 9.0)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated. Use
@@ -907,7 +914,8 @@
           'block-bitmap-mapping',
           { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] },
           'vcpu-dirty-limit',
           'mode'] }
           'mode',
           'direct-io'] }

##
# @MigrateSetParameters:
@@ -1062,6 +1070,9 @@
# @mode: Migration mode. See description in @MigMode. Default is 'normal'.
#     (Since 8.2)
#
# @direct-io: Open migration files with O_DIRECT when possible. Not
#     all migration transports support this. (since 9.0)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated. Use
@@ -1115,7 +1126,8 @@
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode'} }
            '*mode': 'MigMode',
            '*direct-io': 'bool' } }

##
# @migrate-set-parameters:
@@ -1290,6 +1302,9 @@
# @mode: Migration mode. See description in @MigMode. Default is 'normal'.
#     (Since 8.2)
#
# @direct-io: Open migration files with O_DIRECT when possible. Not
#     all migration transports support this. (since 9.0)
#
# Features:
#
# @deprecated: Member @block-incremental is deprecated. Use
@@ -1340,7 +1355,8 @@
            '*x-vcpu-dirty-limit-period': { 'type': 'uint64',
                                            'features': [ 'unstable' ] },
            '*vcpu-dirty-limit': 'uint64',
            '*mode': 'MigMode'} }
            '*mode': 'MigMode',
            '*direct-io': 'bool' } }

##
# @query-migrate-parameters:
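Wired together, the new capability and parameter are enabled like any other pair. A minimal sketch in the style of the qtest start hooks later in this series (migrate_set_capability() and migrate_set_parameter_bool() are the helpers migration-test.c already uses; whether O_DIRECT is actually applied still depends on the transport):

    static void *fixed_ram_dio_start(QTestState *from, QTestState *to)
    {
        /* Capability first: fixed-ram needs a seekable URI such as file:. */
        migrate_set_capability(from, "fixed-ram", true);
        migrate_set_capability(to, "fixed-ram", true);

        /* Then the parameter; it is only honoured where O_DIRECT applies. */
        migrate_set_parameter_bool(from, "direct-io", true);
        migrate_set_parameter_bool(to, "direct-io", true);

        return NULL;
    }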
Submodule roms/seabios-hppa updated: fd5b6cf823...2a23dd388f
@@ -76,7 +76,8 @@ class QAPISchemaEntity:
    def __repr__(self):
        if self.name is None:
            return "<%s at 0x%x>" % (type(self).__name__, id(self))
        return "<%s:%s at 0x%x>" % type(self).__name__, self.name, id(self)
        return "<%s:%s at 0x%x>" % (type(self).__name__, self.name,
                                    id(self))

    def c_name(self):
        return c_name(self.name)
@@ -179,8 +179,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
     */
    if (!cpu->neg.can_do_io) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
                              | curr_cflags(cpu);
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        cpu_loop_exit_restore(cpu, ra);
    }
    /*
@@ -212,8 +211,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
            cpu_loop_exit(cpu);
        } else {
            /* Force execution of one insn next time. */
            cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
                                  | curr_cflags(cpu);
            cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
            mmap_unlock();
            cpu_loop_exit_noexc(cpu);
        }
@@ -351,6 +351,7 @@ static void cortex_a8_initfn(Object *obj)
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fc080;
    cpu->reset_fpsid = 0x410330c0;
    cpu->isar.mvfr0 = 0x11110222;
@@ -418,6 +419,7 @@ static void cortex_a9_initfn(Object *obj)
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /*
     * Note that A9 supports the MP extensions even for
     * A9UP and single-core A9MP (which are both different
@@ -1101,10 +1101,18 @@ uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    /*
     * True probe; this will never fault. Note that our caller passes
     * us a pointer to the end of the region, but allocation_tag_mem_probe()
     * wants a pointer to the start. Because we know we don't span a page
     * boundary and that allocation_tag_mem_probe() doesn't otherwise care
     * about the size, pass in a size of 1 byte. This is simpler than
     * adjusting the ptr to point to the start of the region and then having
     * to adjust the returned 'mem' to get the end of the tag memory.
     */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
                                   1, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }
@@ -2351,6 +2351,8 @@ static bool trans_SVC(DisasContext *s, arg_i *a)

static bool trans_HVC(DisasContext *s, arg_i *a)
{
    int target_el = s->current_el == 3 ? 3 : 2;

    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
@@ -2363,7 +2365,7 @@ static bool trans_HVC(DisasContext *s, arg_i *a)
    gen_helper_pre_hvc(tcg_env);
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
    return true;
}
@@ -14,7 +14,8 @@
# define TARGET_PHYS_ADDR_SPACE_BITS  32
# define TARGET_VIRT_ADDR_SPACE_BITS  32
#else
# define TARGET_PHYS_ADDR_SPACE_BITS  64
/* ??? PA-8000 through 8600 have 40 bits; PA-8700 and 8900 have 44 bits. */
# define TARGET_PHYS_ADDR_SPACE_BITS  40
# define TARGET_VIRT_ADDR_SPACE_BITS  64
#endif
@@ -31,23 +31,25 @@
   basis. It's probably easier to fall back to a strong memory model. */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL

#define MMU_KERNEL_IDX    7
#define MMU_KERNEL_P_IDX  8
#define MMU_PL1_IDX       9
#define MMU_PL1_P_IDX     10
#define MMU_PL2_IDX       11
#define MMU_PL2_P_IDX     12
#define MMU_USER_IDX      13
#define MMU_USER_P_IDX    14
#define MMU_PHYS_IDX      15
#define MMU_ABS_W_IDX     6
#define MMU_ABS_IDX       7
#define MMU_KERNEL_IDX    8
#define MMU_KERNEL_P_IDX  9
#define MMU_PL1_IDX       10
#define MMU_PL1_P_IDX     11
#define MMU_PL2_IDX       12
#define MMU_PL2_P_IDX     13
#define MMU_USER_IDX      14
#define MMU_USER_P_IDX    15

#define MMU_IDX_MMU_DISABLED(MIDX)  ((MIDX) < MMU_KERNEL_IDX)
#define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
#define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
#define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

#define TARGET_INSN_START_EXTRA_WORDS 2

/* No need to flush MMU_PHYS_IDX */
/* No need to flush MMU_ABS*_IDX */
#define HPPA_MMU_FLUSH_MASK                           \
        (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \
         1 << MMU_PL1_IDX    | 1 << MMU_PL1_P_IDX    | \
@@ -287,7 +289,8 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
    if (env->psw & (ifetch ? PSW_C : PSW_D)) {
        return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
    }
    return MMU_PHYS_IDX; /* mmu disabled */
    /* mmu disabled */
    return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
#endif
}
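With the renumbering, MMU_KERNEL_IDX moves from 7 to 8 and the two translation-disabled indexes sit strictly below it, which is exactly the property MMU_IDX_MMU_DISABLED() tests. A self-contained check of the macro arithmetic, with the values copied from the new block above:

    #include <assert.h>

    #define MMU_ABS_W_IDX     6
    #define MMU_ABS_IDX       7
    #define MMU_KERNEL_IDX    8
    #define MMU_USER_P_IDX    15

    #define MMU_IDX_MMU_DISABLED(MIDX)  ((MIDX) < MMU_KERNEL_IDX)
    #define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
    #define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
    #define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

    int main(void)
    {
        /* Privilege 0-3, with and without PSW_P, round-trips cleanly. */
        for (int priv = 0; priv < 4; priv++) {
            for (int p = 0; p < 2; p++) {
                int idx = PRIV_P_TO_MMU_IDX(priv, p);
                assert(idx >= MMU_KERNEL_IDX && idx <= MMU_USER_P_IDX);
                assert(MMU_IDX_TO_PRIV(idx) == priv);
                assert(MMU_IDX_TO_P(idx) == p);
                assert(!MMU_IDX_MMU_DISABLED(idx));
            }
        }
        /* Only the two absolute-access indexes report "MMU disabled". */
        assert(MMU_IDX_MMU_DISABLED(MMU_ABS_W_IDX));
        assert(MMU_IDX_MMU_DISABLED(MMU_ABS_IDX));
        return 0;
    }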
@@ -126,7 +126,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
        env->cr[CR_IIASQ] =
            hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
        env->cr_back[0] =
            hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
            hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
    } else {
        env->cr[CR_IIASQ] = 0;
        env->cr_back[0] = 0;
@@ -27,41 +27,39 @@

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address. This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}

static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
        addr = (int32_t)addr;
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
        /* PDC address space */
        addr &= MAKE_64BIT_MASK(0, 24);
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
@@ -161,9 +159,22 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
    /* Virtual translation disabled. Map absolute to physical. */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }
@@ -261,7 +272,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
    }

 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
@@ -271,16 +282,15 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
@@ -367,8 +377,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure. Raise the indicated exception. */
        raise_exception_with_ior(env, excp, retaddr,
                                 addr, mmu_idx == MMU_PHYS_IDX);
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
@@ -450,7 +460,7 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = TARGET_PAGE_SIZE << mask_shift;
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

@@ -459,7 +469,14 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
@@ -505,7 +522,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_SIZE << (2 * end);
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
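The W=0 mapping above folds a 32-bit absolute address into the truncated physical space: memory space zero-extends, I/O space sign-extends so it lands at the top, and the 16 MiB PDC window is re-based just below the I/O region. A standalone re-implementation with spot checks, assuming the 40 physical address bits set in cpu-param.h above (the cast through int32_t relies on the usual two's-complement conversion):

    #include <assert.h>
    #include <stdint.h>

    #define PHYS_BITS 40  /* TARGET_PHYS_ADDR_SPACE_BITS for 64-bit hppa */

    /* Same shape as the new hppa_abs_to_phys_pa2_w0() above. */
    static uint64_t abs_to_phys_w0(uint64_t addr)
    {
        if (((addr >> 28) & 0xf) != 0xf) {
            addr = (uint32_t)addr;                    /* memory: zero-extend */
        } else if (((addr >> 24) & 0xf) != 0) {
            addr = (uint64_t)(int64_t)(int32_t)addr;  /* I/O: sign-extend */
        } else {
            addr &= (1ull << 24) - 1;                 /* PDC: keep 24 bits */
            addr |= -1ull << (PHYS_BITS - 4);         /* rebase near the top */
        }
        return addr;
    }

    int main(void)
    {
        /* An ordinary RAM address passes through unchanged. */
        assert(abs_to_phys_w0(0x12345678) == 0x12345678);
        /* I/O address 0xf1000000 sign-extends toward the top of the space. */
        assert(abs_to_phys_w0(0xf1000000) == 0xfffffffff1000000ull);
        /* A PDC address keeps its low 24 bits under the -1 << 36 base. */
        assert(abs_to_phys_w0(0xf0abcdef) == (0xabcdefull | (-1ull << 36)));
        return 0;
    }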
@@ -338,7 +338,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
#ifdef CONFIG_USER_ONLY
    return page_check_range(addr, 1, want);
#else
    int prot, excp;
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
@@ -347,7 +347,8 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
        return 0;
    }

    excp = hppa_get_physical_address(env, addr, level, 0, &phys,
    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys,
                                     &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
@@ -69,19 +69,24 @@ typedef struct DisasContext {
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}
@@ -1372,7 +1377,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
@@ -1390,7 +1395,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
@@ -1408,7 +1413,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
@@ -1426,7 +1431,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
@@ -2294,7 +2299,7 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a)
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri);
        level = tcg_constant_i32(a->ri & 3);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
@@ -3075,7 +3080,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
@@ -3105,7 +3110,7 @@ static bool trans_stby(DisasContext *ctx, arg_stby *a)
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
@@ -3139,7 +3144,7 @@ static bool trans_stdby(DisasContext *ctx, arg_stby *a)
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
@@ -3167,7 +3172,7 @@ static bool trans_lda(DisasContext *ctx, arg_ldst *a)
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
@@ -3178,7 +3183,7 @@ static bool trans_sta(DisasContext *ctx, arg_ldst *a)
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
@@ -4430,7 +4435,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
    uint64_t cs_base = ctx->base.tb->cs_base;
@@ -433,6 +433,22 @@ static int arch_sections_write(DumpState *s, uint8_t *buff)
    return 0;
}

static void arch_cleanup(DumpState *s)
{
    g_autofree uint8_t *buff = NULL;
    int rc;

    if (!pv_dump_initialized) {
        return;
    }

    buff = g_malloc(kvm_s390_pv_dmp_get_size_completion_data());
    rc = kvm_s390_dump_completion_data(buff);
    if (!rc) {
        pv_dump_initialized = false;
    }
}

int cpu_get_dump_info(ArchDumpInfo *info,
                      const struct GuestPhysBlockList *guest_phys_blocks)
{
@@ -448,10 +464,7 @@ int cpu_get_dump_info(ArchDumpInfo *info,
        info->arch_sections_add_fn = *arch_sections_add;
        info->arch_sections_write_hdr_fn = *arch_sections_write_hdr;
        info->arch_sections_write_fn = *arch_sections_write;
    } else {
        info->arch_sections_add_fn = NULL;
        info->arch_sections_write_hdr_fn = NULL;
        info->arch_sections_write_fn = NULL;
        info->arch_cleanup_fn = *arch_cleanup;
    }
    return 0;
}
@@ -4096,12 +4096,12 @@ TRANS(RETT, 32, do_add_special, a, do_rett)
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}
@@ -36,8 +36,8 @@ class S390CCWVirtioMachine(QemuSystemTest):
    dmesg_clear_count = 1
    def clear_guest_dmesg(self):
        exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
                    'echo dm_clear\ ' + str(self.dmesg_clear_count),
                    'dm_clear ' + str(self.dmesg_clear_count))
                    r'echo dm_clear\ ' + str(self.dmesg_clear_count),
                    r'dm_clear ' + str(self.dmesg_clear_count))
        self.dmesg_clear_count += 1

    def test_s390x_devices(self):
@@ -121,15 +121,15 @@ class S390CCWVirtioMachine(QemuSystemTest):
                    'cat /sys/bus/ccw/devices/0.1.1111/cutype',
                    '3832/01')
        exec_command_and_wait_for_pattern(self,
                    'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
                    '0x1af4')
                    r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
                    r'0x1af4')
        exec_command_and_wait_for_pattern(self,
                    'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
                    '0x0001')
                    r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
                    r'0x0001')
        # check fid propagation
        exec_command_and_wait_for_pattern(self,
                    'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
                    '0x0000000c')
                    r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
                    r'0x0000000c')
        # add another device
        self.clear_guest_dmesg()
        self.vm.cmd('device_add', driver='virtio-net-ccw',
@@ -235,7 +235,7 @@ class S390CCWVirtioMachine(QemuSystemTest):
                    'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
                    'virtio_gpudrmfb frame buffer device')
        exec_command_and_wait_for_pattern(self,
                    'echo -e "\e[?25l" > /dev/tty0', ':/#')
                    r'echo -e "\e[?25l" > /dev/tty0', ':/#')
        exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
                    'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;'
                    'done',
356
tests/avocado/mem-addr-space-check.py
Normal file
@@ -0,0 +1,356 @@
# Check for crash when using memory beyond the available guest processor
# address space.
#
# Copyright (c) 2023 Red Hat, Inc.
#
# Author:
#  Ani Sinha <anisinha@redhat.com>
#
# SPDX-License-Identifier: GPL-2.0-or-later

from avocado_qemu import QemuSystemTest
import signal
import time

class MemAddrCheck(QemuSystemTest):
    # after launch, in order to generate the logs from QEMU we need to
    # wait for some time. Launching and then immediately shutting down
    # the VM generates empty logs. A delay of 1 second is added for
    # this reason.
    DELAY_Q35_BOOT_SEQUENCE = 1

    # First, let's test some 32-bit processors.
    # for all 32-bit cases, pci64_hole_size is 0.
    def test_phybits_low_pse36(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        With pse36 feature ON, a processor has 36 bits of addressing. So it can
        access up to a maximum of 64GiB of memory. Memory hotplug region begins
        at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when
        we have 0.5 GiB of VM memory, see pc_q35_init()). This means total
        hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory
        for dimm alignment for all newer machines (see enforce_aligned_dimm
        property for pc machines and pc_get_device_memory_range()). That leaves
        total hotpluggable actual memory size of 59 GiB. If the VM is started
        with 0.5 GiB of memory, maxmem should be set to a maximum value of
        59.5 GiB to ensure that the processor can address all memory directly.
        Note that 64-bit pci hole size is 0 in this case. If maxmem is set to
        59.6G, QEMU should fail to start with the message "phys-bits too low".
        If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU
        should start fine.
        """
        self.vm.add_args('-S', '-machine', 'q35', '-m',
                         '512,slots=1,maxmem=59.6G',
                         '-cpu', 'pentium,pse36=on', '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_low_pae(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        With pae feature ON, a processor has 36 bits of addressing. So it can
        access up to a maximum of 64GiB of memory. Rest is the same as the case
        with pse36 above.
        """
        self.vm.add_args('-S', '-machine', 'q35', '-m',
                         '512,slots=1,maxmem=59.6G',
                         '-cpu', 'pentium,pae=on', '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_pentium_pse36(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Setting maxmem to 59.5G and making sure that QEMU can start with the
        same options as the failing case above with pse36 cpu feature.
        """
        self.vm.add_args('-machine', 'q35', '-m',
                         '512,slots=1,maxmem=59.5G',
                         '-cpu', 'pentium,pse36=on', '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_pentium_pae(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Test is same as above but now with pae cpu feature turned on.
        Setting maxmem to 59.5G and making sure that QEMU can start fine
        with the same options as the case above.
        """
        self.vm.add_args('-machine', 'q35', '-m',
                         '512,slots=1,maxmem=59.5G',
                         '-cpu', 'pentium,pae=on', '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_pentium2(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Pentium2 has 36 bits of addressing, so it's the same as pentium
        with pse36 ON.
        """
        self.vm.add_args('-machine', 'q35', '-m',
                         '512,slots=1,maxmem=59.5G',
                         '-cpu', 'pentium2', '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_low_nonpse36(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Pentium processor has 32 bits of addressing without pse36 or pae,
        so it can access physical addresses up to 4 GiB. Setting maxmem to
        4 GiB should make QEMU fail to start with "phys-bits too low"
        message because the region for memory hotplug is always placed
        above 4 GiB due to the PCI hole and simplicity.
        """
        self.vm.add_args('-S', '-machine', 'q35', '-m',
                         '512,slots=1,maxmem=4G',
                         '-cpu', 'pentium', '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    # Now let's test some 64-bit CPU cases.
    def test_phybits_low_tcg_q35_70_amd(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        For q35 7.1 machines and above, there is a HT window that starts at
        1012 GiB and ends at 1 TiB - 1. If the max GPA falls in this range,
        "above_4G" memory is adjusted to start at 1 TiB boundary for AMD cpus
        in the default case. Let's first test the behaviour without that
        adjustment, using a 7.0 machine. For q35-7.0 machines, "above 4G"
        memory starts at 4 GiB.
        pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to
        be 40, TCG emulated CPUs have a maximum of 1 TiB (1024 GiB) of
        directly addressable memory.
        Hence, the maxmem value can be at most
        1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB,
        which is equal to 987.5 GiB. Setting the value to 988 GiB should
        make QEMU fail with the error message.
        """
        self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
                         '512,slots=1,maxmem=988G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_low_tcg_q35_71_amd(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        AMD_HT_START is defined to be at 1012 GiB. So for q35 machines
        version > 7.0 and AMD cpus, instead of the 1024 GiB limit for a
        40-bit processor address space, it has to be 1012 GiB, that is,
        12 GiB less than the case above in order to accommodate the HT hole.
        Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less
        than 988 GiB).
        """
        self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
                         '512,slots=1,maxmem=976G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_tcg_q35_70_amd(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Same as q35-7.0 AMD case except that here we check that QEMU can
        successfully start when maxmem is < 988G.
        """
        self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
                         '512,slots=1,maxmem=987.5G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_tcg_q35_71_amd(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Same as q35-7.1 AMD case except that here we check that QEMU can
        successfully start when maxmem is < 976G.
        """
        self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
                         '512,slots=1,maxmem=975.5G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_tcg_q35_71_intel(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Same parameters as test_phybits_low_tcg_q35_71_amd() but use
        Intel cpu instead. QEMU should start fine in this case as
        "above_4G" memory starts at 4G.
        """
        self.vm.add_args('-S', '-cpu', 'Skylake-Server',
                         '-machine', 'pc-q35-7.1', '-m',
                         '512,slots=1,maxmem=976G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_low_tcg_q35_71_amd_41bits(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        AMD processor with 41 bits. Max cpu hw address = 2 TiB.
        By setting maxmem above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can
        force "above_4G" memory to start at 1 TiB for q35-7.1 machines
        (max GPA will be above AMD_HT_START which is defined as 1012 GiB).

        With pci_64_hole size at 32 GiB, in this case, maxmem should be 991.5
        GiB with 1 GiB per slot for alignment and 0.5 GiB as non-hotplug
        memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB, QEMU should
        fail to start.
        """
        self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
                         '-machine', 'pc-q35-7.1', '-m',
                         '512,slots=1,maxmem=992G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_tcg_q35_71_amd_41bits(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        AMD processor with 41 bits. Max cpu hw address = 2 TiB.
        Same as above, but by setting maxmem between 976 GiB and 992 GiB,
        QEMU should start fine.
        """
        self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
                         '-machine', 'pc-q35-7.1', '-m',
                         '512,slots=1,maxmem=990G',
                         '-display', 'none',
                         '-object', 'memory-backend-ram,id=mem1,size=1G',
                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_low_tcg_q35_intel_cxl(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        cxl memory window starts after memory device range. Here, we use 1 GiB
        of cxl window memory. 4G_mem end aligns at 4G. pci64_hole is 32 GiB and
        starts after the cxl memory window.
        So maxmem here should be at most 986 GiB considering all memory boundary
        alignment constraints with 40 bits (1 TiB) of processor physical bits.
        """
        self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
                         '-machine', 'q35,cxl=on', '-m',
                         '512,slots=1,maxmem=987G',
                         '-display', 'none',
                         '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
                         '-M', 'cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'phys-bits too low')

    def test_phybits_ok_tcg_q35_intel_cxl(self):
        """
        :avocado: tags=machine:q35
        :avocado: tags=arch:x86_64

        Same as above but here we do not reserve any cxl memory window. Hence,
        with the exact same parameters as above, QEMU should start fine even
        with cxl enabled.
        """
        self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
                         '-machine', 'q35,cxl=on', '-m',
                         '512,slots=1,maxmem=987G',
                         '-display', 'none',
                         '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1')
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
        self.vm.shutdown()
        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
@@ -136,12 +136,11 @@ def test_frontend(fname):
def open_test_result(dir_name, file_name, update):
    mode = 'r+' if update else 'r'
    try:
        fp = open(os.path.join(dir_name, file_name), mode)
        return open(os.path.join(dir_name, file_name), mode, encoding='utf-8')
    except FileNotFoundError:
        if not update:
            raise
        fp = open(os.path.join(dir_name, file_name), 'w+')
    return fp
        return open(os.path.join(dir_name, file_name), 'w+', encoding='utf-8')


def test_and_diff(test_name, dir_name, update):
@@ -218,9 +217,9 @@ def main(argv):
        test_name = os.path.splitext(base_name)[0]
        status |= test_and_diff(test_name, dir_name, args.update)

    exit(status)
    sys.exit(status)


if __name__ == '__main__':
    main(sys.argv)
    exit(0)
    sys.exit(0)
@@ -118,6 +118,12 @@ void migrate_incoming_qmp(QTestState *to, const char *uri, const char *fmt, ...)

    rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}",
                    args);

    if (!qdict_haskey(rsp, "return")) {
        g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true);
        g_test_message("%s", s->str);
    }

    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);
@@ -292,3 +298,77 @@ char *resolve_machine_version(const char *alias, const char *var1,

    return find_common_machine_version(machine_name, var1, var2);
}

#ifdef O_DIRECT
/*
 * Probe for O_DIRECT support on the filesystem. Since this is used
 * for tests, be conservative, if anything fails, assume it's
 * unsupported.
 */
bool probe_o_direct_support(const char *tmpfs)
{
    g_autofree char *filename = g_strdup_printf("%s/probe-o-direct", tmpfs);
    int fd, flags = O_CREAT | O_RDWR | O_TRUNC | O_DIRECT;
    void *buf;
    ssize_t ret, len;
    uint64_t offset;

    fd = open(filename, flags, 0660);
    if (fd < 0) {
        unlink(filename);
        return false;
    }

    /*
     * Assuming 4k should be enough to satisfy O_DIRECT alignment
     * requirements. The migration code uses 1M to be conservative.
     */
    len = 0x100000;
    offset = 0x100000;

    buf = aligned_alloc(len, len);
    g_assert(buf);

    ret = pwrite(fd, buf, len, offset);
    unlink(filename);
    g_free(buf);

    if (ret < 0) {
        return false;
    }

    return true;
}
#endif

typedef struct {
    char *name;
    void (*func)(void);
} MigrationTest;

static void migration_test_destroy(gpointer data)
{
    MigrationTest *test = (MigrationTest *)data;

    g_free(test->name);
    g_free(test);
}

static void migration_test_wrapper(const void *data)
{
    MigrationTest *test = (MigrationTest *)data;

    g_test_message("Running /%s%s", qtest_get_arch(), test->name);
    test->func();
}

void migration_test_add(const char *path, void (*fn)(void))
{
    MigrationTest *test = g_new0(MigrationTest, 1);

    test->func = fn;
    test->name = g_strdup(path);

    qtest_add_data_func_full(path, test, migration_test_wrapper,
                             migration_test_destroy);
}
@@ -47,4 +47,6 @@ char *find_common_machine_version(const char *mtype, const char *var1,
                                  const char *var2);
char *resolve_machine_version(const char *alias, const char *var1,
                              const char *var2);
bool probe_o_direct_support(const char *tmpfs);
void migration_test_add(const char *path, void (*fn)(void));
#endif /* MIGRATION_HELPERS_H */
@@ -2135,6 +2135,14 @@ static void *test_mode_reboot_start(QTestState *from, QTestState *to)
    return NULL;
}

static void *migrate_fixed_ram_start(QTestState *from, QTestState *to)
{
    migrate_set_capability(from, "fixed-ram", true);
    migrate_set_capability(to, "fixed-ram", true);

    return NULL;
}

static void test_mode_reboot(void)
{
    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
@@ -2149,6 +2157,184 @@ static void test_mode_reboot(void)
    test_file_common(&args, true);
}

static void test_precopy_file_fixed_ram_live(void)
{
    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
                                           FILE_TEST_FILENAME);
    MigrateCommon args = {
        .connect_uri = uri,
        .listen_uri = "defer",
        .start_hook = migrate_fixed_ram_start,
    };

    test_file_common(&args, false);
}

static void test_precopy_file_fixed_ram(void)
{
    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
                                           FILE_TEST_FILENAME);
    MigrateCommon args = {
        .connect_uri = uri,
        .listen_uri = "defer",
        .start_hook = migrate_fixed_ram_start,
    };

    test_file_common(&args, true);
}

static void *migrate_multifd_fixed_ram_start(QTestState *from, QTestState *to)
{
    migrate_fixed_ram_start(from, to);

    migrate_set_parameter_int(from, "multifd-channels", 4);
    migrate_set_parameter_int(to, "multifd-channels", 4);

    migrate_set_capability(from, "multifd", true);
    migrate_set_capability(to, "multifd", true);

    return NULL;
}

static void test_multifd_file_fixed_ram_live(void)
{
    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
                                           FILE_TEST_FILENAME);
    MigrateCommon args = {
        .connect_uri = uri,
        .listen_uri = "defer",
        .start_hook = migrate_multifd_fixed_ram_start,
    };

    test_file_common(&args, false);
}

static void test_multifd_file_fixed_ram(void)
{
    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
                                           FILE_TEST_FILENAME);
    MigrateCommon args = {
        .connect_uri = uri,
        .listen_uri = "defer",
        .start_hook = migrate_multifd_fixed_ram_start,
    };

    test_file_common(&args, true);
}

#ifdef O_DIRECT
static void *migrate_multifd_fixed_ram_dio_start(QTestState *from,
                                                 QTestState *to)
{
    migrate_multifd_fixed_ram_start(from, to);

    migrate_set_parameter_bool(from, "direct-io", true);
    migrate_set_parameter_bool(to, "direct-io", true);

    return NULL;
}

static void test_multifd_file_fixed_ram_dio(void)
{
    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
                                           FILE_TEST_FILENAME);
    MigrateCommon args = {
        .connect_uri = uri,
        .listen_uri = "defer",
        .start_hook = migrate_multifd_fixed_ram_dio_start,
    };

    if (!probe_o_direct_support(tmpfs)) {
        g_test_skip("Filesystem does not support O_DIRECT");
        return;
    }

    test_file_common(&args, true);
}

static void migrate_multifd_fixed_ram_fdset_dio_end(QTestState *from,
                                                    QTestState *to,
                                                    void *opaque)
{
    QDict *resp;
    QList *fdsets;

    /*
     * Check that we removed the fdsets after migration, otherwise a
     * second migration would fail due to too many fdsets.
     */

    resp = qtest_qmp(from, "{'execute': 'query-fdsets', "
                     "'arguments': {}}");
    g_assert(qdict_haskey(resp, "return"));
    fdsets = qdict_get_qlist(resp, "return");
    g_assert(fdsets && qlist_empty(fdsets));
}
#endif /* O_DIRECT */

#ifndef _WIN32
static void *migrate_multifd_fixed_ram_fdset(QTestState *from, QTestState *to)
{
    g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
    int fds[3];
    int src_flags = O_CREAT | O_WRONLY;
    int dst_flags = O_CREAT | O_RDONLY;

    /* main outgoing channel: no O_DIRECT */
    fds[0] = open(file, src_flags, 0660);
    assert(fds[0] != -1);

#ifdef O_DIRECT
    src_flags |= O_DIRECT;
#endif

    /* secondary outgoing channels */
    fds[1] = open(file, src_flags, 0660);
    assert(fds[1] != -1);

    qtest_qmp_fds_assert_success(from, &fds[0], 1, "{'execute': 'add-fd', "
                                 "'arguments': {'fdset-id': 1}}");

    qtest_qmp_fds_assert_success(from, &fds[1], 1, "{'execute': 'add-fd', "
                                 "'arguments': {'fdset-id': 1}}");

    /* incoming channel */
    fds[2] = open(file, dst_flags, 0660);
    assert(fds[2] != -1);

    qtest_qmp_fds_assert_success(to, &fds[2], 1, "{'execute': 'add-fd', "
                                 "'arguments': {'fdset-id': 1}}");

#ifdef O_DIRECT
    migrate_multifd_fixed_ram_dio_start(from, to);
#else
    migrate_multifd_fixed_ram_start(from, to);
#endif

    return NULL;
}

static void test_multifd_file_fixed_ram_fdset(void)
{
    g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=0x100");
    MigrateCommon args = {
        .connect_uri = uri,
        .listen_uri = "defer",
        .start_hook = migrate_multifd_fixed_ram_fdset,
#ifdef O_DIRECT
        .finish_hook = migrate_multifd_fixed_ram_fdset_dio_end,
#endif
    };

    if (!probe_o_direct_support(tmpfs)) {
        g_test_skip("Filesystem does not support O_DIRECT");
        return;
    }

    test_file_common(&args, true);
}
#endif /* _WIN32 */

static void test_precopy_tcp_plain(void)
{
    MigrateCommon args = {
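The fdset test above leans on the monitor change earlier in this series: both outgoing descriptors live in /dev/fdset/1, one opened with O_DIRECT and one without, and monitor_fdset_dup_fd_add() now hands out whichever member matches the flags a channel requests. A sketch of the selection the channels effectively perform inside QEMU (qemu_open() is QEMU's fdset-aware open taking an Error **; treating the main channel as buffered and the multifd channels as O_DIRECT is this test's arrangement, not a general rule):

    /* Main channel asks for a plain fd; matches fds[0] from the hook above. */
    int main_fd = qemu_open("/dev/fdset/1", O_WRONLY, &err);
    /* Multifd channels ask for O_DIRECT; matches fds[1]. */
    int dio_fd = qemu_open("/dev/fdset/1", O_WRONLY | O_DIRECT, &err);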
@@ -3339,62 +3525,84 @@ int main(int argc, char **argv)
    module_call_init(MODULE_INIT_QOM);

    if (has_uffd) {
        qtest_add_func("/migration/postcopy/plain", test_postcopy);
        qtest_add_func("/migration/postcopy/recovery/plain",
                       test_postcopy_recovery);
        qtest_add_func("/migration/postcopy/preempt/plain", test_postcopy_preempt);
        qtest_add_func("/migration/postcopy/preempt/recovery/plain",
                       test_postcopy_preempt_recovery);
        migration_test_add("/migration/postcopy/plain", test_postcopy);
        migration_test_add("/migration/postcopy/recovery/plain",
                           test_postcopy_recovery);
        migration_test_add("/migration/postcopy/preempt/plain",
                           test_postcopy_preempt);
        migration_test_add("/migration/postcopy/preempt/recovery/plain",
                           test_postcopy_preempt_recovery);
        if (getenv("QEMU_TEST_FLAKY_TESTS")) {
            qtest_add_func("/migration/postcopy/compress/plain",
                           test_postcopy_compress);
            qtest_add_func("/migration/postcopy/recovery/compress/plain",
                           test_postcopy_recovery_compress);
            migration_test_add("/migration/postcopy/compress/plain",
                               test_postcopy_compress);
            migration_test_add("/migration/postcopy/recovery/compress/plain",
                               test_postcopy_recovery_compress);
        }
#ifndef _WIN32
        qtest_add_func("/migration/postcopy/recovery/double-failures",
                       test_postcopy_recovery_double_fail);
        migration_test_add("/migration/postcopy/recovery/double-failures",
                           test_postcopy_recovery_double_fail);
#endif /* _WIN32 */

    }

    qtest_add_func("/migration/bad_dest", test_baddest);
    migration_test_add("/migration/bad_dest", test_baddest);
#ifndef _WIN32
    if (!g_str_equal(arch, "s390x")) {
        qtest_add_func("/migration/analyze-script", test_analyze_script);
        migration_test_add("/migration/analyze-script", test_analyze_script);
    }
#endif
    qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain);
    qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle);
    migration_test_add("/migration/precopy/unix/plain",
                       test_precopy_unix_plain);
    migration_test_add("/migration/precopy/unix/xbzrle",
                       test_precopy_unix_xbzrle);
    /*
     * Compression fails from time to time.
     * Put test here but don't enable it until everything is fixed.
     */
    if (getenv("QEMU_TEST_FLAKY_TESTS")) {
        qtest_add_func("/migration/precopy/unix/compress/wait",
                       test_precopy_unix_compress);
        qtest_add_func("/migration/precopy/unix/compress/nowait",
                       test_precopy_unix_compress_nowait);
        migration_test_add("/migration/precopy/unix/compress/wait",
                           test_precopy_unix_compress);
        migration_test_add("/migration/precopy/unix/compress/nowait",
                           test_precopy_unix_compress_nowait);
    }

    qtest_add_func("/migration/precopy/file",
                   test_precopy_file);
    qtest_add_func("/migration/precopy/file/offset",
                   test_precopy_file_offset);
    qtest_add_func("/migration/precopy/file/offset/bad",
                   test_precopy_file_offset_bad);
    migration_test_add("/migration/precopy/file",
                       test_precopy_file);
    migration_test_add("/migration/precopy/file/offset",
                       test_precopy_file_offset);
    migration_test_add("/migration/precopy/file/offset/bad",
                       test_precopy_file_offset_bad);

    /*
     * Our CI system has problems with shared memory.
     * Don't run this test until we find a workaround.
     */
    if (getenv("QEMU_TEST_FLAKY_TESTS")) {
        qtest_add_func("/migration/mode/reboot", test_mode_reboot);
        migration_test_add("/migration/mode/reboot", test_mode_reboot);
    }

    migration_test_add("/migration/precopy/file/fixed-ram",
                       test_precopy_file_fixed_ram);
    migration_test_add("/migration/precopy/file/fixed-ram/live",
                       test_precopy_file_fixed_ram_live);

    migration_test_add("/migration/multifd/file/fixed-ram",
                       test_multifd_file_fixed_ram);
    migration_test_add("/migration/multifd/file/fixed-ram/live",
                       test_multifd_file_fixed_ram_live);
#ifdef O_DIRECT
    migration_test_add("/migration/multifd/file/fixed-ram/dio",
                       test_multifd_file_fixed_ram_dio);
#endif

#ifndef _WIN32
    migration_test_add("/migration/multifd/file/fixed-ram/fdset",
                       test_multifd_file_fixed_ram_fdset);
#endif

#ifdef CONFIG_GNUTLS
    qtest_add_func("/migration/precopy/unix/tls/psk",
                   test_precopy_unix_tls_psk);
    migration_test_add("/migration/precopy/unix/tls/psk",
                       test_precopy_unix_tls_psk);

    if (has_uffd) {
        /*
@@ -3402,110 +3610,112 @@ int main(int argc, char **argv)
         * channels are tested under precopy. Here what we want to test is the
         * general postcopy path that has TLS channel enabled.
         */
        qtest_add_func("/migration/postcopy/tls/psk", test_postcopy_tls_psk);
        qtest_add_func("/migration/postcopy/recovery/tls/psk",
                       test_postcopy_recovery_tls_psk);
        qtest_add_func("/migration/postcopy/preempt/tls/psk",
                       test_postcopy_preempt_tls_psk);
        qtest_add_func("/migration/postcopy/preempt/recovery/tls/psk",
                       test_postcopy_preempt_all);
        migration_test_add("/migration/postcopy/tls/psk",
                           test_postcopy_tls_psk);
        migration_test_add("/migration/postcopy/recovery/tls/psk",
                           test_postcopy_recovery_tls_psk);
        migration_test_add("/migration/postcopy/preempt/tls/psk",
                           test_postcopy_preempt_tls_psk);
        migration_test_add("/migration/postcopy/preempt/recovery/tls/psk",
                           test_postcopy_preempt_all);
    }
#ifdef CONFIG_TASN1
    qtest_add_func("/migration/precopy/unix/tls/x509/default-host",
                   test_precopy_unix_tls_x509_default_host);
    qtest_add_func("/migration/precopy/unix/tls/x509/override-host",
                   test_precopy_unix_tls_x509_override_host);
    migration_test_add("/migration/precopy/unix/tls/x509/default-host",
                       test_precopy_unix_tls_x509_default_host);
    migration_test_add("/migration/precopy/unix/tls/x509/override-host",
                       test_precopy_unix_tls_x509_override_host);
#endif /* CONFIG_TASN1 */
#endif /* CONFIG_GNUTLS */

    qtest_add_func("/migration/precopy/tcp/plain", test_precopy_tcp_plain);
    migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain);

    qtest_add_func("/migration/precopy/tcp/plain/switchover-ack",
                   test_precopy_tcp_switchover_ack);
    migration_test_add("/migration/precopy/tcp/plain/switchover-ack",
                       test_precopy_tcp_switchover_ack);

#ifdef CONFIG_GNUTLS
    qtest_add_func("/migration/precopy/tcp/tls/psk/match",
                   test_precopy_tcp_tls_psk_match);
    qtest_add_func("/migration/precopy/tcp/tls/psk/mismatch",
                   test_precopy_tcp_tls_psk_mismatch);
    migration_test_add("/migration/precopy/tcp/tls/psk/match",
                       test_precopy_tcp_tls_psk_match);
    migration_test_add("/migration/precopy/tcp/tls/psk/mismatch",
                       test_precopy_tcp_tls_psk_mismatch);
#ifdef CONFIG_TASN1
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/default-host",
|
||||
test_precopy_tcp_tls_x509_default_host);
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/override-host",
|
||||
test_precopy_tcp_tls_x509_override_host);
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/mismatch-host",
|
||||
test_precopy_tcp_tls_x509_mismatch_host);
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/friendly-client",
|
||||
test_precopy_tcp_tls_x509_friendly_client);
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/hostile-client",
|
||||
test_precopy_tcp_tls_x509_hostile_client);
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/allow-anon-client",
|
||||
test_precopy_tcp_tls_x509_allow_anon_client);
|
||||
qtest_add_func("/migration/precopy/tcp/tls/x509/reject-anon-client",
|
||||
test_precopy_tcp_tls_x509_reject_anon_client);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/default-host",
|
||||
test_precopy_tcp_tls_x509_default_host);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/override-host",
|
||||
test_precopy_tcp_tls_x509_override_host);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host",
|
||||
test_precopy_tcp_tls_x509_mismatch_host);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client",
|
||||
test_precopy_tcp_tls_x509_friendly_client);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client",
|
||||
test_precopy_tcp_tls_x509_hostile_client);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client",
|
||||
test_precopy_tcp_tls_x509_allow_anon_client);
|
||||
migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client",
|
||||
test_precopy_tcp_tls_x509_reject_anon_client);
|
||||
#endif /* CONFIG_TASN1 */
|
||||
#endif /* CONFIG_GNUTLS */
|
||||
|
||||
/* qtest_add_func("/migration/ignore_shared", test_ignore_shared); */
|
||||
/* migration_test_add("/migration/ignore_shared", test_ignore_shared); */
|
||||
#ifndef _WIN32
|
||||
qtest_add_func("/migration/fd_proto", test_migrate_fd_proto);
|
||||
migration_test_add("/migration/fd_proto", test_migrate_fd_proto);
|
||||
#endif
|
||||
qtest_add_func("/migration/validate_uuid", test_validate_uuid);
|
||||
qtest_add_func("/migration/validate_uuid_error", test_validate_uuid_error);
|
||||
qtest_add_func("/migration/validate_uuid_src_not_set",
|
||||
test_validate_uuid_src_not_set);
|
||||
qtest_add_func("/migration/validate_uuid_dst_not_set",
|
||||
test_validate_uuid_dst_not_set);
|
||||
migration_test_add("/migration/validate_uuid", test_validate_uuid);
|
||||
migration_test_add("/migration/validate_uuid_error",
|
||||
test_validate_uuid_error);
|
||||
migration_test_add("/migration/validate_uuid_src_not_set",
|
||||
test_validate_uuid_src_not_set);
|
||||
migration_test_add("/migration/validate_uuid_dst_not_set",
|
||||
test_validate_uuid_dst_not_set);
|
||||
/*
|
||||
* See explanation why this test is slow on function definition
|
||||
*/
|
||||
if (g_test_slow()) {
|
||||
qtest_add_func("/migration/auto_converge", test_migrate_auto_converge);
|
||||
migration_test_add("/migration/auto_converge",
|
||||
test_migrate_auto_converge);
|
||||
if (g_str_equal(arch, "x86_64") &&
|
||||
has_kvm && kvm_dirty_ring_supported()) {
|
||||
qtest_add_func("/migration/dirty_limit", test_migrate_dirty_limit);
|
||||
migration_test_add("/migration/dirty_limit",
|
||||
test_migrate_dirty_limit);
|
||||
}
|
||||
}
|
||||
qtest_add_func("/migration/multifd/tcp/plain/none",
|
||||
test_multifd_tcp_none);
|
||||
migration_test_add("/migration/multifd/tcp/plain/none",
|
||||
test_multifd_tcp_none);
|
||||
/*
|
||||
* This test is flaky and sometimes fails in CI and otherwise:
|
||||
* don't run unless user opts in via environment variable.
|
||||
*/
|
||||
if (getenv("QEMU_TEST_FLAKY_TESTS")) {
|
||||
qtest_add_func("/migration/multifd/tcp/plain/cancel",
|
||||
migration_test_add("/migration/multifd/tcp/plain/cancel",
|
||||
test_multifd_tcp_cancel);
|
||||
}
|
||||
qtest_add_func("/migration/multifd/tcp/plain/zlib",
|
||||
test_multifd_tcp_zlib);
|
||||
migration_test_add("/migration/multifd/tcp/plain/zlib",
|
||||
test_multifd_tcp_zlib);
|
||||
#ifdef CONFIG_ZSTD
|
||||
qtest_add_func("/migration/multifd/tcp/plain/zstd",
|
||||
test_multifd_tcp_zstd);
|
||||
migration_test_add("/migration/multifd/tcp/plain/zstd",
|
||||
test_multifd_tcp_zstd);
|
||||
#endif
|
||||
#ifdef CONFIG_GNUTLS
|
||||
qtest_add_func("/migration/multifd/tcp/tls/psk/match",
|
||||
test_multifd_tcp_tls_psk_match);
|
||||
qtest_add_func("/migration/multifd/tcp/tls/psk/mismatch",
|
||||
test_multifd_tcp_tls_psk_mismatch);
|
||||
migration_test_add("/migration/multifd/tcp/tls/psk/match",
|
||||
test_multifd_tcp_tls_psk_match);
|
||||
migration_test_add("/migration/multifd/tcp/tls/psk/mismatch",
|
||||
test_multifd_tcp_tls_psk_mismatch);
|
||||
#ifdef CONFIG_TASN1
|
||||
qtest_add_func("/migration/multifd/tcp/tls/x509/default-host",
|
||||
test_multifd_tcp_tls_x509_default_host);
|
||||
qtest_add_func("/migration/multifd/tcp/tls/x509/override-host",
|
||||
test_multifd_tcp_tls_x509_override_host);
|
||||
qtest_add_func("/migration/multifd/tcp/tls/x509/mismatch-host",
|
||||
test_multifd_tcp_tls_x509_mismatch_host);
|
||||
qtest_add_func("/migration/multifd/tcp/tls/x509/allow-anon-client",
|
||||
test_multifd_tcp_tls_x509_allow_anon_client);
|
||||
qtest_add_func("/migration/multifd/tcp/tls/x509/reject-anon-client",
|
||||
test_multifd_tcp_tls_x509_reject_anon_client);
|
||||
migration_test_add("/migration/multifd/tcp/tls/x509/default-host",
|
||||
test_multifd_tcp_tls_x509_default_host);
|
||||
migration_test_add("/migration/multifd/tcp/tls/x509/override-host",
|
||||
test_multifd_tcp_tls_x509_override_host);
|
||||
migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host",
|
||||
test_multifd_tcp_tls_x509_mismatch_host);
|
||||
migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client",
|
||||
test_multifd_tcp_tls_x509_allow_anon_client);
|
||||
migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client",
|
||||
test_multifd_tcp_tls_x509_reject_anon_client);
|
||||
#endif /* CONFIG_TASN1 */
|
||||
#endif /* CONFIG_GNUTLS */
|
||||
|
||||
if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) {
|
||||
qtest_add_func("/migration/dirty_ring",
|
||||
test_precopy_unix_dirty_ring);
|
||||
qtest_add_func("/migration/vcpu_dirty_limit",
|
||||
test_vcpu_dirty_limit);
|
||||
migration_test_add("/migration/dirty_ring",
|
||||
test_precopy_unix_dirty_ring);
|
||||
migration_test_add("/migration/vcpu_dirty_limit",
|
||||
test_vcpu_dirty_limit);
|
||||
}
|
||||
|
||||
ret = g_test_run();
|
||||
|
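The hunks above switch every direct qtest_add_func() registration over to a migration_test_add() helper, and add the new fixed-ram registrations alongside. The helper's definition is outside this excerpt; as a minimal sketch, assuming it is only a thin forwarding wrapper (the real QEMU helper may additionally hook per-test setup or failure handling; this body is assumed, not taken from the diff):

    /* Hypothetical sketch only: forward to the usual qtest registration. */
    #include "libqtest.h"   /* for qtest_add_func() */

    static void migration_test_add(const char *path, void (*fn)(void))
    {
        qtest_add_func(path, fn);
    }

Because every call site keeps the same "/migration/..." path and test function, a mechanical rename like this leaves the resulting g_test suite layout unchanged.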
@@ -1,6 +1,6 @@
-# This is an example blacklist.
-# To enable use of the blacklist add this to configure:
-#  "--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/blacklist.tsan"
+# This is an example ignore list.
+# To enable use of the ignore list add this to configure:
+#  "--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/ignore.tsan"
 # The eventual goal would be to fix these warnings.

 # TSan is not happy about setting/getting of dirty bits,
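The rename from "blacklist" to "ignore list" changes only the file name and its comments; the entries themselves keep the usual sanitizer special-case-list syntax of prefix:glob lines. Two hypothetical entries to illustrate the format (not taken from this diff):

    # Suppress TSan data-race reports involving one function:
    race:some_function_name
    # Ignore all reports originating from one source file:
    src:*/some/path.c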
@@ -44,6 +44,10 @@ static void compare_ranges(const char *prefix, GList *ranges,
     print_ranges("out", ranges);
     print_ranges("expected", expected);
 #endif
+    if (!expected) {
+        g_assert_true(!ranges);
+        return;
+    }
     g_assert_cmpint(g_list_length(ranges), ==, g_list_length(expected));
     for (l = ranges, e = expected; l ; l = l->next, e = e->next) {
         Range *r = (Range *)l->data;
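The added guard makes the "no ranges expected" case explicit: when expected is NULL, the result must be NULL as well, and the element-wise walk below is skipped entirely rather than relying on two empty lists happening to compare equal (g_list_length(NULL) is 0). A minimal standalone illustration of the same pattern, in plain GLib rather than the test file itself:

    #include <glib.h>

    static void check(GList *ranges, GList *expected)
    {
        /* NULL expectation: the result must also be the empty list. */
        if (!expected) {
            g_assert_true(!ranges);
            return;
        }
        /* Otherwise both lists must have the same length (and, in the
         * real test, matching elements). */
        g_assert_cmpint(g_list_length(ranges), ==, g_list_length(expected));
    }

    int main(void)
    {
        check(NULL, NULL);   /* passes: nothing expected, nothing produced */
        return 0;
    }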
@@ -30,8 +30,8 @@ class NetBSDVM(basevm.BaseVM):
         "git-base",
         "pkgconf",
         "xz",
-        "python310",
-        "py310-expat",
+        "python311",
+        "py311-expat",
         "ninja-build",

         # gnu tools
@@ -277,6 +277,15 @@ int qemu_lock_fd_test(int fd, int64_t start, int64_t len, bool exclusive)
 }
 #endif

+bool qemu_has_direct_io(void)
+{
+#ifdef O_DIRECT
+    return true;
+#else
+    return false;
+#endif
+}
+
 static int qemu_open_cloexec(const char *name, int flags, mode_t mode)
 {
     int ret;
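The new qemu_has_direct_io() lets callers ask at run time whether the build knows O_DIRECT at all, instead of sprinkling #ifdef O_DIRECT at every call site. A hypothetical caller could gate a direct-I/O test like this (illustrative usage only; the series itself guards the test registration with #ifdef O_DIRECT, as shown above):

    /* Hypothetical: skip a direct-I/O test on builds without O_DIRECT. */
    static void test_file_dio(void)
    {
        if (!qemu_has_direct_io()) {
            g_test_skip("this build has no O_DIRECT support");
            return;
        }
        /* ... open the target file with O_DIRECT and run the test ... */
    }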